author     David S. Miller <davem@davemloft.net>    2016-10-30 17:42:58 +0100
committer  David S. Miller <davem@davemloft.net>    2016-10-30 17:42:58 +0100
commit     27058af401e49d88a905df000dd26f443fcfa8ce (patch)
tree       819f32113d3b8374b9fbf72e2202d4c4d4511a60
parent     firewire: net: really fix maximum possible MTU (diff)
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes.  For example, David Ahern's
adjacency list revamp in 'net-next' conflicted with an adjacency
list traversal bug fix in 'net'.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--.mailmap1
-rw-r--r--CREDITS5
-rw-r--r--Documentation/00-INDEX3
-rw-r--r--Documentation/80211/cfg80211.rst345
-rw-r--r--Documentation/80211/conf.py5
-rw-r--r--Documentation/80211/index.rst17
-rw-r--r--Documentation/80211/introduction.rst17
-rw-r--r--Documentation/80211/mac80211-advanced.rst295
-rw-r--r--Documentation/80211/mac80211.rst216
-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl7
-rw-r--r--Documentation/DocBook/80211.tmpl584
-rw-r--r--Documentation/DocBook/Makefile2
-rw-r--r--Documentation/Makefile4
-rw-r--r--Documentation/accounting/Makefile7
-rw-r--r--Documentation/accounting/delay-accounting.txt6
-rw-r--r--Documentation/arm/00-INDEX2
-rw-r--r--Documentation/auxdisplay/Makefile7
-rw-r--r--Documentation/auxdisplay/cfag12864b2
-rw-r--r--Documentation/blackfin/00-INDEX4
-rw-r--r--Documentation/blackfin/Makefile5
-rw-r--r--Documentation/device-mapper/dm-raid.txt1
-rw-r--r--Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt17
-rw-r--r--Documentation/devicetree/bindings/clock/uniphier-clock.txt16
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt1
-rw-r--r--Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt21
-rw-r--r--Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt23
-rw-r--r--Documentation/devicetree/bindings/ipmi/ipmi-smic.txt (renamed from Documentation/devicetree/bindings/ipmi.txt)0
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/soc.txt4
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt4
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt62
-rw-r--r--Documentation/devicetree/bindings/rtc/dallas,ds1390.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rx8900.txt22
-rw-r--r--Documentation/devicetree/bindings/rtc/rtc-omap.txt21
-rw-r--r--Documentation/devicetree/bindings/serial/cdns,uart.txt4
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt8
-rw-r--r--Documentation/devicetree/bindings/timer/jcore,pit.txt24
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt2
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt2
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/Makefile5
-rw-r--r--Documentation/filesystems/proc.txt26
-rw-r--r--Documentation/gpio/board.txt11
-rw-r--r--Documentation/ia64/Makefile5
-rw-r--r--Documentation/index.rst1
-rw-r--r--Documentation/input/alps.txt57
-rw-r--r--Documentation/kbuild/makefiles.txt16
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/kselftest.txt5
-rw-r--r--Documentation/laptops/00-INDEX4
-rw-r--r--Documentation/laptops/Makefile5
-rw-r--r--Documentation/laptops/laptop-mode.txt2
-rw-r--r--Documentation/mic/Makefile1
-rw-r--r--Documentation/mic/mpssd/Makefile21
-rw-r--r--Documentation/misc-devices/Makefile1
-rw-r--r--Documentation/misc-devices/mei/Makefile5
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/Makefile1
-rw-r--r--Documentation/networking/netdev-FAQ.txt8
-rw-r--r--Documentation/networking/nf_conntrack-sysctl.txt18
-rw-r--r--Documentation/networking/timestamping/Makefile14
-rw-r--r--Documentation/pcmcia/Makefile7
-rw-r--r--Documentation/pcmcia/devicetable.txt4
-rw-r--r--Documentation/prctl/Makefile10
-rw-r--r--Documentation/ptp/Makefile8
-rw-r--r--Documentation/scsi/g_NCR5380.txt34
-rw-r--r--Documentation/spi/00-INDEX2
-rw-r--r--Documentation/timers/00-INDEX4
-rw-r--r--Documentation/timers/Makefile5
-rw-r--r--Documentation/timers/hpet.txt2
-rw-r--r--Documentation/vDSO/Makefile17
-rw-r--r--Documentation/watchdog/Makefile1
-rw-r--r--Documentation/watchdog/src/Makefile5
-rw-r--r--Documentation/watchdog/watchdog-api.txt2
-rw-r--r--Documentation/watchdog/wdt.txt2
-rw-r--r--MAINTAINERS103
-rw-r--r--Makefile26
-rw-r--r--arch/Kconfig39
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/asm/uaccess.h9
-rw-r--r--arch/alpha/kernel/Makefile2
-rw-r--r--arch/alpha/kernel/alpha_ksyms.c102
-rw-r--r--arch/alpha/kernel/machvec_impl.h6
-rw-r--r--arch/alpha/kernel/ptrace.c9
-rw-r--r--arch/alpha/kernel/setup.c1
-rw-r--r--arch/alpha/lib/callback_srm.S5
-rw-r--r--arch/alpha/lib/checksum.c3
-rw-r--r--arch/alpha/lib/clear_page.S3
-rw-r--r--arch/alpha/lib/clear_user.S2
-rw-r--r--arch/alpha/lib/copy_page.S3
-rw-r--r--arch/alpha/lib/copy_user.S19
-rw-r--r--arch/alpha/lib/csum_ipv6_magic.S2
-rw-r--r--arch/alpha/lib/csum_partial_copy.c2
-rw-r--r--arch/alpha/lib/dec_and_lock.c2
-rw-r--r--arch/alpha/lib/divide.S3
-rw-r--r--arch/alpha/lib/ev6-clear_page.S3
-rw-r--r--arch/alpha/lib/ev6-clear_user.S3
-rw-r--r--arch/alpha/lib/ev6-copy_page.S3
-rw-r--r--arch/alpha/lib/ev6-copy_user.S26
-rw-r--r--arch/alpha/lib/ev6-csum_ipv6_magic.S2
-rw-r--r--arch/alpha/lib/ev6-divide.S3
-rw-r--r--arch/alpha/lib/ev6-memchr.S3
-rw-r--r--arch/alpha/lib/ev6-memcpy.S3
-rw-r--r--arch/alpha/lib/ev6-memset.S7
-rw-r--r--arch/alpha/lib/ev67-strcat.S3
-rw-r--r--arch/alpha/lib/ev67-strchr.S3
-rw-r--r--arch/alpha/lib/ev67-strlen.S3
-rw-r--r--arch/alpha/lib/ev67-strncat.S3
-rw-r--r--arch/alpha/lib/ev67-strrchr.S3
-rw-r--r--arch/alpha/lib/fpreg.c7
-rw-r--r--arch/alpha/lib/memchr.S3
-rw-r--r--arch/alpha/lib/memcpy.c5
-rw-r--r--arch/alpha/lib/memmove.S3
-rw-r--r--arch/alpha/lib/memset.S7
-rw-r--r--arch/alpha/lib/strcat.S2
-rw-r--r--arch/alpha/lib/strchr.S3
-rw-r--r--arch/alpha/lib/strcpy.S3
-rw-r--r--arch/alpha/lib/strlen.S3
-rw-r--r--arch/alpha/lib/strncat.S3
-rw-r--r--arch/alpha/lib/strncpy.S3
-rw-r--r--arch/alpha/lib/strrchr.S3
-rw-r--r--arch/arc/Kconfig27
-rw-r--r--arch/arc/Makefile3
-rw-r--r--arch/arc/boot/Makefile16
-rw-r--r--arch/arc/include/asm/arcregs.h3
-rw-r--r--arch/arc/include/asm/cache.h2
-rw-r--r--arch/arc/include/asm/elf.h2
-rw-r--r--arch/arc/include/asm/mcip.h16
-rw-r--r--arch/arc/include/asm/module.h1
-rw-r--r--arch/arc/include/asm/setup.h6
-rw-r--r--arch/arc/include/asm/syscalls.h1
-rw-r--r--arch/arc/include/uapi/asm/unistd.h9
-rw-r--r--arch/arc/kernel/mcip.c31
-rw-r--r--arch/arc/kernel/module.c53
-rw-r--r--arch/arc/kernel/process.c33
-rw-r--r--arch/arc/kernel/setup.c113
-rw-r--r--arch/arc/kernel/signal.c8
-rw-r--r--arch/arc/kernel/troubleshoot.c110
-rw-r--r--arch/arc/mm/cache.c19
-rw-r--r--arch/arc/mm/dma.c4
-rw-r--r--arch/arc/mm/tlb.c6
-rw-r--r--arch/arc/mm/tlbex.S21
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts15
-rw-r--r--arch/arm/boot/dts/uniphier-pro5.dtsi4
-rw-r--r--arch/arm/boot/dts/uniphier-pxs2.dtsi4
-rw-r--r--arch/arm/boot/dts/vf500.dtsi2
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/include/asm/Kbuild1
-rw-r--r--arch/arm/include/asm/uaccess.h11
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/armksyms.c183
-rw-r--r--arch/arm/kernel/entry-ftrace.S3
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/smccc-call.S3
-rw-r--r--arch/arm/kvm/arm.c7
-rw-r--r--arch/arm/lib/ashldi3.S3
-rw-r--r--arch/arm/lib/ashrdi3.S3
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/bswapsdi2.S3
-rw-r--r--arch/arm/lib/clear_user.S4
-rw-r--r--arch/arm/lib/copy_from_user.S11
-rw-r--r--arch/arm/lib/copy_page.S2
-rw-r--r--arch/arm/lib/copy_to_user.S4
-rw-r--r--arch/arm/lib/csumipv6.S3
-rw-r--r--arch/arm/lib/csumpartial.S2
-rw-r--r--arch/arm/lib/csumpartialcopy.S1
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S2
-rw-r--r--arch/arm/lib/csumpartialcopyuser.S1
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/lib/div64.S2
-rw-r--r--arch/arm/lib/findbit.S9
-rw-r--r--arch/arm/lib/getuser.S9
-rw-r--r--arch/arm/lib/io-readsb.S2
-rw-r--r--arch/arm/lib/io-readsl.S2
-rw-r--r--arch/arm/lib/io-readsw-armv3.S3
-rw-r--r--arch/arm/lib/io-readsw-armv4.S2
-rw-r--r--arch/arm/lib/io-writesb.S2
-rw-r--r--arch/arm/lib/io-writesl.S2
-rw-r--r--arch/arm/lib/io-writesw-armv3.S2
-rw-r--r--arch/arm/lib/io-writesw-armv4.S2
-rw-r--r--arch/arm/lib/lib1funcs.S9
-rw-r--r--arch/arm/lib/lshrdi3.S3
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memcpy.S3
-rw-r--r--arch/arm/lib/memmove.S2
-rw-r--r--arch/arm/lib/memset.S3
-rw-r--r--arch/arm/lib/memzero.S2
-rw-r--r--arch/arm/lib/muldi3.S3
-rw-r--r--arch/arm/lib/putuser.S5
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/uaccess_with_memcpy.c3
-rw-r--r--arch/arm/lib/ucmpdi2.S3
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/gpc.c15
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c2
-rw-r--r--arch/arm/mach-imx/ssi-fiq-ksym.c20
-rw-r--r--arch/arm/mach-imx/ssi-fiq.S7
-rw-r--r--arch/arm/mach-mvebu/Kconfig4
-rw-r--r--arch/arm/mach-uniphier/Kconfig1
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/broadcom/ns2-svk.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi3
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi12
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/exec.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h11
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/module.h5
-rw-r--r--arch/arm64/include/asm/percpu.h120
-rw-r--r--arch/arm64/include/asm/processor.h6
-rw-r--r--arch/arm64/include/asm/sysreg.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h18
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c36
-rw-r--r--arch/arm64/kernel/cpu_errata.c3
-rw-r--r--arch/arm64/kernel/cpufeature.c10
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/kernel/process.c18
-rw-r--r--arch/arm64/kernel/sleep.S2
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/suspend.c11
-rw-r--r--arch/arm64/kernel/traps.c30
-rw-r--r--arch/arm64/lib/copy_from_user.S7
-rw-r--r--arch/arm64/mm/fault.c15
-rw-r--r--arch/arm64/mm/init.c26
-rw-r--r--arch/arm64/mm/numa.c9
-rw-r--r--arch/blackfin/include/asm/uaccess.h32
-rw-r--r--arch/blackfin/kernel/ptrace.c5
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c6
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c4
-rw-r--r--arch/h8300/include/asm/thread_info.h4
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/ia64/hp/sim/boot/Makefile2
-rw-r--r--arch/ia64/include/asm/export.h3
-rw-r--r--arch/ia64/include/asm/libata-portmap.h4
-rw-r--r--arch/ia64/kernel/entry.S3
-rw-r--r--arch/ia64/kernel/err_inject.c2
-rw-r--r--arch/ia64/kernel/esi_stub.S2
-rw-r--r--arch/ia64/kernel/head.S2
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c94
-rw-r--r--arch/ia64/kernel/ivt.S2
-rw-r--r--arch/ia64/kernel/pal.S7
-rw-r--r--arch/ia64/kernel/ptrace.c14
-rw-r--r--arch/ia64/kernel/setup.c4
-rw-r--r--arch/ia64/lib/Makefile8
-rw-r--r--arch/ia64/lib/clear_page.S2
-rw-r--r--arch/ia64/lib/clear_user.S2
-rw-r--r--arch/ia64/lib/copy_page.S2
-rw-r--r--arch/ia64/lib/copy_page_mck.S2
-rw-r--r--arch/ia64/lib/copy_user.S2
-rw-r--r--arch/ia64/lib/flush.S2
-rw-r--r--arch/ia64/lib/idiv32.S2
-rw-r--r--arch/ia64/lib/idiv64.S2
-rw-r--r--arch/ia64/lib/ip_fast_csum.S3
-rw-r--r--arch/ia64/lib/memcpy.S2
-rw-r--r--arch/ia64/lib/memcpy_mck.S3
-rw-r--r--arch/ia64/lib/memset.S2
-rw-r--r--arch/ia64/lib/strlen.S2
-rw-r--r--arch/ia64/lib/strlen_user.S2
-rw-r--r--arch/ia64/lib/strncpy_from_user.S2
-rw-r--r--arch/ia64/lib/strnlen_user.S2
-rw-r--r--arch/ia64/lib/xor.S5
-rw-r--r--arch/m32r/kernel/ptrace.c15
-rw-r--r--arch/m68k/include/asm/export.h3
-rw-r--r--arch/m68k/kernel/Makefile2
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c32
-rw-r--r--arch/m68k/lib/ashldi3.c4
-rw-r--r--arch/m68k/lib/ashrdi3.c4
-rw-r--r--arch/m68k/lib/divsi3.S3
-rw-r--r--arch/m68k/lib/lshrdi3.c4
-rw-r--r--arch/m68k/lib/modsi3.S3
-rw-r--r--arch/m68k/lib/muldi3.c4
-rw-r--r--arch/m68k/lib/mulsi3.S4
-rw-r--r--arch/m68k/lib/udivsi3.S4
-rw-r--r--arch/m68k/lib/umodsi3.S4
-rw-r--r--arch/metag/include/asm/atomic.h3
-rw-r--r--arch/mips/Kbuild.platforms2
-rw-r--r--arch/mips/Kconfig121
-rw-r--r--arch/mips/Makefile77
-rw-r--r--arch/mips/alchemy/common/setup.c6
-rw-r--r--arch/mips/bcm47xx/serial.c11
-rw-r--r--arch/mips/bcm63xx/clk.c3
-rw-r--r--arch/mips/bmips/Kconfig20
-rw-r--r--arch/mips/bmips/setup.c12
-rw-r--r--arch/mips/boot/Makefile66
-rw-r--r--arch/mips/boot/dts/brcm/Makefile36
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts22
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368.dtsi101
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts108
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268.dtsi134
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts (renamed from arch/mips/boot/dts/brcm/bcm96358nb4ser.dts)1
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts22
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362.dtsi134
-rw-r--r--arch/mips/boot/dts/brcm/bcm7125.dtsi34
-rw-r--r--arch/mips/boot/dts/brcm/bcm7346.dtsi97
-rw-r--r--arch/mips/boot/dts/brcm/bcm7358.dtsi89
-rw-r--r--arch/mips/boot/dts/brcm/bcm7360.dtsi89
-rw-r--r--arch/mips/boot/dts/brcm/bcm7362.dtsi89
-rw-r--r--arch/mips/boot/dts/brcm/bcm7420.dtsi42
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi109
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi109
-rw-r--r--arch/mips/boot/dts/brcm/bcm97125cbmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97346dbsmb.dts17
-rw-r--r--arch/mips/boot/dts/brcm/bcm97358svmb.dts13
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts8
-rw-r--r--arch/mips/boot/dts/brcm/bcm97362svmb.dts13
-rw-r--r--arch/mips/boot/dts/brcm/bcm97420c.dts8
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts21
-rw-r--r--arch/mips/boot/dts/brcm/bcm97435svmb.dts21
-rw-r--r--arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi25
-rw-r--r--arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi25
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts45
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi58
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts40
-rw-r--r--arch/mips/boot/dts/mti/Makefile2
-rw-r--r--arch/mips/boot/dts/mti/malta.dts99
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts238
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c337
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c5
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c1
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c10
-rw-r--r--arch/mips/cavium-octeon/setup.c38
-rw-r--r--arch/mips/configs/generic/32r1.config2
-rw-r--r--arch/mips/configs/generic/32r2.config3
-rw-r--r--arch/mips/configs/generic/32r6.config2
-rw-r--r--arch/mips/configs/generic/64r1.config4
-rw-r--r--arch/mips/configs/generic/64r2.config5
-rw-r--r--arch/mips/configs/generic/64r6.config4
-rw-r--r--arch/mips/configs/generic/board-sead-3.config32
-rw-r--r--arch/mips/configs/generic/eb.config1
-rw-r--r--arch/mips/configs/generic/el.config1
-rw-r--r--arch/mips/configs/generic/micro32r2.config4
-rw-r--r--arch/mips/configs/generic_defconfig96
-rw-r--r--arch/mips/configs/loongson1c_defconfig126
-rw-r--r--arch/mips/configs/malta_defconfig4
-rw-r--r--arch/mips/configs/malta_kvm_defconfig4
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig4
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig2
-rw-r--r--arch/mips/configs/maltaaprp_defconfig2
-rw-r--r--arch/mips/configs/maltasmvp_defconfig2
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig2
-rw-r--r--arch/mips/configs/maltaup_defconfig2
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig4
-rw-r--r--arch/mips/configs/pistachio_defconfig2
-rw-r--r--arch/mips/configs/sead3_defconfig121
-rw-r--r--arch/mips/configs/sead3micro_defconfig122
-rw-r--r--arch/mips/generic/Kconfig19
-rw-r--r--arch/mips/generic/Makefile15
-rw-r--r--arch/mips/generic/Platform14
-rw-r--r--arch/mips/generic/board-sead3.c376
-rw-r--r--arch/mips/generic/init.c176
-rw-r--r--arch/mips/generic/irq.c64
-rw-r--r--arch/mips/generic/proc.c29
-rw-r--r--arch/mips/generic/vmlinux.its.S31
-rw-r--r--arch/mips/include/asm/addrspace.h3
-rw-r--r--arch/mips/include/asm/barrier.h96
-rw-r--r--arch/mips/include/asm/cacheflush.h5
-rw-r--r--arch/mips/include/asm/cpu-type.h3
-rw-r--r--arch/mips/include/asm/cpu.h1
-rw-r--r--arch/mips/include/asm/device.h5
-rw-r--r--arch/mips/include/asm/dma-coherence.h16
-rw-r--r--arch/mips/include/asm/dma-mapping.h10
-rw-r--r--arch/mips/include/asm/i8259.h12
-rw-r--r--arch/mips/include/asm/mach-generic/dma-coherence.h14
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h6
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h8
-rw-r--r--arch/mips/include/asm/mach-ip27/spaces.h1
-rw-r--r--arch/mips/include/asm/mach-loongson32/irq.h41
-rw-r--r--arch/mips/include/asm/mach-loongson32/loongson1.h5
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h1
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-clk.h34
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-mux.h61
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h72
-rw-r--r--arch/mips/include/asm/mach-sead3/irq.h9
-rw-r--r--arch/mips/include/asm/mach-sead3/kernel-entry-init.h21
-rw-r--r--arch/mips/include/asm/mach-sead3/war.h24
-rw-r--r--arch/mips/include/asm/machine.h63
-rw-r--r--arch/mips/include/asm/mips-boards/sead3int.h32
-rw-r--r--arch/mips/include/asm/mips-cm.h1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h30
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mdio.h506
-rw-r--r--arch/mips/include/asm/pci.h60
-rw-r--r--arch/mips/include/asm/pgalloc.h6
-rw-r--r--arch/mips/include/asm/pm-cps.h6
-rw-r--r--arch/mips/include/asm/ptrace.h2
-rw-r--r--arch/mips/include/asm/smp.h14
-rw-r--r--arch/mips/include/asm/uaccess.h18
-rw-r--r--arch/mips/include/uapi/asm/unistd.h22
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c8
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c8
-rw-r--r--arch/mips/kernel/branch.c36
-rw-r--r--arch/mips/kernel/kprobes.c67
-rw-r--r--arch/mips/kernel/linux32.c1
-rw-r--r--arch/mips/kernel/mips-cpc.c17
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c1
-rw-r--r--arch/mips/kernel/module.c1
-rw-r--r--arch/mips/kernel/pm-cps.c160
-rw-r--r--arch/mips/kernel/probes-common.h83
-rw-r--r--arch/mips/kernel/proc.c7
-rw-r--r--arch/mips/kernel/ptrace32.c5
-rw-r--r--arch/mips/kernel/scall32-o32.S3
-rw-r--r--arch/mips/kernel/scall64-64.S3
-rw-r--r--arch/mips/kernel/scall64-n32.S3
-rw-r--r--arch/mips/kernel/scall64-o32.S3
-rw-r--r--arch/mips/kernel/smp-gic.c66
-rw-r--r--arch/mips/kernel/smp-mt.c23
-rw-r--r--arch/mips/kernel/smp.c65
-rw-r--r--arch/mips/kernel/traps.c53
-rw-r--r--arch/mips/kernel/uprobes.c88
-rw-r--r--arch/mips/kvm/commpage.c1
-rw-r--r--arch/mips/kvm/dyntrans.c5
-rw-r--r--arch/mips/kvm/emulate.c1
-rw-r--r--arch/mips/kvm/interrupt.c1
-rw-r--r--arch/mips/kvm/mips.c1
-rw-r--r--arch/mips/kvm/trap_emul.c1
-rw-r--r--arch/mips/lantiq/xway/vmmc.c6
-rw-r--r--arch/mips/lantiq/xway/xrx200_phy_fw.c12
-rw-r--r--arch/mips/lib/ashldi3.c2
-rw-r--r--arch/mips/lib/ashrdi3.c2
-rw-r--r--arch/mips/lib/bswapdi.c3
-rw-r--r--arch/mips/lib/bswapsi.c3
-rw-r--r--arch/mips/lib/cmpdi2.c2
-rw-r--r--arch/mips/lib/delay.c2
-rw-r--r--arch/mips/lib/iomap-pci.c6
-rw-r--r--arch/mips/lib/iomap.c2
-rw-r--r--arch/mips/lib/lshrdi3.c2
-rw-r--r--arch/mips/lib/ucmpdi2.c2
-rw-r--r--arch/mips/loongson32/Kconfig15
-rw-r--r--arch/mips/loongson32/Makefile6
-rw-r--r--arch/mips/loongson32/Platform1
-rw-r--r--arch/mips/loongson32/common/irq.c55
-rw-r--r--arch/mips/loongson32/common/platform.c32
-rw-r--r--arch/mips/loongson32/common/setup.c4
-rw-r--r--arch/mips/loongson32/ls1c/Makefile5
-rw-r--r--arch/mips/loongson32/ls1c/board.c27
-rw-r--r--arch/mips/mm/c-octeon.c2
-rw-r--r--arch/mips/mm/c-r3k.c2
-rw-r--r--arch/mips/mm/c-r4k.c61
-rw-r--r--arch/mips/mm/c-tx39.c3
-rw-r--r--arch/mips/mm/cache.c8
-rw-r--r--arch/mips/mm/dma-default.c18
-rw-r--r--arch/mips/mm/extable.c2
-rw-r--r--arch/mips/mm/fault.c1
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/mips/mm/highmem.c3
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/mm/ioremap.c2
-rw-r--r--arch/mips/mm/mmap.c2
-rw-r--r--arch/mips/mm/page.c1
-rw-r--r--arch/mips/mm/tlb-r4k.c9
-rw-r--r--arch/mips/mti-malta/malta-dt.c15
-rw-r--r--arch/mips/mti-malta/malta-dtshim.c187
-rw-r--r--arch/mips/mti-malta/malta-init.c17
-rw-r--r--arch/mips/mti-malta/malta-int.c111
-rw-r--r--arch/mips/mti-malta/malta-platform.c73
-rw-r--r--arch/mips/mti-malta/malta-reset.c21
-rw-r--r--arch/mips/mti-malta/malta-setup.c10
-rw-r--r--arch/mips/mti-sead3/Makefile15
-rw-r--r--arch/mips/mti-sead3/Platform7
-rw-r--r--arch/mips/mti-sead3/sead3-console.c46
-rw-r--r--arch/mips/mti-sead3/sead3-display.c77
-rw-r--r--arch/mips/mti-sead3/sead3-init.c152
-rw-r--r--arch/mips/mti-sead3/sead3-int.c42
-rw-r--r--arch/mips/mti-sead3/sead3-lcd.c43
-rw-r--r--arch/mips/mti-sead3/sead3-platform.c223
-rw-r--r--arch/mips/mti-sead3/sead3-reset.c40
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c108
-rw-r--r--arch/mips/mti-sead3/sead3-time.c99
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/pci-alchemy.c3
-rw-r--r--arch/mips/pci/pci-ar71xx.c2
-rw-r--r--arch/mips/pci/pci-ar724x.c2
-rw-r--r--arch/mips/pci/pci-generic.c52
-rw-r--r--arch/mips/pci/pci-lantiq.c2
-rw-r--r--arch/mips/pci/pci-legacy.c302
-rw-r--r--arch/mips/pci/pci-mt7620.c3
-rw-r--r--arch/mips/pci/pci-octeon.c2
-rw-r--r--arch/mips/pci/pci-rt2880.c2
-rw-r--r--arch/mips/pci/pci-rt3883.c2
-rw-r--r--arch/mips/pci/pci.c297
-rw-r--r--arch/mips/pci/pcie-octeon.c2
-rw-r--r--arch/mips/pnx833x/common/platform.c8
-rw-r--r--arch/mips/ralink/timer.c28
-rw-r--r--arch/mips/txx9/Kconfig2
-rw-r--r--arch/mips/txx9/generic/pci.c6
-rw-r--r--arch/mips/txx9/generic/setup.c70
-rw-r--r--arch/mips/txx9/generic/setup_tx3927.c1
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c1
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c1
-rw-r--r--arch/mips/txx9/jmr3927/setup.c11
-rw-r--r--arch/mips/txx9/rbtx4927/setup.c32
-rw-r--r--arch/mips/txx9/rbtx4938/setup.c1
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/powerpc/Makefile4
-rw-r--r--arch/powerpc/boot/main.c18
-rw-r--r--arch/powerpc/configs/dpaa.config1
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/checksum.h12
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h1
-rw-r--r--arch/powerpc/include/asm/exception-64s.h16
-rw-r--r--arch/powerpc/include/asm/hw_irq.h6
-rw-r--r--arch/powerpc/include/asm/libata-portmap.h4
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_8xx.h4
-rw-r--r--arch/powerpc/include/asm/tlb.h12
-rw-r--r--arch/powerpc/include/asm/unistd.h4
-rw-r--r--arch/powerpc/kernel/Makefile9
-rw-r--r--arch/powerpc/kernel/cputable.c1
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S3
-rw-r--r--arch/powerpc/kernel/epapr_hcalls.S2
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S52
-rw-r--r--arch/powerpc/kernel/fpu.S3
-rw-r--r--arch/powerpc/kernel/head_32.S5
-rw-r--r--arch/powerpc/kernel/head_40x.S2
-rw-r--r--arch/powerpc/kernel/head_44x.S2
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/head_8xx.S138
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/idle_book3s.S35
-rw-r--r--arch/powerpc/kernel/misc.S2
-rw-r--r--arch/powerpc/kernel/misc_32.S10
-rw-r--r--arch/powerpc/kernel/misc_64.S4
-rw-r--r--arch/powerpc/kernel/pci-common.c1
-rw-r--r--arch/powerpc/kernel/pci_32.c2
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c37
-rw-r--r--arch/powerpc/kernel/ppc_ksyms_32.c60
-rw-r--r--arch/powerpc/kernel/process.c2
-rw-r--r--arch/powerpc/kernel/ptrace32.c5
-rw-r--r--arch/powerpc/kernel/setup-common.c27
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/time.c1
-rw-r--r--arch/powerpc/kernel/traps.c45
-rw-r--r--arch/powerpc/kernel/vector.S3
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c1
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/lib/checksum_32.S3
-rw-r--r--arch/powerpc/lib/checksum_64.S3
-rw-r--r--arch/powerpc/lib/copy_32.S5
-rw-r--r--arch/powerpc/lib/copypage_64.S2
-rw-r--r--arch/powerpc/lib/copyuser_64.S4
-rw-r--r--arch/powerpc/lib/hweight_64.S5
-rw-r--r--arch/powerpc/lib/mem_64.S3
-rw-r--r--arch/powerpc/lib/memcmp_64.S2
-rw-r--r--arch/powerpc/lib/memcpy_64.S2
-rw-r--r--arch/powerpc/lib/ppc_ksyms.c29
-rw-r--r--arch/powerpc/lib/string.S6
-rw-r--r--arch/powerpc/lib/string_64.S2
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/hash_low_32.S3
-rw-r--r--arch/powerpc/mm/hash_utils_64.c2
-rw-r--r--arch/powerpc/mm/numa.c46
-rw-r--r--arch/powerpc/mm/tlb-radix.c8
-rw-r--r--arch/powerpc/platforms/82xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c4
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c4
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c5
-rw-r--r--arch/powerpc/platforms/83xx/misc.c8
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h1
-rw-r--r--arch/powerpc/platforms/83xx/sbc834x.c5
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_qds.c1
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/c293pcie.c1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c1
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c25
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c12
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c10
-rw-r--r--arch/powerpc/platforms/85xx/mvme2500.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/ppa8548.c1
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c1
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c1
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c8
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c1
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c1
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c1
-rw-r--r--arch/powerpc/platforms/85xx/twr_p102x.c1
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c3
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c1
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c1
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c1
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c1
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c1
-rw-r--r--arch/powerpc/platforms/86xx/mvme7100.c1
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c1
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c4
-rwxr-xr-xarch/powerpc/relocs_check.sh4
-rw-r--r--arch/powerpc/sysdev/cpm1.c2
-rw-r--r--arch/powerpc/sysdev/cpm2.c4
-rw-r--r--arch/powerpc/sysdev/cpm_common.c15
-rw-r--r--arch/powerpc/sysdev/dcr-low.S3
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c12
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c33
-rw-r--r--arch/powerpc/sysdev/fsl_soc.h2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/s390/include/asm/Kbuild1
-rw-r--r--arch/s390/include/asm/ftrace.h4
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/dis.c4
-rw-r--r--arch/s390/kernel/dumpstack.c63
-rw-r--r--arch/s390/kernel/entry.S6
-rw-r--r--arch/s390/kernel/mcount.S3
-rw-r--r--arch/s390/kernel/perf_event.c2
-rw-r--r--arch/s390/kernel/s390_ksyms.c15
-rw-r--r--arch/s390/kernel/stacktrace.c4
-rw-r--r--arch/s390/kvm/intercept.c9
-rw-r--r--arch/s390/lib/mem.S3
-rw-r--r--arch/s390/mm/gup.c3
-rw-r--r--arch/s390/mm/hugetlbpage.c1
-rw-r--r--arch/s390/mm/init.c38
-rw-r--r--arch/s390/oprofile/init.c2
-rw-r--r--arch/score/kernel/ptrace.c10
-rw-r--r--arch/score/kernel/traps.c1
-rw-r--r--arch/sh/Makefile2
-rw-r--r--arch/sh/boards/Kconfig10
-rw-r--r--arch/sh/configs/j2_defconfig2
-rw-r--r--arch/sh/mm/gup.c3
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/string.h34
-rw-r--r--arch/sparc/include/asm/string_32.h56
-rw-r--r--arch/sparc/include/asm/string_64.h44
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/entry.S3
-rw-r--r--arch/sparc/kernel/head_32.S3
-rw-r--r--arch/sparc/kernel/head_64.S7
-rw-r--r--arch/sparc/kernel/helpers.S2
-rw-r--r--arch/sparc/kernel/hvcalls.S5
-rw-r--r--arch/sparc/kernel/ptrace_64.c24
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c12
-rw-r--r--arch/sparc/kernel/sparc_ksyms_32.c31
-rw-r--r--arch/sparc/kernel/sparc_ksyms_64.c53
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/U1memcpy.S2
-rw-r--r--arch/sparc/lib/VISsave.S2
-rw-r--r--arch/sparc/lib/ashldi3.S2
-rw-r--r--arch/sparc/lib/ashrdi3.S2
-rw-r--r--arch/sparc/lib/atomic_64.S16
-rw-r--r--arch/sparc/lib/bitops.S7
-rw-r--r--arch/sparc/lib/blockops.S3
-rw-r--r--arch/sparc/lib/bzero.S4
-rw-r--r--arch/sparc/lib/checksum_32.S3
-rw-r--r--arch/sparc/lib/checksum_64.S2
-rw-r--r--arch/sparc/lib/clear_page.S3
-rw-r--r--arch/sparc/lib/copy_in_user.S2
-rw-r--r--arch/sparc/lib/copy_page.S2
-rw-r--r--arch/sparc/lib/copy_user.S2
-rw-r--r--arch/sparc/lib/csum_copy.S3
-rw-r--r--arch/sparc/lib/divdi3.S2
-rw-r--r--arch/sparc/lib/ffs.S3
-rw-r--r--arch/sparc/lib/hweight.S5
-rw-r--r--arch/sparc/lib/ipcsum.S2
-rw-r--r--arch/sparc/lib/ksyms.c174
-rw-r--r--arch/sparc/lib/locks.S5
-rw-r--r--arch/sparc/lib/lshrdi3.S2
-rw-r--r--arch/sparc/lib/mcount.S2
-rw-r--r--arch/sparc/lib/memcmp.S2
-rw-r--r--arch/sparc/lib/memcpy.S86
-rw-r--r--arch/sparc/lib/memmove.S2
-rw-r--r--arch/sparc/lib/memscan_32.S4
-rw-r--r--arch/sparc/lib/memscan_64.S4
-rw-r--r--arch/sparc/lib/memset.S3
-rw-r--r--arch/sparc/lib/muldi3.S2
-rw-r--r--arch/sparc/lib/strlen.S2
-rw-r--r--arch/sparc/lib/strncmp_32.S2
-rw-r--r--arch/sparc/lib/strncmp_64.S2
-rw-r--r--arch/sparc/lib/xor.S9
-rw-r--r--arch/sparc/mm/gup.c3
-rw-r--r--arch/x86/entry/Makefile4
-rw-r--r--arch/x86/entry/entry_32.S2
-rw-r--r--arch/x86/entry/entry_64.S2
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/entry/thunk_32.S3
-rw-r--r--arch/x86/entry/thunk_64.S3
-rw-r--r--arch/x86/events/intel/core.c13
-rw-r--r--arch/x86/events/intel/cstate.c30
-rw-r--r--arch/x86/events/intel/lbr.c4
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/events/intel/uncore.c1
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/export.h4
-rw-r--r--arch/x86/include/asm/intel-family.h1
-rw-r--r--arch/x86/include/asm/io.h6
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/percpu.h5
-rw-r--r--arch/x86/include/asm/rwsem.h6
-rw-r--r--arch/x86/include/asm/thread_info.h9
-rw-r--r--arch/x86/kernel/Makefile4
-rw-r--r--arch/x86/kernel/acpi/boot.c1
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c5
-rw-r--r--arch/x86/kernel/e820.c2
-rw-r--r--arch/x86/kernel/fpu/xstate.c2
-rw-r--r--arch/x86/kernel/head_32.S2
-rw-r--r--arch/x86/kernel/head_64.S3
-rw-r--r--arch/x86/kernel/i386_ksyms_32.c47
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/mcount_64.S3
-rw-r--r--arch/x86/kernel/quirks.c3
-rw-r--r--arch/x86/kernel/setup.c7
-rw-r--r--arch/x86/kernel/signal_compat.c3
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/smpboot.c16
-rw-r--r--arch/x86/kernel/step.c3
-rw-r--r--arch/x86/kernel/unwind_guess.c9
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c85
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/lib/checksum_32.S3
-rw-r--r--arch/x86/lib/clear_page_64.S2
-rw-r--r--arch/x86/lib/cmpxchg8b_emu.S2
-rw-r--r--arch/x86/lib/copy_page_64.S2
-rw-r--r--arch/x86/lib/copy_user_64.S8
-rw-r--r--arch/x86/lib/csum-partial_64.c1
-rw-r--r--arch/x86/lib/getuser.S5
-rw-r--r--arch/x86/lib/hweight.S3
-rw-r--r--arch/x86/lib/memcpy_64.S4
-rw-r--r--arch/x86/lib/memmove_64.S3
-rw-r--r--arch/x86/lib/memset_64.S3
-rw-r--r--arch/x86/lib/putuser.S5
-rw-r--r--arch/x86/lib/strstr_32.c3
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/kaslr.c6
-rw-r--r--arch/x86/mm/mpx.c5
-rw-r--r--arch/x86/mm/pat.c14
-rw-r--r--arch/x86/platform/uv/bios_uv.c10
-rw-r--r--arch/x86/um/Makefile2
-rw-r--r--arch/x86/um/checksum_32.S2
-rw-r--r--arch/x86/um/ksyms.c13
-rw-r--r--arch/x86/um/ptrace_32.c3
-rw-r--r--arch/x86/um/ptrace_64.c3
-rw-r--r--arch/x86/xen/enlighten.c2
-rw-r--r--block/badblocks.c29
-rw-r--r--block/blk-cgroup.c4
-rw-r--r--block/blk-flush.c28
-rw-r--r--block/blk-mq.c6
-rw-r--r--block/blk-softirq.c2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/acpi_pad.c5
-rw-r--r--drivers/acpi/acpica/dsinit.c11
-rw-r--r--drivers/acpi/acpica/dsmethod.c50
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c3
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/fan.c12
-rw-r--r--drivers/acpi/osl.c13
-rw-r--r--drivers/acpi/pci_link.c38
-rw-r--r--drivers/acpi/property.c117
-rw-r--r--drivers/android/binder.c35
-rw-r--r--drivers/ata/ahci.c156
-rw-r--r--drivers/ata/ahci.h24
-rw-r--r--drivers/ata/ahci_qoriq.c20
-rw-r--r--drivers/ata/ahci_st.c4
-rw-r--r--drivers/ata/libahci.c9
-rw-r--r--drivers/ata/libata-scsi.c288
-rw-r--r--drivers/ata/pata_at91.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c3
-rw-r--r--drivers/ata/sata_mv.c6
-rw-r--r--drivers/auxdisplay/Kconfig9
-rw-r--r--drivers/auxdisplay/Makefile1
-rw-r--r--drivers/auxdisplay/img-ascii-lcd.c443
-rw-r--r--drivers/base/Kconfig6
-rw-r--r--drivers/block/DAC960.c4
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/rbd.c50
-rw-r--r--drivers/bluetooth/btwilink.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c8
-rw-r--r--drivers/bus/Kconfig1
-rw-r--r--drivers/char/hw_random/core.c6
-rw-r--r--drivers/char/ipmi/Kconfig8
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/bt-bmc.c505
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c7
-rw-r--r--drivers/char/random.c4
-rw-r--r--drivers/clk/at91/clk-programmable.c2
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c11
-rw-r--r--drivers/clk/clk-max77686.c1
-rw-r--r--drivers/clk/hisilicon/clk-hi6220.c4
-rw-r--r--drivers/clk/mediatek/Kconfig2
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c11
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c1
-rw-r--r--drivers/clk/uniphier/clk-uniphier-core.c20
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mio.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier-mux.c2
-rw-r--r--drivers/clk/uniphier/clk-uniphier.h2
-rw-r--r--drivers/clocksource/Kconfig10
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/jcore-pit.c249
-rw-r--r--drivers/clocksource/timer-sun5i.c16
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c19
-rw-r--r--drivers/cpufreq/intel_pstate.c81
-rw-r--r--drivers/cpuidle/Kconfig.mips2
-rw-r--r--drivers/cpuidle/cpuidle-cps.c2
-rw-r--r--drivers/dax/Kconfig2
-rw-r--r--drivers/dax/pmem.c2
-rw-r--r--drivers/devfreq/devfreq.c8
-rw-r--r--drivers/devfreq/event/Kconfig1
-rw-r--r--drivers/devfreq/event/exynos-nocp.c3
-rw-r--r--drivers/extcon/extcon-qcom-spmi-misc.c2
-rw-r--r--drivers/firewire/nosy.c13
-rw-r--r--drivers/firmware/efi/libstub/Makefile5
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-ath79.c1
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-mxs.c8
-rw-r--r--drivers/gpio/gpio-pca953x.c16
-rw-r--r--drivers/gpio/gpio-stmpe.c2
-rw-r--r--drivers/gpio/gpio-ts4800.c1
-rw-r--r--drivers/gpio/gpiolib-acpi.c7
-rw-r--r--drivers/gpio/gpiolib.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c18
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c7
-rw-r--r--drivers/gpu/drm/drm_info.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c23
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/si.c1
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c56
-rw-r--r--drivers/hid/hid-dr.c83
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-led.c23
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/hv_util.c10
-rw-r--r--drivers/hwmon/adm9240.c6
-rw-r--r--drivers/hwmon/max31790.c4
-rw-r--r--drivers/i2c/busses/Kconfig12
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c17
-rw-r--r--drivers/i2c/busses/i2c-digicolor.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c1
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c1
-rw-r--r--drivers/i2c/busses/i2c-xlr.c1
-rw-r--r--drivers/i2c/i2c-core.c21
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/iio/temperature/maxim_thermocouple.c16
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/umem.c6
-rw-r--r--drivers/infiniband/core/umem_odp.c7
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c23
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h18
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c146
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c76
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c293
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c36
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c22
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c67
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c2
-rw-r--r--drivers/infiniband/hw/qedr/Kconfig8
-rw-r--r--drivers/infiniband/hw/qedr/Makefile3
-rw-r--r--drivers/infiniband/hw/qedr/main.c914
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h495
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.c622
-rw-r--r--drivers/infiniband/hw/qedr/qedr_cm.h61
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi.h56
-rw-r--r--drivers/infiniband/hw/qedr/qedr_hsi_rdma.h748
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c3547
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h101
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h20
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c54
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c6
-rw-r--r--drivers/input/mouse/alps.c87
-rw-r--r--drivers/input/mouse/alps.h2
-rw-r--r--drivers/input/mouse/elantech.c27
-rw-r--r--drivers/input/rmi4/rmi_i2c.c38
-rw-r--r--drivers/input/rmi4/rmi_spi.c22
-rw-r--r--drivers/input/serio/i8042-io.h2
-rw-r--r--drivers/input/serio/i8042-ip22io.h2
-rw-r--r--drivers/input/serio/i8042-ppcio.h2
-rw-r--r--drivers/input/serio/i8042-sparcio.h2
-rw-r--r--drivers/input/serio/i8042-unicore32io.h2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h96
-rw-r--r--drivers/input/serio/i8042.c55
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c38
-rw-r--r--drivers/ipack/ipack.c2
-rw-r--r--drivers/irqchip/Kconfig4
-rw-r--r--drivers/irqchip/irq-eznps.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c10
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-i8259.c30
-rw-r--r--drivers/irqchip/irq-jcore-aic.c20
-rw-r--r--drivers/md/dm-raid.c15
-rw-r--r--drivers/md/dm-raid1.c22
-rw-r--r--drivers/md/dm-rq.c7
-rw-r--r--drivers/md/dm-table.c24
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c5
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/v4l2-core/Kconfig2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c6
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/misc/cxl/api.c11
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h24
-rw-r--r--drivers/misc/cxl/file.c15
-rw-r--r--drivers/misc/cxl/guest.c3
-rw-r--r--drivers/misc/cxl/main.c42
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c27
-rw-r--r--drivers/misc/genwqe/card_utils.c12
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/mic/scif/scif_rma.c3
-rw-r--r--drivers/misc/sgi-gru/grufault.c2
-rw-r--r--drivers/misc/sgi-gru/grumain.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_doorbell.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c23
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c26
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c54
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci.c42
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/ubi/eba.c1
-rw-r--r--drivers/mtd/ubi/fastmap.c12
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c16
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c3
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c32
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c45
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c1
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig12
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c27
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c216
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c99
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c15
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c13
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/geneve.c47
-rw-r--r--drivers/net/hyperv/netvsc_drv.c25
-rw-r--r--drivers/net/macsec.c26
-rw-r--r--drivers/net/phy/at803x.c65
-rw-r--r--drivers/net/phy/dp83848.c3
-rw-r--r--drivers/net/usb/asix_common.c8
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c17
-rw-r--r--drivers/net/vrf.c2
-rw-r--r--drivers/net/vxlan.c82
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/slic_ds26522.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c75
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c4
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/namespace_devs.c14
-rw-r--r--drivers/nvdimm/pmem.c8
-rw-r--r--drivers/nvme/host/core.c14
-rw-r--r--drivers/nvme/host/pci.c77
-rw-r--r--drivers/nvme/host/scsi.c4
-rw-r--r--drivers/nvme/target/admin-cmd.c8
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/discovery.c4
-rw-r--r--drivers/of/platform.c1
-rw-r--r--drivers/pci/host/pci-layerscape.c2
-rw-r--r--drivers/pci/host/pcie-designware-plat.c2
-rw-r--r--drivers/pci/msi.c2
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c100
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c12
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c25
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/reset/reset-uniphier.c16
-rw-r--r--drivers/rtc/Kconfig38
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-ac100.c5
-rw-r--r--drivers/rtc/rtc-asm9260.c20
-rw-r--r--drivers/rtc/rtc-at32ap700x.c2
-rw-r--r--drivers/rtc/rtc-bq32k.c16
-rw-r--r--drivers/rtc/rtc-cmos.c93
-rw-r--r--drivers/rtc/rtc-coh901331.c2
-rw-r--r--drivers/rtc/rtc-davinci.c2
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-ds1302.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c54
-rw-r--r--drivers/rtc/rtc-ds1347.c96
-rw-r--r--drivers/rtc/rtc-gemini.c2
-rw-r--r--drivers/rtc/rtc-isl12057.c643
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-mcp795.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-nuc900.c2
-rw-r--r--drivers/rtc/rtc-omap.c170
-rw-r--r--drivers/rtc/rtc-palmas.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c5
-rw-r--r--drivers/rtc/rtc-pcf50633.c2
-rw-r--r--drivers/rtc/rtc-pic32.c1
-rw-r--r--drivers/rtc/rtc-rv8803.c50
-rw-r--r--drivers/rtc/rtc-rx6110.c3
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sysfs.c4
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c4
-rw-r--r--drivers/s390/cio/chp.c6
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/scsi/NCR5380.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c49
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/g_NCR5380.c699
-rw-r--r--drivers/scsi/g_NCR5380.h8
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/scsi_dh.c6
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/ufs/Kconfig2
-rw-r--r--drivers/scsi/ufs/ufs_quirks.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c10
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/fsl/Makefile1
-rw-r--r--drivers/soc/fsl/qbman/Kconfig67
-rw-r--r--drivers/soc/fsl/qbman/Makefile12
-rw-r--r--drivers/soc/fsl/qbman/bman.c797
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c263
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c219
-rw-r--r--drivers/soc/fsl/qbman/bman_priv.h80
-rw-r--r--drivers/soc/fsl/qbman/bman_test.c53
-rw-r--r--drivers/soc/fsl/qbman/bman_test.h35
-rw-r--r--drivers/soc/fsl/qbman/bman_test_api.c151
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.h103
-rw-r--r--drivers/soc/fsl/qbman/qman.c2881
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c808
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c355
-rw-r--r--drivers/soc/fsl/qbman/qman_priv.h371
-rw-r--r--drivers/soc/fsl/qbman/qman_test.c62
-rw-r--r--drivers/soc/fsl/qbman/qman_test.h36
-rw-r--r--drivers/soc/fsl/qbman/qman_test_api.c252
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c617
-rw-r--r--drivers/soc/fsl/qe/gpio.c3
-rw-r--r--drivers/soc/fsl/qe/qe.c10
-rw-r--r--drivers/soc/fsl/qe/qe_common.c8
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c4
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/android/ion/ion_of.c2
-rw-r--r--drivers/staging/greybus/arche-platform.c1
-rw-r--r--drivers/staging/greybus/es2.c3
-rw-r--r--drivers/staging/greybus/gpio.c6
-rw-r--r--drivers/staging/greybus/module.c2
-rw-r--r--drivers/staging/greybus/uart.c2
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c34
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c3
-rw-r--r--drivers/staging/wilc1000/host_interface.c1
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/target_core_transport.c39
-rw-r--r--drivers/target/target_core_user.c50
-rw-r--r--drivers/target/target_core_xcopy.c34
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c4
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c42
-rw-r--r--drivers/thermal/intel_pch_thermal.c60
-rw-r--r--drivers/thermal/intel_powerclamp.c14
-rw-r--r--drivers/tty/serial/8250/8250_lpss.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c3
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c4
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/atmel_serial.c26
-rw-r--r--drivers/tty/serial/fsl_lpuart.c3
-rw-r--r--drivers/tty/serial/pch_uart.c1
-rw-r--r--drivers/tty/serial/sc16is7xx.c8
-rw-r--r--drivers/tty/serial/serial_core.c8
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/vt/vt.c7
-rw-r--r--drivers/usb/chipidea/host.c2
-rw-r--r--drivers/usb/dwc2/core.c11
-rw-r--r--drivers/usb/dwc2/core.h7
-rw-r--r--drivers/usb/dwc2/gadget.c53
-rw-r--r--drivers/usb/dwc3/gadget.c26
-rw-r--r--drivers/usb/gadget/function/f_fs.c107
-rw-r--r--drivers/usb/gadget/function/u_ether.c5
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-platform.c2
-rw-r--r--drivers/usb/host/ehci-sead3.c185
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c41
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/omap2430.c7
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c8
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h5
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/wusbcore/crypto.c61
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c42
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/vme/vme.c4
-rw-r--r--drivers/watchdog/wdat_wdt.c4
-rw-r--r--drivers/xen/manage.c45
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
-rw-r--r--fs/afs/cmservice.c6
-rw-r--r--fs/afs/fsclient.c4
-rw-r--r--fs/afs/internal.h2
-rw-r--r--fs/afs/rxrpc.c3
-rw-r--r--fs/befs/befs.h19
-rw-r--r--fs/befs/btree.c60
-rw-r--r--fs/befs/datastream.c253
-rw-r--r--fs/befs/debug.c1
-rw-r--r--fs/befs/io.c26
-rw-r--r--fs/befs/io.h2
-rw-r--r--fs/befs/linuxvfs.c130
-rw-r--r--fs/befs/super.c36
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/btrfs/ctree.h3
-rw-r--r--fs/btrfs/disk-io.c33
-rw-r--r--fs/btrfs/extent_io.c64
-rw-r--r--fs/btrfs/extent_io.h22
-rw-r--r--fs/btrfs/free-space-tree.c19
-rw-r--r--fs/btrfs/send.c58
-rw-r--r--fs/btrfs/tests/extent-io-tests.c87
-rw-r--r--fs/btrfs/tests/free-space-tree-tests.c189
-rw-r--r--fs/btrfs/tree-log.c20
-rw-r--r--fs/ceph/file.c3
-rw-r--r--fs/ceph/inode.c3
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/ceph/xattr.c4
-rw-r--r--fs/cifs/cifs_debug.c1
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_ioctl.h8
-rw-r--r--fs/cifs/cifsacl.c123
-rw-r--r--fs/cifs/cifsfs.c11
-rw-r--r--fs/cifs/cifsglob.h50
-rw-r--r--fs/cifs/cifsproto.h2
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/connect.c43
-rw-r--r--fs/cifs/file.c105
-rw-r--r--fs/cifs/ioctl.c16
-rw-r--r--fs/cifs/misc.c15
-rw-r--r--fs/cifs/readdir.c6
-rw-r--r--fs/cifs/smb2inode.c6
-rw-r--r--fs/cifs/smb2misc.c16
-rw-r--r--fs/cifs/smb2ops.c62
-rw-r--r--fs/cifs/smb2pdu.c587
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/xattr.c62
-rw-r--r--fs/crypto/crypto.c15
-rw-r--r--fs/crypto/policy.c4
-rw-r--r--fs/exec.c9
-rw-r--r--fs/exofs/dir.c2
-rw-r--r--fs/ext2/inode.c7
-rw-r--r--fs/ext4/block_validity.c4
-rw-r--r--fs/ext4/mballoc.h17
-rw-r--r--fs/ext4/namei.c18
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/ext4/sysfs.c4
-rw-r--r--fs/ext4/xattr.c20
-rw-r--r--fs/f2fs/gc.c10
-rw-r--r--fs/iomap.c5
-rw-r--r--fs/isofs/inode.c8
-rw-r--r--fs/jbd2/transaction.c3
-rw-r--r--fs/kernfs/dir.c84
-rw-r--r--fs/kernfs/file.c1
-rw-r--r--fs/locks.c6
-rw-r--r--fs/namei.c25
-rw-r--r--fs/namespace.c1
-rw-r--r--fs/nfs/blocklayout/blocklayout.c3
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/orangefs/dcache.c5
-rw-r--r--fs/orangefs/file.c14
-rw-r--r--fs/orangefs/namei.c8
-rw-r--r--fs/orangefs/orangefs-kernel.h7
-rw-r--r--fs/overlayfs/copy_up.c67
-rw-r--r--fs/overlayfs/dir.c5
-rw-r--r--fs/overlayfs/inode.c44
-rw-r--r--fs/overlayfs/super.c44
-rw-r--r--fs/proc/array.c9
-rw-r--r--fs/proc/base.c14
-rw-r--r--fs/proc/task_mmu.c29
-rw-r--r--fs/proc/task_nommu.c28
-rw-r--r--fs/read_write.c29
-rw-r--r--fs/super.c43
-rw-r--r--fs/sysfs/dir.c6
-rw-r--r--fs/ubifs/dir.c28
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c418
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h8
-rw-r--r--fs/xfs/libxfs/xfs_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_dquot_buf.c3
-rw-r--r--fs/xfs/libxfs/xfs_format.h1
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c13
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.h2
-rw-r--r--fs/xfs/xfs_file.c232
-rw-r--r--fs/xfs/xfs_icache.c8
-rw-r--r--fs/xfs/xfs_iomap.c57
-rw-r--r--fs/xfs/xfs_mount.c1
-rw-r--r--fs/xfs/xfs_reflink.c499
-rw-r--r--fs/xfs/xfs_reflink.h11
-rw-r--r--fs/xfs/xfs_sysfs.c4
-rw-r--r--fs/xfs/xfs_trace.h4
-rw-r--r--include/acpi/pcc.h2
-rw-r--r--include/asm-generic/export.h94
-rw-r--r--include/asm-generic/libata-portmap.h7
-rw-r--r--include/asm-generic/percpu.h53
-rw-r--r--include/asm-generic/vmlinux.lds.h57
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/ata.h69
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/cgroup.h9
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/compiler-gcc.h7
-rw-r--r--include/linux/compiler.h27
-rw-r--r--include/linux/cpufreq.h104
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/export.h30
-rw-r--r--include/linux/fdtable.h2
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/genhd.h2
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/init.h43
-rw-r--r--include/linux/io.h22
-rw-r--r--include/linux/iomap.h17
-rw-r--r--include/linux/ipv6.h17
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kconfig.h5
-rw-r--r--include/linux/kernfs.h28
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mm.h25
-rw-r--r--include/linux/mmzone.h30
-rw-r--r--include/linux/netdevice.h39
-rw-r--r--include/linux/nvme.h49
-rw-r--r--include/linux/perf_event.h1
-rw-r--r--include/linux/pkeys.h1
-rw-r--r--include/linux/qed/qed_if.h1
-rw-r--r--include/linux/qed/qede_roce.h2
-rw-r--r--include/linux/random.h15
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/thread_info.h11
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/cfg80211.h107
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/mac80211.h21
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vxlan.h4
-rw-r--r--include/soc/fsl/bman.h129
-rw-r--r--include/soc/fsl/qman.h1074
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/cgroup.h163
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/bt-bmc.h18
-rw-r--r--include/uapi/linux/btrfs.h12
-rw-r--r--include/uapi/linux/ethtool.h3
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--include/uapi/linux/rtnetlink.h2
-rw-r--r--include/uapi/rdma/qedr-abi.h106
-rw-r--r--init/Makefile2
-rw-r--r--init/main.c1
-rw-r--r--ipc/msgutil.c4
-rw-r--r--kernel/cgroup.c75
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/cpuset.c13
-rw-r--r--kernel/events/core.c23
-rw-r--r--kernel/events/uprobes.c6
-rw-r--r--kernel/fork.c7
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/kcov.c9
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/ptrace.c16
-rw-r--r--kernel/rcu/tiny.c2
-rw-r--r--kernel/rcu/tree.c2
-rw-r--r--kernel/sched/core.c16
-rw-r--r--kernel/sched/debug.c3
-rw-r--r--kernel/sched/fair.c25
-rw-r--r--kernel/sched/wait.c10
-rw-r--r--kernel/softirq.c6
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--kernel/time/timer.c76
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/genalloc.c3
-rw-r--r--lib/iov_iter.c22
-rw-r--r--lib/irq_poll.c2
-rw-r--r--lib/percpu-refcount.c169
-rw-r--r--lib/random32.c2
-rw-r--r--lib/stackdepot.c2
-rw-r--r--lib/test_bpf.c2
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/frame_vector.c9
-rw-r--r--mm/gup.c67
-rw-r--r--mm/kasan/kasan.c22
-rw-r--r--mm/kmemleak.c7
-rw-r--r--mm/list_lru.c2
-rw-r--r--mm/memcontrol.c9
-rw-r--r--mm/memory.c16
-rw-r--r--mm/memory_hotplug.c29
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/mprotect.c1
-rw-r--r--mm/nommu.c40
-rw-r--r--mm/page_alloc.c136
-rw-r--r--mm/percpu.c38
-rw-r--r--mm/process_vm_access.c7
-rw-r--r--mm/slab.c45
-rw-r--r--mm/slab.h1
-rw-r--r--mm/util.c12
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/batman-adv/hard-interface.c1
-rw-r--r--net/batman-adv/log.h2
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/bluetooth/hci_request.c49
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/mgmt.c26
-rw-r--r--net/bridge/br_multicast.c23
-rw-r--r--net/ceph/pagevec.c2
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/flow_dissector.c12
-rw-r--r--net/core/net_namespace.c35
-rw-r--r--net/core/pktgen.c17
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/hsr/hsr_forward.c4
-rw-r--r--net/ipv4/af_inet.c2
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/inet_hashtables.c8
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/addrconf.c101
-rw-r--r--net/ipv6/inet6_hashtables.c13
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ip6_tunnel.c3
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c17
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/reassembly.c3
-rw-r--r--net/ipv6/route.c74
-rw-r--r--net/ipv6/udp.c3
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/mac80211/aes_ccm.c46
-rw-r--r--net/mac80211/aes_ccm.h8
-rw-r--r--net/mac80211/aes_gcm.c43
-rw-r--r--net/mac80211/aes_gcm.h6
-rw-r--r--net/mac80211/aes_gmac.c26
-rw-r--r--net/mac80211/aes_gmac.h4
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/rx.c51
-rw-r--r--net/mac80211/wpa.c22
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-aen.c18
-rw-r--r--net/ncsi/ncsi-manage.c126
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_internals.h2
-rw-r--r--net/netfilter/nf_queue.c48
-rw-r--r--net/netfilter/nf_tables_api.c2
-rw-r--r--net/netfilter/nft_dynset.c6
-rw-r--r--net/netfilter/nft_exthdr.c3
-rw-r--r--net/netfilter/nft_hash.c1
-rw-r--r--net/netfilter/nft_range.c26
-rw-r--r--net/netfilter/x_tables.c2
-rw-r--r--net/netfilter/xt_NFLOG.c1
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_ipcomp.c2
-rw-r--r--net/packet/af_packet.c9
-rw-r--r--net/rds/Makefile2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rxrpc/call_object.c2
-rw-r--r--net/rxrpc/peer_object.c4
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_mirred.c5
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/switchdev/switchdev.c9
-rw-r--r--net/tipc/bcast.c14
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/link.c2
-rw-r--r--net/tipc/msg.h17
-rw-r--r--net/tipc/name_distr.c1
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/wireless/sysfs.c5
-rw-r--r--net/wireless/util.c34
-rw-r--r--samples/Kconfig6
-rw-r--r--samples/Makefile2
-rw-r--r--samples/auxdisplay/.gitignore (renamed from Documentation/auxdisplay/.gitignore)0
-rw-r--r--samples/auxdisplay/Makefile9
-rw-r--r--samples/auxdisplay/cfag12864b-example.c (renamed from Documentation/auxdisplay/cfag12864b-example.c)0
-rw-r--r--samples/blackfin/Makefile1
-rw-r--r--samples/blackfin/gptimers-example.c (renamed from Documentation/blackfin/gptimers-example.c)0
-rw-r--r--samples/bpf/parse_ldabs.c1
-rw-r--r--samples/bpf/parse_simple.c1
-rw-r--r--samples/bpf/parse_varlen.c1
-rw-r--r--samples/bpf/tcbpf1_kern.c1
-rw-r--r--samples/bpf/tcbpf2_kern.c1
-rw-r--r--samples/bpf/test_cgrp2_tc_kern.c1
-rw-r--r--samples/mei/.gitignore (renamed from Documentation/misc-devices/mei/.gitignore)0
-rw-r--r--samples/mei/Makefile9
-rw-r--r--samples/mei/TODO (renamed from Documentation/misc-devices/mei/TODO)0
-rw-r--r--samples/mei/mei-amt-version.c (renamed from Documentation/misc-devices/mei/mei-amt-version.c)0
-rw-r--r--samples/mic/mpssd/.gitignore (renamed from Documentation/mic/mpssd/.gitignore)0
-rw-r--r--samples/mic/mpssd/Makefile27
-rwxr-xr-xsamples/mic/mpssd/micctrl (renamed from Documentation/mic/mpssd/micctrl)0
-rwxr-xr-xsamples/mic/mpssd/mpss (renamed from Documentation/mic/mpssd/mpss)0
-rw-r--r--samples/mic/mpssd/mpssd.c (renamed from Documentation/mic/mpssd/mpssd.c)0
-rw-r--r--samples/mic/mpssd/mpssd.h (renamed from Documentation/mic/mpssd/mpssd.h)0
-rw-r--r--samples/mic/mpssd/sysfs.c (renamed from Documentation/mic/mpssd/sysfs.c)0
-rw-r--r--samples/timers/.gitignore (renamed from Documentation/timers/.gitignore)0
-rw-r--r--samples/timers/Makefile15
-rw-r--r--samples/timers/hpet_example.c (renamed from Documentation/timers/hpet_example.c)0
-rw-r--r--samples/watchdog/.gitignore (renamed from Documentation/watchdog/src/.gitignore)1
-rw-r--r--samples/watchdog/Makefile8
-rw-r--r--samples/watchdog/watchdog-simple.c (renamed from Documentation/watchdog/src/watchdog-simple.c)0
-rw-r--r--scripts/Makefile.build43
-rw-r--r--scripts/Makefile.gcc-plugins9
-rw-r--r--scripts/Makefile.modpost14
-rw-r--r--scripts/basic/fixdep.c86
-rwxr-xr-xscripts/coccicheck2
-rw-r--r--scripts/coccinelle/api/memdup_user.cocci8
-rw-r--r--scripts/coccinelle/api/pm_runtime.cocci18
-rw-r--r--scripts/coccinelle/misc/cond_no_effect.cocci64
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c640
-rwxr-xr-xscripts/gen_initramfs_list.sh5
-rw-r--r--scripts/genksyms/lex.l35
-rw-r--r--scripts/genksyms/lex.lex.c_shipped35
-rwxr-xr-xscripts/link-vmlinux.sh71
-rw-r--r--security/keys/Kconfig2
-rw-r--r--security/keys/big_key.c59
-rw-r--r--security/keys/proc.c2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/tomoyo/domain.c2
-rw-r--r--sound/core/seq/seq_compat.c2
-rw-r--r--sound/core/seq/seq_timer.c4
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/hda/dell_wmi_helper.c2
-rw-r--r--sound/pci/hda/hda_intel.c7
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/pci/hda/thinkpad_helper.c2
-rw-r--r--sound/usb/line6/driver.c4
-rw-r--r--sound/usb/line6/podhd.c3
-rw-r--r--sound/usb/quirks-table.h17
-rw-r--r--tools/accounting/.gitignore (renamed from Documentation/accounting/.gitignore)0
-rw-r--r--tools/accounting/Makefile9
-rw-r--r--tools/accounting/getdelays.c (renamed from Documentation/accounting/getdelays.c)0
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/laptop/dslm/.gitignore (renamed from Documentation/laptops/.gitignore)0
-rw-r--r--tools/laptop/dslm/Makefile9
-rw-r--r--tools/laptop/dslm/dslm.c (renamed from Documentation/laptops/dslm.c)0
-rw-r--r--tools/objtool/arch/x86/decode.c9
-rw-r--r--tools/objtool/builtin-check.c68
-rw-r--r--tools/pcmcia/.gitignore (renamed from Documentation/pcmcia/.gitignore)0
-rw-r--r--tools/pcmcia/Makefile9
-rw-r--r--tools/pcmcia/crc32hash.c (renamed from Documentation/pcmcia/crc32hash.c)0
-rw-r--r--tools/perf/jvmti/Makefile2
-rw-r--r--tools/perf/ui/browsers/hists.c3
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/parse-events.l4
-rw-r--r--tools/testing/selftests/filesystems/.gitignore (renamed from Documentation/filesystems/.gitignore)0
-rw-r--r--tools/testing/selftests/filesystems/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/dnotify_test.c (renamed from Documentation/filesystems/dnotify_test.c)0
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh2
-rwxr-xr-xtools/testing/selftests/futex/run.sh2
-rw-r--r--tools/testing/selftests/ia64/.gitignore (renamed from Documentation/ia64/.gitignore)0
-rw-r--r--tools/testing/selftests/ia64/Makefile8
-rw-r--r--tools/testing/selftests/ia64/aliasing-test.c (renamed from Documentation/ia64/aliasing-test.c)0
-rw-r--r--tools/testing/selftests/networking/timestamping/.gitignore (renamed from Documentation/networking/timestamping/.gitignore)0
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile8
-rw-r--r--tools/testing/selftests/networking/timestamping/hwtstamp_config.c (renamed from Documentation/networking/timestamping/hwtstamp_config.c)0
-rw-r--r--tools/testing/selftests/networking/timestamping/timestamping.c (renamed from Documentation/networking/timestamping/timestamping.c)0
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c (renamed from Documentation/networking/timestamping/txtimestamp.c)0
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/export.h1
-rw-r--r--tools/testing/selftests/powerpc/math/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/export.h1
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore4
-rw-r--r--tools/testing/selftests/prctl/.gitignore (renamed from Documentation/prctl/.gitignore)0
-rw-r--r--tools/testing/selftests/prctl/Makefile15
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c (renamed from Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c)0
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c (renamed from Documentation/prctl/disable-tsc-on-off-stress-test.c)0
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-test.c (renamed from Documentation/prctl/disable-tsc-test.c)0
-rw-r--r--tools/testing/selftests/ptp/.gitignore (renamed from Documentation/ptp/.gitignore)0
-rw-r--r--tools/testing/selftests/ptp/Makefile8
-rw-r--r--tools/testing/selftests/ptp/testptp.c (renamed from Documentation/ptp/testptp.c)0
-rw-r--r--tools/testing/selftests/ptp/testptp.mk (renamed from Documentation/ptp/testptp.mk)0
-rw-r--r--tools/testing/selftests/timers/posix_timers.c4
-rw-r--r--tools/testing/selftests/vDSO/.gitignore (renamed from Documentation/vDSO/.gitignore)0
-rw-r--r--tools/testing/selftests/vDSO/Makefile20
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c (renamed from Documentation/vDSO/parse_vdso.c)0
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c (renamed from Documentation/vDSO/vdso_standalone_test_x86.c)0
-rw-r--r--tools/testing/selftests/vDSO/vdso_test.c (renamed from Documentation/vDSO/vdso_test.c)0
-rw-r--r--tools/testing/selftests/watchdog/.gitignore1
-rw-r--r--tools/testing/selftests/watchdog/Makefile8
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c (renamed from Documentation/watchdog/src/watchdog-test.c)0
-rw-r--r--tools/testing/selftests/zram/README2
-rw-r--r--virt/kvm/async_pf.c3
-rw-r--r--virt/kvm/kvm_main.c21
1707 files changed, 35360 insertions, 13824 deletions
diff --git a/.mailmap b/.mailmap
index 2408e56e241b..02d261407683 100644
--- a/.mailmap
+++ b/.mailmap
@@ -127,6 +127,7 @@ Peter Oruba <peter@oruba.de>
Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
+Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
diff --git a/CREDITS b/CREDITS
index 513aaa3546bf..837367624e45 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1864,10 +1864,11 @@ S: The Netherlands
N: Martin Kepplinger
E: martink@posteo.de
-E: martin.kepplinger@theobroma-systems.com
+E: martin.kepplinger@ginzinger.com
W: http://www.martinkepplinger.com
D: mma8452 accelerators iio driver
-D: Kernel cleanups
+D: pegasus_notetaker input driver
+D: Kernel fixes and cleanups
S: Garnisonstraße 26
S: 4020 Linz
S: Austria
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index cb9a6c6fa83b..3acc4f1a6f84 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -46,7 +46,8 @@ IRQ.txt
Intel-IOMMU.txt
- basic info on the Intel IOMMU virtualization support.
Makefile
- - some files in Documentation dir are actually sample code to build
+ - This file does nothing. Removing it breaks make htmldocs and
+ make distclean.
ManagementStyle
- how to (attempt to) manage kernel hackers.
RCU/
diff --git a/Documentation/80211/cfg80211.rst b/Documentation/80211/cfg80211.rst
new file mode 100644
index 000000000000..b1e149ea6fee
--- /dev/null
+++ b/Documentation/80211/cfg80211.rst
@@ -0,0 +1,345 @@
+==================
+cfg80211 subsystem
+==================
+
+Device registration
+===================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Device registration
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_channel_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_rate_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_rate
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_sta_ht_cap
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_supported_band
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_signal_type
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_params_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wireless_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_new
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_register
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_unregister
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_free
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_name
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_priv
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: priv_to_wiphy
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: set_wiphy_dev
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wdev_priv
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_iface_limit
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_iface_combination
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_check_combinations
+
+Actions and configuration
+=========================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Actions and configuration
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ops
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: vif_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: key_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: survey_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: survey_info
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_beacon_data
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ap_settings
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: station_parameters
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: rate_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: rate_info
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: station_info
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: monitor_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: mpath_info_flags
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: mpath_info
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: bss_parameters
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_txq_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_crypto_settings
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_auth_request
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_assoc_request
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_deauth_request
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_disassoc_request
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ibss_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_params
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_pmksa
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_rx_mlme_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_auth_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_rx_assoc_resp
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_assoc_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_tx_mlme_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ibss_joined
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_result
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_connect_timeout
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_roamed
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_disconnected
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ready_on_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_remain_on_channel_expired
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_new_sta
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_rx_mgmt
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_mgmt_tx_status
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_cqm_rssi_notify
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_cqm_pktloss_notify
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_michael_mic_failure
+
+Scanning and BSS list handling
+==============================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Scanning and BSS list handling
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_ssid
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_scan_request
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_scan_done
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_inform_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_inform_bss_frame_data
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_inform_bss_data
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_unlink_bss
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_find_ie
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_bss_get_ie
+
+Utility functions
+=================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Utility functions
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_channel_to_frequency
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_frequency_to_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_get_channel
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_get_response_rate
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_hdrlen
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_get_hdrlen_from_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_radiotap_iterator
+
+Data path helpers
+=================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Data path helpers
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_data_to_8023
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_data_from_8023
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: ieee80211_amsdu_to_8023s
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_classify8021d
+
+Regulatory enforcement infrastructure
+=====================================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Regulatory enforcement infrastructure
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: regulatory_hint
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_apply_custom_regulatory
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: freq_reg_info
+
+RFkill integration
+==================
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: RFkill integration
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_rfkill_set_hw_state
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_rfkill_start_polling
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_rfkill_stop_polling
+
+Test mode
+=========
+
+.. kernel-doc:: include/net/cfg80211.h
+ :doc: Test mode
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_testmode_alloc_reply_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_testmode_reply
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_testmode_alloc_event_skb
+
+.. kernel-doc:: include/net/cfg80211.h
+ :functions: cfg80211_testmode_event
diff --git a/Documentation/80211/conf.py b/Documentation/80211/conf.py
new file mode 100644
index 000000000000..20c7c275ef4a
--- /dev/null
+++ b/Documentation/80211/conf.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "Linux 802.11 Driver Developer's Guide"
+
+tags.add("subproject")
diff --git a/Documentation/80211/index.rst b/Documentation/80211/index.rst
new file mode 100644
index 000000000000..90bba476f442
--- /dev/null
+++ b/Documentation/80211/index.rst
@@ -0,0 +1,17 @@
+=====================================
+Linux 802.11 Driver Developer's Guide
+=====================================
+
+.. toctree::
+
+ introduction
+ cfg80211
+ mac80211
+ mac80211-advanced
+
+.. only:: subproject
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/80211/introduction.rst b/Documentation/80211/introduction.rst
new file mode 100644
index 000000000000..4938fa87691c
--- /dev/null
+++ b/Documentation/80211/introduction.rst
@@ -0,0 +1,17 @@
+============
+Introduction
+============
+
+Explaining wireless 802.11 networking in the Linux kernel
+
+Copyright 2007-2009 Johannes Berg
+
+These books attempt to give a description of the various subsystems
+that play a role in 802.11 wireless networking in Linux. Since these
+books are for kernel developers, they attempt to document the
+structures and functions used in the kernel as well as giving a
+higher-level overview.
+
+The reader is expected to be familiar with the 802.11 standard as
+published by the IEEE in 802.11-2007 (or possibly later versions).
+References to this standard will be given as "802.11-2007 8.1.5".
diff --git a/Documentation/80211/mac80211-advanced.rst b/Documentation/80211/mac80211-advanced.rst
new file mode 100644
index 000000000000..70a89b2163c2
--- /dev/null
+++ b/Documentation/80211/mac80211-advanced.rst
@@ -0,0 +1,295 @@
+=============================
+mac80211 subsystem (advanced)
+=============================
+
+Information contained within this part of the book is of interest only
+for advanced interaction of mac80211 with drivers to exploit more
+hardware capabilities and improve performance.
+
+LED support
+===========
+
+Mac80211 supports various ways of blinking LEDs. Wherever possible,
+device LEDs should be exposed as LED class devices and hooked up to the
+appropriate trigger, which will then be triggered appropriately by
+mac80211.
+
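As an illustration only (not part of this patch), a driver following the advice above might expose its TX LED as an LED class device wired to the mac80211 TX trigger roughly as in the sketch below; the "mydrv" names and the private-data layout are hypothetical.

    #include <linux/leds.h>
    #include <net/mac80211.h>

    /* Hypothetical per-device private data holding the LED class device. */
    struct mydrv_priv {
            struct led_classdev tx_led;
    };

    static int mydrv_register_tx_led(struct ieee80211_hw *hw, struct device *dev)
    {
            struct mydrv_priv *priv = hw->priv;

            priv->tx_led.name = "mydrv::tx";
            /* Let mac80211's TX activity trigger drive the blinking. */
            priv->tx_led.default_trigger = ieee80211_get_tx_led_name(hw);
            return led_classdev_register(dev, &priv->tx_led);
    }

With this, userspace sees a normal /sys/class/leds entry whose trigger mac80211 pulses on transmit activity, and no driver-specific blinking code is needed.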
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_tx_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_rx_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_assoc_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_radio_led_name
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tpt_blink
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tpt_led_trigger_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_create_tpt_led_trigger
+
+Hardware crypto acceleration
+============================
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Hardware crypto acceleration
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: set_key_cmd
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_key_conf
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_key_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_tkip_p1k
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_tkip_p1k_iv
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_tkip_p2k
+
+Powersave support
+=================
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Powersave support
+
+Beacon filter support
+=====================
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Beacon filter support
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_beacon_loss
+
+Multiple queues and QoS support
+===============================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_queue_params
+
+Access point mode support
+=========================
+
+TBD
+
+Some parts of the if_conf should be discussed here instead
+
+Insert notes about VLAN interfaces with hw crypto here or in the hw
+crypto chapter.
+
+support for powersaving clients
+-------------------------------
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: AP support for powersaving clients
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_get_buffered_bc
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_beacon_get
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta_eosp
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_frame_release_type
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta_ps_transition
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta_ps_transition_ni
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta_set_buffered
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta_block_awake
+
+Supporting multiple virtual interfaces
+======================================
+
+TBD
+
+Note: WDS with identical MAC address should almost always be OK
+
+Insert notes about having multiple virtual interfaces with different MAC
+addresses here, note which configurations are supported by mac80211, add
+notes about supporting hw crypto with it.
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_iterate_active_interfaces
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_iterate_active_interfaces_atomic
+
+Station handling
+================
+
+TODO
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_sta
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: sta_notify_cmd
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_find_sta
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_find_sta_by_ifaddr
+
+Hardware scan offload
+=====================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_scan_completed
+
+Aggregation
+===========
+
+TX A-MPDU aggregation
+---------------------
+
+.. kernel-doc:: net/mac80211/agg-tx.c
+ :doc: TX A-MPDU aggregation
+
+.. WARNING: DOCPROC directive not supported: !Cnet/mac80211/agg-tx.c
+
+RX A-MPDU aggregation
+---------------------
+
+.. kernel-doc:: net/mac80211/agg-rx.c
+ :doc: RX A-MPDU aggregation
+
+.. WARNING: DOCPROC directive not supported: !Cnet/mac80211/agg-rx.c
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_ampdu_mlme_action
+
+Spatial Multiplexing Powersave (SMPS)
+=====================================
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Spatial multiplexing power save
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_request_smps
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_smps_mode
+
+TBD
+
+This part of the book describes the rate control algorithm interface and
+how it relates to mac80211 and drivers.
+
+Rate Control API
+================
+
+TBD
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_start_tx_ba_session
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_start_tx_ba_cb_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_stop_tx_ba_session
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_stop_tx_ba_cb_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rate_control_changed
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_rate_control
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: rate_control_send_low
+
+TBD
+
+This part of the book describes mac80211 internals.
+
+Key handling
+============
+
+Key handling basics
+-------------------
+
+.. kernel-doc:: net/mac80211/key.c
+ :doc: Key handling basics
+
+MORE TBD
+--------
+
+TBD
+
+Receive processing
+==================
+
+TBD
+
+Transmit processing
+===================
+
+TBD
+
+Station info handling
+=====================
+
+Programming information
+-----------------------
+
+.. kernel-doc:: net/mac80211/sta_info.h
+ :functions: sta_info
+
+.. kernel-doc:: net/mac80211/sta_info.h
+ :functions: ieee80211_sta_info_flags
+
+STA information lifetime rules
+------------------------------
+
+.. kernel-doc:: net/mac80211/sta_info.c
+ :doc: STA information lifetime rules
+
+Aggregation
+===========
+
+.. kernel-doc:: net/mac80211/sta_info.h
+ :functions: sta_ampdu_mlme
+
+.. kernel-doc:: net/mac80211/sta_info.h
+ :functions: tid_ampdu_tx
+
+.. kernel-doc:: net/mac80211/sta_info.h
+ :functions: tid_ampdu_rx
+
+Synchronisation
+===============
+
+TBD
+
+Locking, lots of RCU
diff --git a/Documentation/80211/mac80211.rst b/Documentation/80211/mac80211.rst
new file mode 100644
index 000000000000..85a8335e80b6
--- /dev/null
+++ b/Documentation/80211/mac80211.rst
@@ -0,0 +1,216 @@
+===========================
+mac80211 subsystem (basics)
+===========================
+
+You should read and understand the information contained within this
+part of the book while implementing a mac80211 driver. In some chapters,
+advanced usage is noted; those chapters may be skipped if this isn't needed.
+
+This part of the book only covers station and monitor mode
+functionality; additional information required to implement the other
+modes is covered in the second part of the book.
+
+Basic hardware handling
+=======================
+
+TBD
+
+This chapter shall contain information on getting a hw struct allocated
+and registered with mac80211.
+
+Since it is required to allocate rates/modes before registering a hw
+struct, this chapter shall also contain information on setting up the
+rate/mode structs.
+
+Additionally, some discussion about the callbacks and the general
+programming model should be in here, including the definition of
+ieee80211_ops which will be referred to a lot.
+
+Finally, a discussion of hardware capabilities should be done with
+references to other parts of the book.
+
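As an illustration only (not part of this patch), the allocate/register sequence the chapter refers to usually looks like the sketch below; the "mydrv" names, private struct, and callbacks are hypothetical placeholders, and a real driver must populate hw->wiphy->bands[] and many more ieee80211_ops callbacks before registering.

    #include <net/mac80211.h>

    struct mydrv_priv { int dummy; };        /* hypothetical private data */

    static const struct ieee80211_ops mydrv_ops = {
            .tx    = mydrv_tx,               /* hypothetical callbacks */
            .start = mydrv_start,
            .stop  = mydrv_stop,
    };

    static int mydrv_setup(struct device *dev)
    {
            struct ieee80211_hw *hw;
            int err;

            hw = ieee80211_alloc_hw(sizeof(struct mydrv_priv), &mydrv_ops);
            if (!hw)
                    return -ENOMEM;
            SET_IEEE80211_DEV(hw, dev);
            /* rates/modes (hw->wiphy->bands[]) must be set up before this */
            err = ieee80211_register_hw(hw);
            if (err)
                    ieee80211_free_hw(hw);
            return err;
    }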
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_hw
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_hw_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: SET_IEEE80211_DEV
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: SET_IEEE80211_PERM_ADDR
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_ops
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_alloc_hw
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_register_hw
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_unregister_hw
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_free_hw
+
+PHY configuration
+=================
+
+TBD
+
+This chapter should describe PHY handling including start/stop callbacks
+and the various structures used.
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_conf
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_conf_flags
+
+Virtual interfaces
+==================
+
+TBD
+
+This chapter should describe virtual interface basics that are relevant
+to the driver (VLANs, MGMT etc are not.) It should explain the use of
+the add_iface/remove_iface callbacks as well as the interface
+configuration callbacks.
+
+Things related to AP mode should be discussed there.
+
+Things related to supporting multiple interfaces should be in the
+appropriate chapter, a BIG FAT note should be here about this though and
+the recommendation to allow only a single interface in STA mode at
+first!
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_vif
+
+Receive and transmit processing
+===============================
+
+what should be here
+-------------------
+
+TBD
+
+This should describe the receive and transmit paths in mac80211/the
+drivers as well as transmit status handling.
+
+Frame format
+------------
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Frame format
+
+Packet alignment
+----------------
+
+.. kernel-doc:: net/mac80211/rx.c
+ :doc: Packet alignment
+
+Calling into mac80211 from interrupts
+-------------------------------------
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Calling mac80211 from interrupts
+
+functions/definitions
+---------------------
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rx_status
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: mac80211_rx_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: mac80211_tx_info_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: mac80211_tx_control_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: mac80211_rate_control_flags
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_rate
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_info
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_info_clear_status
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rx
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rx_ni
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rx_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_status
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_status_ni
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_tx_status_irqsafe
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rts_get
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_rts_duration
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_ctstoself_get
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_ctstoself_duration
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_generic_frame_duration
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_wake_queue
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_stop_queue
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_wake_queues
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_stop_queues
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_queue_stopped
+
+Frame filtering
+===============
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: Frame filtering
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_filter_flags
+
+The mac80211 workqueue
+======================
+
+.. kernel-doc:: include/net/mac80211.h
+ :doc: mac80211 workqueue
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_queue_work
+
+.. kernel-doc:: include/net/mac80211.h
+ :functions: ieee80211_queue_delayed_work
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a61926..640f65e79ef1 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/reset
Date: October 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
- Writing 1 will issue a PERST to card which may cause the card
- to reload the FPGA depending on load_image_on_perst.
+ Writing 1 will issue a PERST to the card provided there are no
+ contexts active on any of the card's AFUs. This may cause
+ the card to reload the FPGA, depending on load_image_on_perst.
+ Writing -1 will force a PERST irrespective of any active
+ contexts on the card's AFUs.
Users: https://github.com/ibm-capi/libcxl
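As an illustration only (not part of this patch), a userspace tool could trigger the normal reset path like this; the "card0" name is an example, real code should use the card's actual sysfs entry.

    #include <fcntl.h>
    #include <unistd.h>

    /* Request a normal PERST; writing "-1" instead would force it even
     * with contexts active on the AFUs. */
    int cxl_reset_card(void)
    {
            int fd = open("/sys/class/cxl/card0/reset", O_WRONLY);
            if (fd < 0)
                    return -1;
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }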
What: /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
deleted file mode 100644
index 800fe7a9024c..000000000000
--- a/Documentation/DocBook/80211.tmpl
+++ /dev/null
@@ -1,584 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE set PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
-<set>
- <setinfo>
- <title>The 802.11 subsystems &ndash; for kernel developers</title>
- <subtitle>
- Explaining wireless 802.11 networking in the Linux kernel
- </subtitle>
-
- <copyright>
- <year>2007-2009</year>
- <holder>Johannes Berg</holder>
- </copyright>
-
- <authorgroup>
- <author>
- <firstname>Johannes</firstname>
- <surname>Berg</surname>
- <affiliation>
- <address><email>johannes@sipsolutions.net</email></address>
- </affiliation>
- </author>
- </authorgroup>
-
- <legalnotice>
- <para>
- This documentation is free software; you can redistribute
- it and/or modify it under the terms of the GNU General Public
- License version 2 as published by the Free Software Foundation.
- </para>
- <para>
- This documentation is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- </para>
- <para>
- You should have received a copy of the GNU General Public
- License along with this documentation; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- MA 02111-1307 USA
- </para>
- <para>
- For more details see the file COPYING in the source
- distribution of Linux.
- </para>
- </legalnotice>
-
- <abstract>
- <para>
- These books attempt to give a description of the
- various subsystems that play a role in 802.11 wireless
- networking in Linux. Since these books are for kernel
- developers they attempts to document the structures
- and functions used in the kernel as well as giving a
- higher-level overview.
- </para>
- <para>
- The reader is expected to be familiar with the 802.11
- standard as published by the IEEE in 802.11-2007 (or
- possibly later versions). References to this standard
- will be given as "802.11-2007 8.1.5".
- </para>
- </abstract>
- </setinfo>
- <book id="cfg80211-developers-guide">
- <bookinfo>
- <title>The cfg80211 subsystem</title>
-
- <abstract>
-!Pinclude/net/cfg80211.h Introduction
- </abstract>
- </bookinfo>
- <chapter>
- <title>Device registration</title>
-!Pinclude/net/cfg80211.h Device registration
-!Finclude/net/cfg80211.h ieee80211_channel_flags
-!Finclude/net/cfg80211.h ieee80211_channel
-!Finclude/net/cfg80211.h ieee80211_rate_flags
-!Finclude/net/cfg80211.h ieee80211_rate
-!Finclude/net/cfg80211.h ieee80211_sta_ht_cap
-!Finclude/net/cfg80211.h ieee80211_supported_band
-!Finclude/net/cfg80211.h cfg80211_signal_type
-!Finclude/net/cfg80211.h wiphy_params_flags
-!Finclude/net/cfg80211.h wiphy_flags
-!Finclude/net/cfg80211.h wiphy
-!Finclude/net/cfg80211.h wireless_dev
-!Finclude/net/cfg80211.h wiphy_new
-!Finclude/net/cfg80211.h wiphy_register
-!Finclude/net/cfg80211.h wiphy_unregister
-!Finclude/net/cfg80211.h wiphy_free
-
-!Finclude/net/cfg80211.h wiphy_name
-!Finclude/net/cfg80211.h wiphy_dev
-!Finclude/net/cfg80211.h wiphy_priv
-!Finclude/net/cfg80211.h priv_to_wiphy
-!Finclude/net/cfg80211.h set_wiphy_dev
-!Finclude/net/cfg80211.h wdev_priv
-!Finclude/net/cfg80211.h ieee80211_iface_limit
-!Finclude/net/cfg80211.h ieee80211_iface_combination
-!Finclude/net/cfg80211.h cfg80211_check_combinations
- </chapter>
- <chapter>
- <title>Actions and configuration</title>
-!Pinclude/net/cfg80211.h Actions and configuration
-!Finclude/net/cfg80211.h cfg80211_ops
-!Finclude/net/cfg80211.h vif_params
-!Finclude/net/cfg80211.h key_params
-!Finclude/net/cfg80211.h survey_info_flags
-!Finclude/net/cfg80211.h survey_info
-!Finclude/net/cfg80211.h cfg80211_beacon_data
-!Finclude/net/cfg80211.h cfg80211_ap_settings
-!Finclude/net/cfg80211.h station_parameters
-!Finclude/net/cfg80211.h rate_info_flags
-!Finclude/net/cfg80211.h rate_info
-!Finclude/net/cfg80211.h station_info
-!Finclude/net/cfg80211.h monitor_flags
-!Finclude/net/cfg80211.h mpath_info_flags
-!Finclude/net/cfg80211.h mpath_info
-!Finclude/net/cfg80211.h bss_parameters
-!Finclude/net/cfg80211.h ieee80211_txq_params
-!Finclude/net/cfg80211.h cfg80211_crypto_settings
-!Finclude/net/cfg80211.h cfg80211_auth_request
-!Finclude/net/cfg80211.h cfg80211_assoc_request
-!Finclude/net/cfg80211.h cfg80211_deauth_request
-!Finclude/net/cfg80211.h cfg80211_disassoc_request
-!Finclude/net/cfg80211.h cfg80211_ibss_params
-!Finclude/net/cfg80211.h cfg80211_connect_params
-!Finclude/net/cfg80211.h cfg80211_pmksa
-!Finclude/net/cfg80211.h cfg80211_rx_mlme_mgmt
-!Finclude/net/cfg80211.h cfg80211_auth_timeout
-!Finclude/net/cfg80211.h cfg80211_rx_assoc_resp
-!Finclude/net/cfg80211.h cfg80211_assoc_timeout
-!Finclude/net/cfg80211.h cfg80211_tx_mlme_mgmt
-!Finclude/net/cfg80211.h cfg80211_ibss_joined
-!Finclude/net/cfg80211.h cfg80211_connect_result
-!Finclude/net/cfg80211.h cfg80211_connect_bss
-!Finclude/net/cfg80211.h cfg80211_connect_timeout
-!Finclude/net/cfg80211.h cfg80211_roamed
-!Finclude/net/cfg80211.h cfg80211_disconnected
-!Finclude/net/cfg80211.h cfg80211_ready_on_channel
-!Finclude/net/cfg80211.h cfg80211_remain_on_channel_expired
-!Finclude/net/cfg80211.h cfg80211_new_sta
-!Finclude/net/cfg80211.h cfg80211_rx_mgmt
-!Finclude/net/cfg80211.h cfg80211_mgmt_tx_status
-!Finclude/net/cfg80211.h cfg80211_cqm_rssi_notify
-!Finclude/net/cfg80211.h cfg80211_cqm_pktloss_notify
-!Finclude/net/cfg80211.h cfg80211_michael_mic_failure
- </chapter>
- <chapter>
- <title>Scanning and BSS list handling</title>
-!Pinclude/net/cfg80211.h Scanning and BSS list handling
-!Finclude/net/cfg80211.h cfg80211_ssid
-!Finclude/net/cfg80211.h cfg80211_scan_request
-!Finclude/net/cfg80211.h cfg80211_scan_done
-!Finclude/net/cfg80211.h cfg80211_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss
-!Finclude/net/cfg80211.h cfg80211_inform_bss_frame_data
-!Finclude/net/cfg80211.h cfg80211_inform_bss_data
-!Finclude/net/cfg80211.h cfg80211_unlink_bss
-!Finclude/net/cfg80211.h cfg80211_find_ie
-!Finclude/net/cfg80211.h ieee80211_bss_get_ie
- </chapter>
- <chapter>
- <title>Utility functions</title>
-!Pinclude/net/cfg80211.h Utility functions
-!Finclude/net/cfg80211.h ieee80211_channel_to_frequency
-!Finclude/net/cfg80211.h ieee80211_frequency_to_channel
-!Finclude/net/cfg80211.h ieee80211_get_channel
-!Finclude/net/cfg80211.h ieee80211_get_response_rate
-!Finclude/net/cfg80211.h ieee80211_hdrlen
-!Finclude/net/cfg80211.h ieee80211_get_hdrlen_from_skb
-!Finclude/net/cfg80211.h ieee80211_radiotap_iterator
- </chapter>
- <chapter>
- <title>Data path helpers</title>
-!Pinclude/net/cfg80211.h Data path helpers
-!Finclude/net/cfg80211.h ieee80211_data_to_8023
-!Finclude/net/cfg80211.h ieee80211_data_from_8023
-!Finclude/net/cfg80211.h ieee80211_amsdu_to_8023s
-!Finclude/net/cfg80211.h cfg80211_classify8021d
- </chapter>
- <chapter>
- <title>Regulatory enforcement infrastructure</title>
-!Pinclude/net/cfg80211.h Regulatory enforcement infrastructure
-!Finclude/net/cfg80211.h regulatory_hint
-!Finclude/net/cfg80211.h wiphy_apply_custom_regulatory
-!Finclude/net/cfg80211.h freq_reg_info
- </chapter>
- <chapter>
- <title>RFkill integration</title>
-!Pinclude/net/cfg80211.h RFkill integration
-!Finclude/net/cfg80211.h wiphy_rfkill_set_hw_state
-!Finclude/net/cfg80211.h wiphy_rfkill_start_polling
-!Finclude/net/cfg80211.h wiphy_rfkill_stop_polling
- </chapter>
- <chapter>
- <title>Test mode</title>
-!Pinclude/net/cfg80211.h Test mode
-!Finclude/net/cfg80211.h cfg80211_testmode_alloc_reply_skb
-!Finclude/net/cfg80211.h cfg80211_testmode_reply
-!Finclude/net/cfg80211.h cfg80211_testmode_alloc_event_skb
-!Finclude/net/cfg80211.h cfg80211_testmode_event
- </chapter>
- </book>
- <book id="mac80211-developers-guide">
- <bookinfo>
- <title>The mac80211 subsystem</title>
- <abstract>
-!Pinclude/net/mac80211.h Introduction
-!Pinclude/net/mac80211.h Warning
- </abstract>
- </bookinfo>
-
- <toc></toc>
-
- <!--
- Generally, this document shall be ordered by increasing complexity.
- It is important to note that readers should be able to read only
- the first few sections to get a working driver and only advanced
- usage should require reading the full document.
- -->
-
- <part>
- <title>The basic mac80211 driver interface</title>
- <partintro>
- <para>
- You should read and understand the information contained
- within this part of the book while implementing a driver.
- In some chapters, advanced usage is noted, that may be
- skipped at first.
- </para>
- <para>
- This part of the book only covers station and monitor mode
- functionality, additional information required to implement
- the other modes is covered in the second part of the book.
- </para>
- </partintro>
-
- <chapter id="basics">
- <title>Basic hardware handling</title>
- <para>TBD</para>
- <para>
- This chapter shall contain information on getting a hw
- struct allocated and registered with mac80211.
- </para>
- <para>
- Since it is required to allocate rates/modes before registering
- a hw struct, this chapter shall also contain information on setting
- up the rate/mode structs.
- </para>
- <para>
- Additionally, some discussion about the callbacks and
- the general programming model should be in here, including
- the definition of ieee80211_ops which will be referred to
- a lot.
- </para>
- <para>
- Finally, a discussion of hardware capabilities should be done
- with references to other parts of the book.
- </para>
- <!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h ieee80211_hw
-!Finclude/net/mac80211.h ieee80211_hw_flags
-!Finclude/net/mac80211.h SET_IEEE80211_DEV
-!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
-!Finclude/net/mac80211.h ieee80211_ops
-!Finclude/net/mac80211.h ieee80211_alloc_hw
-!Finclude/net/mac80211.h ieee80211_register_hw
-!Finclude/net/mac80211.h ieee80211_unregister_hw
-!Finclude/net/mac80211.h ieee80211_free_hw
- </chapter>
-
- <chapter id="phy-handling">
- <title>PHY configuration</title>
- <para>TBD</para>
- <para>
- This chapter should describe PHY handling including
- start/stop callbacks and the various structures used.
- </para>
-!Finclude/net/mac80211.h ieee80211_conf
-!Finclude/net/mac80211.h ieee80211_conf_flags
- </chapter>
-
- <chapter id="iface-handling">
- <title>Virtual interfaces</title>
- <para>TBD</para>
- <para>
- This chapter should describe virtual interface basics
- that are relevant to the driver (VLANs, MGMT etc are not.)
- It should explain the use of the add_iface/remove_iface
- callbacks as well as the interface configuration callbacks.
- </para>
- <para>Things related to AP mode should be discussed there.</para>
- <para>
- Things related to supporting multiple interfaces should be
- in the appropriate chapter, a BIG FAT note should be here about
- this though and the recommendation to allow only a single
- interface in STA mode at first!
- </para>
-!Finclude/net/mac80211.h ieee80211_vif
- </chapter>
-
- <chapter id="rx-tx">
- <title>Receive and transmit processing</title>
- <sect1>
- <title>what should be here</title>
- <para>TBD</para>
- <para>
- This should describe the receive and transmit
- paths in mac80211/the drivers as well as
- transmit status handling.
- </para>
- </sect1>
- <sect1>
- <title>Frame format</title>
-!Pinclude/net/mac80211.h Frame format
- </sect1>
- <sect1>
- <title>Packet alignment</title>
-!Pnet/mac80211/rx.c Packet alignment
- </sect1>
- <sect1>
- <title>Calling into mac80211 from interrupts</title>
-!Pinclude/net/mac80211.h Calling mac80211 from interrupts
- </sect1>
- <sect1>
- <title>functions/definitions</title>
-!Finclude/net/mac80211.h ieee80211_rx_status
-!Finclude/net/mac80211.h mac80211_rx_flags
-!Finclude/net/mac80211.h mac80211_tx_info_flags
-!Finclude/net/mac80211.h mac80211_tx_control_flags
-!Finclude/net/mac80211.h mac80211_rate_control_flags
-!Finclude/net/mac80211.h ieee80211_tx_rate
-!Finclude/net/mac80211.h ieee80211_tx_info
-!Finclude/net/mac80211.h ieee80211_tx_info_clear_status
-!Finclude/net/mac80211.h ieee80211_rx
-!Finclude/net/mac80211.h ieee80211_rx_ni
-!Finclude/net/mac80211.h ieee80211_rx_irqsafe
-!Finclude/net/mac80211.h ieee80211_tx_status
-!Finclude/net/mac80211.h ieee80211_tx_status_ni
-!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
-!Finclude/net/mac80211.h ieee80211_rts_get
-!Finclude/net/mac80211.h ieee80211_rts_duration
-!Finclude/net/mac80211.h ieee80211_ctstoself_get
-!Finclude/net/mac80211.h ieee80211_ctstoself_duration
-!Finclude/net/mac80211.h ieee80211_generic_frame_duration
-!Finclude/net/mac80211.h ieee80211_wake_queue
-!Finclude/net/mac80211.h ieee80211_stop_queue
-!Finclude/net/mac80211.h ieee80211_wake_queues
-!Finclude/net/mac80211.h ieee80211_stop_queues
-!Finclude/net/mac80211.h ieee80211_queue_stopped
- </sect1>
- </chapter>
-
- <chapter id="filters">
- <title>Frame filtering</title>
-!Pinclude/net/mac80211.h Frame filtering
-!Finclude/net/mac80211.h ieee80211_filter_flags
- </chapter>
-
- <chapter id="workqueue">
- <title>The mac80211 workqueue</title>
-!Pinclude/net/mac80211.h mac80211 workqueue
-!Finclude/net/mac80211.h ieee80211_queue_work
-!Finclude/net/mac80211.h ieee80211_queue_delayed_work
- </chapter>
- </part>
-
- <part id="advanced">
- <title>Advanced driver interface</title>
- <partintro>
- <para>
- Information contained within this part of the book is
- of interest only for advanced interaction of mac80211
- with drivers to exploit more hardware capabilities and
- improve performance.
- </para>
- </partintro>
-
- <chapter id="led-support">
- <title>LED support</title>
- <para>
- Mac80211 supports various ways of blinking LEDs. Wherever possible,
- device LEDs should be exposed as LED class devices and hooked up to
- the appropriate trigger, which will then be triggered appropriately
- by mac80211.
- </para>
-!Finclude/net/mac80211.h ieee80211_get_tx_led_name
-!Finclude/net/mac80211.h ieee80211_get_rx_led_name
-!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
-!Finclude/net/mac80211.h ieee80211_get_radio_led_name
-!Finclude/net/mac80211.h ieee80211_tpt_blink
-!Finclude/net/mac80211.h ieee80211_tpt_led_trigger_flags
-!Finclude/net/mac80211.h ieee80211_create_tpt_led_trigger
- </chapter>
-
- <chapter id="hardware-crypto-offload">
- <title>Hardware crypto acceleration</title>
-!Pinclude/net/mac80211.h Hardware crypto acceleration
- <!-- intentionally multiple !F lines to get proper order -->
-!Finclude/net/mac80211.h set_key_cmd
-!Finclude/net/mac80211.h ieee80211_key_conf
-!Finclude/net/mac80211.h ieee80211_key_flags
-!Finclude/net/mac80211.h ieee80211_get_tkip_p1k
-!Finclude/net/mac80211.h ieee80211_get_tkip_p1k_iv
-!Finclude/net/mac80211.h ieee80211_get_tkip_p2k
- </chapter>
-
- <chapter id="powersave">
- <title>Powersave support</title>
-!Pinclude/net/mac80211.h Powersave support
- </chapter>
-
- <chapter id="beacon-filter">
- <title>Beacon filter support</title>
-!Pinclude/net/mac80211.h Beacon filter support
-!Finclude/net/mac80211.h ieee80211_beacon_loss
- </chapter>
-
- <chapter id="qos">
- <title>Multiple queues and QoS support</title>
- <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_tx_queue_params
- </chapter>
-
- <chapter id="AP">
- <title>Access point mode support</title>
- <para>TBD</para>
- <para>Some parts of the if_conf should be discussed here instead</para>
- <para>
- Insert notes about VLAN interfaces with hw crypto here or
- in the hw crypto chapter.
- </para>
- <section id="ps-client">
- <title>support for powersaving clients</title>
-!Pinclude/net/mac80211.h AP support for powersaving clients
-!Finclude/net/mac80211.h ieee80211_get_buffered_bc
-!Finclude/net/mac80211.h ieee80211_beacon_get
-!Finclude/net/mac80211.h ieee80211_sta_eosp
-!Finclude/net/mac80211.h ieee80211_frame_release_type
-!Finclude/net/mac80211.h ieee80211_sta_ps_transition
-!Finclude/net/mac80211.h ieee80211_sta_ps_transition_ni
-!Finclude/net/mac80211.h ieee80211_sta_set_buffered
-!Finclude/net/mac80211.h ieee80211_sta_block_awake
- </section>
- </chapter>
-
- <chapter id="multi-iface">
- <title>Supporting multiple virtual interfaces</title>
- <para>TBD</para>
- <para>
- Note: WDS with identical MAC address should almost always be OK
- </para>
- <para>
- Insert notes about having multiple virtual interfaces with
- different MAC addresses here, note which configurations are
- supported by mac80211, add notes about supporting hw crypto
- with it.
- </para>
-!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces
-!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces_atomic
- </chapter>
-
- <chapter id="station-handling">
- <title>Station handling</title>
- <para>TODO</para>
-!Finclude/net/mac80211.h ieee80211_sta
-!Finclude/net/mac80211.h sta_notify_cmd
-!Finclude/net/mac80211.h ieee80211_find_sta
-!Finclude/net/mac80211.h ieee80211_find_sta_by_ifaddr
- </chapter>
-
- <chapter id="hardware-scan-offload">
- <title>Hardware scan offload</title>
- <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_scan_completed
- </chapter>
-
- <chapter id="aggregation">
- <title>Aggregation</title>
- <sect1>
- <title>TX A-MPDU aggregation</title>
-!Pnet/mac80211/agg-tx.c TX A-MPDU aggregation
-!Cnet/mac80211/agg-tx.c
- </sect1>
- <sect1>
- <title>RX A-MPDU aggregation</title>
-!Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
-!Cnet/mac80211/agg-rx.c
-!Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
- </sect1>
- </chapter>
-
- <chapter id="smps">
- <title>Spatial Multiplexing Powersave (SMPS)</title>
-!Pinclude/net/mac80211.h Spatial multiplexing power save
-!Finclude/net/mac80211.h ieee80211_request_smps
-!Finclude/net/mac80211.h ieee80211_smps_mode
- </chapter>
- </part>
-
- <part id="rate-control">
- <title>Rate control interface</title>
- <partintro>
- <para>TBD</para>
- <para>
- This part of the book describes the rate control algorithm
- interface and how it relates to mac80211 and drivers.
- </para>
- </partintro>
- <chapter id="ratecontrol-api">
- <title>Rate Control API</title>
- <para>TBD</para>
-!Finclude/net/mac80211.h ieee80211_start_tx_ba_session
-!Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
-!Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
-!Finclude/net/mac80211.h ieee80211_rate_control_changed
-!Finclude/net/mac80211.h ieee80211_tx_rate_control
-!Finclude/net/mac80211.h rate_control_send_low
- </chapter>
- </part>
-
- <part id="internal">
- <title>Internals</title>
- <partintro>
- <para>TBD</para>
- <para>
- This part of the book describes mac80211 internals.
- </para>
- </partintro>
-
- <chapter id="key-handling">
- <title>Key handling</title>
- <sect1>
- <title>Key handling basics</title>
-!Pnet/mac80211/key.c Key handling basics
- </sect1>
- <sect1>
- <title>MORE TBD</title>
- <para>TBD</para>
- </sect1>
- </chapter>
-
- <chapter id="rx-processing">
- <title>Receive processing</title>
- <para>TBD</para>
- </chapter>
-
- <chapter id="tx-processing">
- <title>Transmit processing</title>
- <para>TBD</para>
- </chapter>
-
- <chapter id="sta-info">
- <title>Station info handling</title>
- <sect1>
- <title>Programming information</title>
-!Fnet/mac80211/sta_info.h sta_info
-!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
- </sect1>
- <sect1>
- <title>STA information lifetime rules</title>
-!Pnet/mac80211/sta_info.c STA information lifetime rules
- </sect1>
- </chapter>
-
- <chapter id="aggregation-internals">
- <title>Aggregation</title>
-!Fnet/mac80211/sta_info.h sta_ampdu_mlme
-!Fnet/mac80211/sta_info.h tid_ampdu_tx
-!Fnet/mac80211/sta_info.h tid_ampdu_rx
- </chapter>
-
- <chapter id="synchronisation">
- <title>Synchronisation</title>
- <para>TBD</para>
- <para>Locking, lots of RCU</para>
- </chapter>
- </part>
- </book>
-</set>
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index 736f5916daea..fdf8232d0eeb 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- 80211.xml debugobjects.xml sh.xml regulator.xml \
+ debugobjects.xml sh.xml regulator.xml \
alsa-driver-api.xml writing-an-alsa-driver.xml \
tracepoint.xml w1.xml \
writing_musb_glue_layer.xml crypto-API.xml iio.xml
diff --git a/Documentation/Makefile b/Documentation/Makefile
index de955e151af8..c2a469112c37 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,3 +1 @@
-subdir-y := accounting auxdisplay blackfin \
- filesystems filesystems ia64 laptops mic misc-devices \
- networking pcmcia prctl ptp timers vDSO watchdog
+subdir-y :=
diff --git a/Documentation/accounting/Makefile b/Documentation/accounting/Makefile
deleted file mode 100644
index 7e232cb6fd7d..000000000000
--- a/Documentation/accounting/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := getdelays
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_getdelays.o += -I$(objtree)/usr/include
diff --git a/Documentation/accounting/delay-accounting.txt b/Documentation/accounting/delay-accounting.txt
index 8a12f0730c94..042ea59b5853 100644
--- a/Documentation/accounting/delay-accounting.txt
+++ b/Documentation/accounting/delay-accounting.txt
@@ -54,9 +54,9 @@ are sent to userspace without requiring a command. If it is the last exiting
task of a thread group, the per-tgid statistics are also sent. More details
are given in the taskstats interface description.
-The getdelays.c userspace utility in this directory allows simple commands to
-be run and the corresponding delay statistics to be displayed. It also serves
-as an example of using the taskstats interface.
+The getdelays.c userspace utility in the tools/accounting directory allows
+simple commands to be run and the corresponding delay statistics to be
+displayed. It also serves as an example of using the taskstats interface.
Usage
-----
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index dea011c8d7c7..b6e69fd371c4 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -8,8 +8,6 @@ Interrupts
- ARM Interrupt subsystem documentation
IXP4xx
- Intel IXP4xx Network processor.
-Makefile
- - Build sourcefiles as part of the Documentation-build for arm
Netwinder
- Netwinder specific documentation
Porting
diff --git a/Documentation/auxdisplay/Makefile b/Documentation/auxdisplay/Makefile
deleted file mode 100644
index ada4dac99ef4..000000000000
--- a/Documentation/auxdisplay/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := cfag12864b-example
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_cfag12864b-example.o += -I$(objtree)/usr/include
diff --git a/Documentation/auxdisplay/cfag12864b b/Documentation/auxdisplay/cfag12864b
index eb7be393a510..12fd51b8de75 100644
--- a/Documentation/auxdisplay/cfag12864b
+++ b/Documentation/auxdisplay/cfag12864b
@@ -101,5 +101,5 @@ Although the LCD won't get updated until the next refresh time arrives.
Also, you can mmap the framebuffer: open & mmap, munmap & close...
which is the best option for most uses.
-Check Documentation/auxdisplay/cfag12864b-example.c
+Check samples/auxdisplay/cfag12864b-example.c
for a real working userspace complete program with usage examples.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index c54fcdd4ae9f..265a1effebde 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,10 +1,6 @@
00-INDEX
- This file
-Makefile
- - Makefile for gptimers example file.
bfin-gpio-notes.txt
- Notes in developing/using bfin-gpio driver.
bfin-spi-notes.txt
- Notes for using bfin spi bus driver.
-gptimers-example.c
- - gptimers example
diff --git a/Documentation/blackfin/Makefile b/Documentation/blackfin/Makefile
deleted file mode 100644
index 6782c58fbc29..000000000000
--- a/Documentation/blackfin/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-ifneq ($(CONFIG_BLACKFIN),)
-ifneq ($(CONFIG_BFIN_GPTIMERS),)
-obj-m := gptimers-example.o
-endif
-endif
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index e5b6497116f4..c75b64a85859 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -309,3 +309,4 @@ Version History
with a reshape in progress.
1.9.0 Add support for RAID level takeover/reshape/region size
and set size reduction.
+1.9.1 Fix activation of existing RAID 4/10 mapped devices
diff --git a/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt b/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
new file mode 100644
index 000000000000..b69bb68992fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
@@ -0,0 +1,17 @@
+Binding for ASCII LCD displays on Imagination Technologies boards
+
+Required properties:
+- compatible : should be one of:
+ "img,boston-lcd"
+ "mti,malta-lcd"
+ "mti,sead3-lcd"
+
+Required properties for "img,boston-lcd":
+- reg : memory region locating the device registers
+
+Required properties for "mti,malta-lcd" or "mti,sead3-lcd":
+- regmap: phandle of the system controller containing the LCD registers
+- offset: offset in bytes to the LCD registers within the system controller
+
+The layout of the registers & properties of the display are determined
+from the compatible string, making this binding somewhat trivial.
diff --git a/Documentation/devicetree/bindings/clock/uniphier-clock.txt b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
index c7179d3b5c33..812163060fa3 100644
--- a/Documentation/devicetree/bindings/clock/uniphier-clock.txt
+++ b/Documentation/devicetree/bindings/clock/uniphier-clock.txt
@@ -24,7 +24,7 @@ Example:
reg = <0x61840000 0x4000>;
clock {
- compatible = "socionext,uniphier-ld20-clock";
+ compatible = "socionext,uniphier-ld11-clock";
#clock-cells = <1>;
};
@@ -43,8 +43,8 @@ Provided clocks:
21: USB3 ch1 PHY1
-Media I/O (MIO) clock
----------------------
+Media I/O (MIO) clock, SD clock
+-------------------------------
Required properties:
- compatible: should be one of the following:
@@ -52,10 +52,10 @@ Required properties:
"socionext,uniphier-ld4-mio-clock" - for LD4 SoC.
"socionext,uniphier-pro4-mio-clock" - for Pro4 SoC.
"socionext,uniphier-sld8-mio-clock" - for sLD8 SoC.
- "socionext,uniphier-pro5-mio-clock" - for Pro5 SoC.
- "socionext,uniphier-pxs2-mio-clock" - for PXs2/LD6b SoC.
+ "socionext,uniphier-pro5-sd-clock" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-sd-clock" - for PXs2/LD6b SoC.
"socionext,uniphier-ld11-mio-clock" - for LD11 SoC.
- "socionext,uniphier-ld20-mio-clock" - for LD20 SoC.
+ "socionext,uniphier-ld20-sd-clock" - for LD20 SoC.
- #clock-cells: should be 1.
Example:
@@ -66,7 +66,7 @@ Example:
reg = <0x59810000 0x800>;
clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ compatible = "socionext,uniphier-ld11-mio-clock";
#clock-cells = <1>;
};
@@ -112,7 +112,7 @@ Example:
reg = <0x59820000 0x200>;
clock {
- compatible = "socionext,uniphier-ld20-peri-clock";
+ compatible = "socionext,uniphier-ld11-peri-clock";
#clock-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index f31b2ad1552b..5fa691e6f638 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -32,6 +32,14 @@ wants to support one of the below features, it should adapt the bindings below.
- clock-frequency
frequency of bus clock in Hz.
+- i2c-bus
+ For I2C adapters that have child nodes that are a mixture of both I2C
+ devices and non-I2C devices, the 'i2c-bus' subnode can be used for
+ populating I2C devices. If the 'i2c-bus' subnode is present, only
+ subnodes of this will be considered as I2C slaves. The properties,
+	subnodes of this will be considered as I2C slaves. The properties
+ if present.
+
- i2c-scl-falling-time-ns
Number of nanoseconds the SCL signal takes to fall; t(f) in the I2C
specification.
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 1416c6a0d2cd..fbbad6446741 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -51,7 +51,6 @@ fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec
gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isil,isl12057 Intersil ISL12057 I2C RTC Chip
isil,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor
maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator
maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
diff --git a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
index f97993be2dcb..d3b273e4336a 100644
--- a/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
+++ b/Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -14,6 +14,7 @@ length of memory mapped region.
representing a ethernet device.
- dsaf-handle: phandle, specifies a reference to a node
representing a dsaf device.
+- node-guid: a number that uniquely identifies a device or component
- #address-cells: must be 2
- #size-cells: must be 2
Optional properties:
@@ -32,6 +33,7 @@ Example:
dma-coherent;
eth-handle = <&eth2 &eth3 &eth4 &eth5 &eth6 &eth7>;
dsaf-handle = <&soc0_dsa>;
+ node-guid = [00 9A CD 00 00 01 02 03];
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&mbigen_dsa>;
diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
new file mode 100644
index 000000000000..7b8944c2cb31
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
@@ -0,0 +1,21 @@
+* MELFAS MIP4 Touchscreen
+
+Required properties:
+- compatible: must be "melfas,mip4_ts"
+- reg: I2C slave address of the chip (0x48 or 0x34)
+- interrupt-parent: interrupt controller to which the chip is connected
+- interrupts: interrupt to which the chip is connected
+
+Optional properties:
+- ce-gpios: GPIO connected to the CE (chip enable) pin of the chip
+
+Example:
+ i2c@00000000 {
+ touchscreen: melfas_mip4@48 {
+ compatible = "melfas,mip4_ts";
+ reg = <0x48>;
+ interrupt-parent = <&gpio>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+ ce-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
new file mode 100644
index 000000000000..fbbacd958240
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
@@ -0,0 +1,23 @@
+* Aspeed BT (Block Transfer) IPMI interface
+
+The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
+(BaseBoard Management Controllers) and the BT interface can be used to
+perform in-band IPMI communication with their host.
+
+Required properties:
+
+- compatible : should be "aspeed,ast2400-bt-bmc"
+- reg: physical address and size of the registers
+
+Optional properties:
+
+- interrupts: interrupt generated by the BT interface. Without an
+  interrupt, the driver will operate in poll mode.
+
+Example:
+
+ ibt@1e789140 {
+ compatible = "aspeed,ast2400-bt-bmc";
+ reg = <0x1e789140 0x18>;
+ interrupts = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/ipmi.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
index d5f1a877ed3e..d5f1a877ed3e 100644
--- a/Documentation/devicetree/bindings/ipmi.txt
+++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 4a7e030e4f9b..e4e1cd91fb1f 100644
--- a/Documentation/devicetree/bindings/mips/brcm/soc.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -2,9 +2,9 @@
Required properties:
-- compatible: "brcm,bcm3384", "brcm,bcm33843"
+- compatible: "brcm,bcm3368", "brcm,bcm3384", "brcm,bcm33843"
"brcm,bcm3384-viper", "brcm,bcm33843-viper"
- "brcm,bcm6328", "brcm,bcm6358", "brcm,bcm6368",
+ "brcm,bcm6328", "brcm,bcm6358", "brcm,bcm6362", "brcm,bcm6368",
"brcm,bcm63168", "brcm,bcm63268",
"brcm,bcm7125", "brcm,bcm7346", "brcm,bcm7358", "brcm,bcm7360",
"brcm,bcm7362", "brcm,bcm7420", "brcm,bcm7425"
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index bce52b2ec55e..6fd988c84c4f 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -49,6 +49,7 @@ Optional port properties:
and
- phy-handle: See ethernet.txt file in the same directory.
+ - phy-mode: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad18f147..2ad18c4ea55c 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
Examples:
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
index e6bbfccd56c3..5020524cddeb 100644
--- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -6,25 +6,25 @@ System reset
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-sld3-reset" - for PH1-sLD3 SoC.
- "socionext,uniphier-ld4-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-sld3-reset" - for sLD3 SoC.
+ "socionext,uniphier-ld4-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
sysctrl@61840000 {
- compatible = "socionext,uniphier-ld20-sysctrl",
+ compatible = "socionext,uniphier-ld11-sysctrl",
"simple-mfd", "syscon";
reg = <0x61840000 0x4000>;
reset {
- compatible = "socionext,uniphier-ld20-reset";
+ compatible = "socionext,uniphier-ld11-reset";
#reset-cells = <1>;
};
@@ -32,30 +32,30 @@ Example:
};
-Media I/O (MIO) reset
----------------------
+Media I/O (MIO) reset, SD reset
+-------------------------------
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-sld3-mio-reset" - for PH1-sLD3 SoC.
- "socionext,uniphier-ld4-mio-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-mio-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-mio-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-mio-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-mio-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-mio-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-mio-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-sld3-mio-reset" - for sLD3 SoC.
+ "socionext,uniphier-ld4-mio-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-mio-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-mio-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-sd-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-sd-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-mio-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-sd-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
mioctrl@59810000 {
- compatible = "socionext,uniphier-ld20-mioctrl",
+ compatible = "socionext,uniphier-ld11-mioctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ compatible = "socionext,uniphier-ld11-mio-reset";
#reset-cells = <1>;
};
@@ -68,24 +68,24 @@ Peripheral reset
Required properties:
- compatible: should be one of the following:
- "socionext,uniphier-ld4-peri-reset" - for PH1-LD4 SoC.
- "socionext,uniphier-pro4-peri-reset" - for PH1-Pro4 SoC.
- "socionext,uniphier-sld8-peri-reset" - for PH1-sLD8 SoC.
- "socionext,uniphier-pro5-peri-reset" - for PH1-Pro5 SoC.
- "socionext,uniphier-pxs2-peri-reset" - for ProXstream2/PH1-LD6b SoC.
- "socionext,uniphier-ld11-peri-reset" - for PH1-LD11 SoC.
- "socionext,uniphier-ld20-peri-reset" - for PH1-LD20 SoC.
+ "socionext,uniphier-ld4-peri-reset" - for LD4 SoC.
+ "socionext,uniphier-pro4-peri-reset" - for Pro4 SoC.
+ "socionext,uniphier-sld8-peri-reset" - for sLD8 SoC.
+ "socionext,uniphier-pro5-peri-reset" - for Pro5 SoC.
+ "socionext,uniphier-pxs2-peri-reset" - for PXs2/LD6b SoC.
+ "socionext,uniphier-ld11-peri-reset" - for LD11 SoC.
+ "socionext,uniphier-ld20-peri-reset" - for LD20 SoC.
- #reset-cells: should be 1.
Example:
perictrl@59820000 {
- compatible = "socionext,uniphier-ld20-perictrl",
+ compatible = "socionext,uniphier-ld11-perictrl",
"simple-mfd", "syscon";
reg = <0x59820000 0x200>;
reset {
- compatible = "socionext,uniphier-ld20-peri-reset";
+ compatible = "socionext,uniphier-ld11-peri-reset";
#reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt b/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
index 8e76f2648796..9882b819f173 100644
--- a/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
+++ b/Documentation/devicetree/bindings/rtc/dallas,ds1390.txt
@@ -11,7 +11,7 @@ Optional properties:
- trickle-diode-disable : Do not use internal trickle charger diode
Should be given if internal trickle charger diode should be disabled
Example:
- ds1390: rtc@68 {
+ ds1390: rtc@0 {
compatible = "dallas,ds1390";
trickle-resistor-ohms = <250>;
reg = <0>;
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.txt b/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
new file mode 100644
index 000000000000..3f61e516ecf6
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/epson,rx8900.txt
@@ -0,0 +1,22 @@
+Real Time Clock driver for:
+ - Epson RX8900
+ - Micro Crystal rv8803
+
+Required properties:
+- compatible: should be: "microcrystal,rv8803" or "epson,rx8900"
+- reg : the I2C address of the device for I2C
+
+Optional properties:
+- epson,vdet-disable : boolean, if present will disable voltage detector.
+ Should be set if no backup battery is used.
+- trickle-diode-disable : boolean, if present will disable internal trickle
+ charger diode
+
+Example:
+
+ rtc: rtc@32 {
+		compatible = "epson,rx8900";
+ reg = <0x32>;
+ epson,vdet-disable;
+ trickle-diode-disable;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index bf7d11ae9bea..bee41f97044e 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -18,6 +18,18 @@ Optional properties:
through pmic_power_en
- clocks: Any internal or external clocks feeding in to rtc
- clock-names: Corresponding names of the clocks
+- pinctrl-0: a phandle pointing to the pin settings for the device
+- pinctrl-names: should be "default"
+
+Optional subnodes:
+- generic pinctrl node
+
+Required pinctrl subnodes properties:
+- pins - Names of ext_wakeup pins to configure
+
+Optional pinctrl subnodes properties:
+- input-enable - Enables ext_wakeup
+- ti,active-high - Set input active high (by default active low)
Example:
@@ -30,4 +42,13 @@ rtc@1c23000 {
system-power-controller;
clocks = <&clk_32k_rtc>, <&clk_32768_ck>;
clock-names = "ext-clk", "int-clk";
+
+ pinctrl-0 = <&ext_wakeup>;
+ pinctrl-names = "default";
+
+ ext_wakeup: ext-wakeup {
+ pins = "ext_wakeup0";
+ input-enable;
+ ti,active-high;
+ };
};
diff --git a/Documentation/devicetree/bindings/serial/cdns,uart.txt b/Documentation/devicetree/bindings/serial/cdns,uart.txt
index a3eb154c32ca..227bb770b027 100644
--- a/Documentation/devicetree/bindings/serial/cdns,uart.txt
+++ b/Documentation/devicetree/bindings/serial/cdns,uart.txt
@@ -1,7 +1,9 @@
Binding for Cadence UART Controller
Required properties:
-- compatible : should be "cdns,uart-r1p8", or "xlnx,xuartps"
+- compatible :
+ Use "xlnx,xuartps","cdns,uart-r1p8" for Zynq-7xxx SoC.
+ Use "xlnx,zynqmp-uart","cdns,uart-r1p12" for Zynq Ultrascale+ MPSoC.
- reg: Should contain UART controller registers location and length.
- interrupts: Should contain UART controller interrupts.
- clocks: Must contain phandles to the UART clocks
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 1e4000d83aee..8d27d1a603e7 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -9,6 +9,14 @@ Required properties:
- "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
- "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
- "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+ - "renesas,scif-r8a7743" for R8A7743 (RZ/G1M) SCIF compatible UART.
+ - "renesas,scifa-r8a7743" for R8A7743 (RZ/G1M) SCIFA compatible UART.
+ - "renesas,scifb-r8a7743" for R8A7743 (RZ/G1M) SCIFB compatible UART.
+ - "renesas,hscif-r8a7743" for R8A7743 (RZ/G1M) HSCIF compatible UART.
+ - "renesas,scif-r8a7745" for R8A7745 (RZ/G1E) SCIF compatible UART.
+ - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
+ - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
+ - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.txt b/Documentation/devicetree/bindings/timer/jcore,pit.txt
new file mode 100644
index 000000000000..af5dd35469d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/jcore,pit.txt
@@ -0,0 +1,24 @@
+J-Core Programmable Interval Timer and Clocksource
+
+Required properties:
+
+- compatible: Must be "jcore,pit".
+
+- reg: Memory region(s) for timer/clocksource registers. For SMP,
+ there should be one region per cpu, indexed by the sequential,
+ zero-based hardware cpu number.
+
+- interrupts: An interrupt to assign for the timer. The actual pit
+ core is integrated with the aic and allows the timer interrupt
+ assignment to be programmed by software, but this property is
+ required in order to reserve an interrupt number that doesn't
+ conflict with other devices.
+
+
+Example:
+
+timer@200 {
+ compatible = "jcore,pit";
+ reg = < 0x200 0x30 0x500 0x30 >;
+ interrupts = < 0x48 >;
+};
diff --git a/Documentation/devicetree/bindings/usb/dwc2.txt b/Documentation/devicetree/bindings/usb/dwc2.txt
index 455f2c310a1b..2c30a5479069 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.txt
+++ b/Documentation/devicetree/bindings/usb/dwc2.txt
@@ -28,10 +28,7 @@ Refer to phy/phy-bindings.txt for generic phy consumer properties
- g-use-dma: enable dma usage in gadget driver.
- g-rx-fifo-size: size of rx fifo size in gadget mode.
- g-np-tx-fifo-size: size of non-periodic tx fifo size in gadget mode.
-
-Deprecated properties:
-- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0)
- in gadget mode.
+- g-tx-fifo-size: size of periodic tx fifo per endpoint (except ep0) in gadget mode.
Example:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 24c6f658bce1..f0a48ea78659 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -163,9 +163,11 @@ maxim Maxim Integrated Products
meas Measurement Specialties
mediatek MediaTek Inc.
melexis Melexis N.V.
+melfas MELFAS Inc.
merrii Merrii Technology Co., Ltd.
micrel Micrel Inc.
microchip Microchip Technology Inc.
+microcrystal Micro Crystal AG
micron Micron Technology Inc.
minix MINIX Technology Ltd.
mitsubishi Mitsubishi Electric Corporation
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 9855ad044386..4660bf222db1 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -22,7 +22,7 @@
| m68k: | TODO |
| metag: | TODO |
| microblaze: | TODO |
- | mips: | TODO |
+ | mips: | ok |
| mn10300: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 9922939e7d99..f66e748fc5e4 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,8 +2,6 @@
- this file (info on some of the filesystems supported by linux).
Locking
- info on locking rules as they pertain to Linux VFS.
-Makefile
- - Makefile for building the filsystems-part of DocBook.
9p.txt
- 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
adfs.txt
diff --git a/Documentation/filesystems/Makefile b/Documentation/filesystems/Makefile
deleted file mode 100644
index 883010ce5e35..000000000000
--- a/Documentation/filesystems/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := dnotify_test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 219ffd41a911..74329fd0add2 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -395,32 +395,6 @@ is not associated with a file:
or if empty, the mapping is anonymous.
-The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
-of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. Hence, for the example above, the
-task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
-
-08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
-08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
-0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
-a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0
-a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack]
-a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
-a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
-a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
-a800b000-a800e000 rw-p 00000000 00:00 0
-a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0
-a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0
-a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0
-a8024000-a8027000 rw-p 00000000 00:00 0
-a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2
-a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2
-a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2
-aff35000-aff4a000 rw-p 00000000 00:00 0
-ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso]
-
The /proc/PID/smaps is an extension based on maps, showing the memory
consumption for each of the process's mappings. For each of mappings there
is a series of lines such as the following:
diff --git a/Documentation/gpio/board.txt b/Documentation/gpio/board.txt
index 40884c4fe40c..a0f61898d493 100644
--- a/Documentation/gpio/board.txt
+++ b/Documentation/gpio/board.txt
@@ -6,7 +6,7 @@ Note that it only applies to the new descriptor-based interface. For a
description of the deprecated integer-based GPIO interface please refer to
gpio-legacy.txt (actually, there is no real mapping possible with the old
interface; you just fetch an integer from somewhere and request the
-corresponding GPIO.
+corresponding GPIO).
All platforms can enable the GPIO library, but if the platform strictly
requires GPIO functionality to be present, it needs to select GPIOLIB from its
@@ -162,6 +162,9 @@ The driver controlling "foo.0" will then be able to obtain its GPIOs as follows:
Since the "led" GPIOs are mapped as active-high, this example will switch their
signals to 1, i.e. enabling the LEDs. And for the "power" GPIO, which is mapped
-as active-low, its actual signal will be 0 after this code. Contrary to the legacy
-integer GPIO interface, the active-low property is handled during mapping and is
-thus transparent to GPIO consumers.
+as active-low, its actual signal will be 0 after this code. Contrary to the
+legacy integer GPIO interface, the active-low property is handled during
+mapping and is thus transparent to GPIO consumers.
+
+A set of functions such as gpiod_set_value() is available to work with
+the new descriptor-oriented interface.
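
As a quick illustration of that descriptor-based consumer API, a minimal sketch
of a probe routine for the "foo.0" device above might request its "power" GPIO
and assert it. The function names come from <linux/gpio/consumer.h>; foo_probe()
itself and the platform-device framing are hypothetical:

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct gpio_desc *power;

            /* Look up the "power" GPIO from the board mapping above */
            power = devm_gpiod_get(&pdev->dev, "power", GPIOD_OUT_LOW);
            if (IS_ERR(power))
                    return PTR_ERR(power);

            /*
             * Logical 1 means "asserted"; the active-low polarity recorded
             * in the mapping is applied by gpiolib, not by this consumer.
             */
            gpiod_set_value(power, 1);
            return 0;
    }

gpiod_set_value() is meant for GPIOs that can be driven without sleeping;
gpiod_set_value_cansleep() exists for controllers (such as I2C expanders) that
may sleep.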
diff --git a/Documentation/ia64/Makefile b/Documentation/ia64/Makefile
deleted file mode 100644
index d493163affe7..000000000000
--- a/Documentation/ia64/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := aliasing-test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/index.rst b/Documentation/index.rst
index d9ccb94fca95..c53d089455a4 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -17,6 +17,7 @@ Contents:
driver-api/index
media/index
gpu/index
+ 80211/index
Indices and tables
==================
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index 1fec1135791d..8d1341ccde64 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -319,3 +319,60 @@ For touchpad packet, the format is:
otherwise byte 0 bit 4 must be set and byte 0/4/5 are
in NEW fmt
F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+
+
+ALPS Absolute Mode - Protocol Version 8
+---------------------------------------
+
+Spoken by SS4 (73 03 14) and SS5 (73 03 28) hardware.
+
+The packet type is given by the APD field, bits 4-5 of byte 3.
+
+Touchpad packet (APD = 0x2):
+
+ b7 b6 b5 b4 b3 b2 b1 b0
+ byte 0: SWM SWR SWL 1 1 0 0 X7
+ byte 1: 0 X6 X5 X4 X3 X2 X1 X0
+ byte 2: 0 Y6 Y5 Y4 Y3 Y2 Y1 Y0
+ byte 3: 0 T&P 1 0 1 0 0 Y7
+ byte 4: 0 Z6 Z5 Z4 Z3 Z2 Z1 Z0
+ byte 5: 0 0 0 0 0 0 0 0
+
+SWM, SWR, SWL: Middle, Right, and Left button states
+
+Touchpad 1 Finger packet (APD = 0x0):
+
+ b7 b6 b5 b4 b3 b2 b1 b0
+ byte 0: SWM SWR SWL 1 1 X2 X1 X0
+ byte 1: X9 X8 X7 1 X6 X5 X4 X3
+ byte 2: 0 X11 X10 LFB Y3 Y2 Y1 Y0
+ byte 3: Y5 Y4 0 0 1 TAPF2 TAPF1 TAPF0
+ byte 4: Zv7 Y11 Y10 1 Y9 Y8 Y7 Y6
+ byte 5: Zv6 Zv5 Zv4 0 Zv3 Zv2 Zv1 Zv0
+
+TAPF: ???
+LFB: ???
+
+Touchpad 2 Finger packet (APD = 0x1):
+
+ b7 b6 b5 b4 b3 b2 b1 b0
+ byte 0: SWM SWR SWL 1 1 AX6 AX5 AX4
+ byte 1: AX11 AX10 AX9 AX8 AX7 AZ1 AY4 AZ0
+ byte 2: AY11 AY10 AY9 CONT AY8 AY7 AY6 AY5
+ byte 3: 0 0 0 1 1 BX6 BX5 BX4
+ byte 4: BX11 BX10 BX9 BX8 BX7 BZ1 BY4 BZ0
+ byte 5: BY11 BY10 BY9 0 BY8 BY7 BY5 BY5
+
+CONT: A 3-or-4 Finger packet is to follow
+
+Touchpad 3-or-4 Finger packet (APD = 0x3):
+
+ b7 b6 b5 b4 b3 b2 b1 b0
+ byte 0: SWM SWR SWL 1 1 AX6 AX5 AX4
+ byte 1: AX11 AX10 AX9 AX8 AX7 AZ1 AY4 AZ0
+ byte 2: AY11 AY10 AY9 OVF AY8 AY7 AY6 AY5
+ byte 3: 0 0 1 1 1 BX6 BX5 BX4
+ byte 4: BX11 BX10 BX9 BX8 BX7 BZ1 BY4 BZ0
+ byte 5: BY11 BY10 BY9 0 BY8 BY7 BY5 BY5
+
+OVF: 5th finger detected
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 385a5ef41c17..9b9c4797fc55 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -41,6 +41,7 @@ This document describes the Linux kernel Makefiles.
--- 6.8 Custom kbuild commands
--- 6.9 Preprocessing linker scripts
--- 6.10 Generic header files
+ --- 6.11 Post-link pass
=== 7 Kbuild syntax for exported headers
--- 7.1 header-y
@@ -1237,6 +1238,21 @@ When kbuild executes, the following steps are followed (roughly):
to list the file in the Kbuild file.
See "7.4 generic-y" for further info on syntax etc.
+--- 6.11 Post-link pass
+
+ If the file arch/xxx/Makefile.postlink exists, this makefile
+ will be invoked for post-link objects (vmlinux and modules.ko)
+ for architectures to run post-link passes on. Must also handle
+ the clean target.
+
+ This pass runs after kallsyms generation. If the architecture
+ needs to modify symbol locations, rather than manipulate the
+ kallsyms, it may be easier to add another postlink target for
+ .tmp_vmlinux? targets to be called from link-vmlinux.sh.
+
+ For example, powerpc uses this to check relocation sanity of
+ the linked vmlinux file.
+
=== 7 Kbuild syntax for exported headers
The kernel includes a set of headers that is exported to userspace.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 58f3c1041759..37babf91f2cb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1511,7 +1511,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
controllers
i8042.notimeout [HW] Ignore timeout condition signalled by controller
- i8042.reset [HW] Reset the controller during init and cleanup
+ i8042.reset [HW] Reset the controller during init, cleanup and
+ suspend-to-ram transitions, only during s2r
+ transitions, or never reset
+ Format: { 1 | Y | y | 0 | N | n }
+ 1, Y, y: always reset controller
+ 0, N, n: don't ever reset controller
+ Default: only on s2r transitions on x86; most other
+ architectures force reset to be always executed
i8042.unlock [HW] Unlock (ignore) the keylock
i8042.kbdreset [HW] Reset device connected to KBD port
diff --git a/Documentation/kselftest.txt b/Documentation/kselftest.txt
index 979eacae243d..54bee77fa728 100644
--- a/Documentation/kselftest.txt
+++ b/Documentation/kselftest.txt
@@ -1,8 +1,9 @@
Linux Kernel Selftests
The kernel contains a set of "self tests" under the tools/testing/selftests/
-directory. These are intended to be small unit tests to exercise individual
-code paths in the kernel.
+directory. These are intended to be small tests to exercise individual code
+paths in the kernel. Tests are intended to be run after building, installing
+and booting a kernel.
On some systems, hot-plug tests could hang forever waiting for cpu and
memory to be ready to be offlined. A special hot-plug target is created
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index 7c0ac2a26b9e..86169dc766f7 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,9 @@
00-INDEX
- This file
-Makefile
- - Makefile for building dslm example program.
asus-laptop.txt
- information on the Asus Laptop Extras driver.
disk-shock-protection.txt
- information on hard disk shock protection.
-dslm.c
- - Simple Disk Sleep Monitor program
laptop-mode.txt
- how to conserve battery power using laptop-mode.
sony-laptop.txt
diff --git a/Documentation/laptops/Makefile b/Documentation/laptops/Makefile
deleted file mode 100644
index 0abe44f68965..000000000000
--- a/Documentation/laptops/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := dslm
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/laptops/laptop-mode.txt b/Documentation/laptops/laptop-mode.txt
index 4ebbfc3f1c6e..19276f5d195c 100644
--- a/Documentation/laptops/laptop-mode.txt
+++ b/Documentation/laptops/laptop-mode.txt
@@ -779,4 +779,4 @@ Monitoring tool
---------------
Bartek Kania submitted this, it can be used to measure how much time your disk
-spends spun up/down. See Documentation/laptops/dslm.c
+spends spun up/down. See tools/laptop/dslm/dslm.c
diff --git a/Documentation/mic/Makefile b/Documentation/mic/Makefile
deleted file mode 100644
index a191d453badf..000000000000
--- a/Documentation/mic/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := mpssd
diff --git a/Documentation/mic/mpssd/Makefile b/Documentation/mic/mpssd/Makefile
deleted file mode 100644
index 06871b0c08a6..000000000000
--- a/Documentation/mic/mpssd/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-ifndef CROSS_COMPILE
-# List of programs to build
-hostprogs-$(CONFIG_X86_64) := mpssd
-
-mpssd-objs := mpssd.o sysfs.o
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS += -I$(objtree)/usr/include -I$(srctree)/tools/include
-
-ifdef DEBUG
-HOSTCFLAGS += -DDEBUG=$(DEBUG)
-endif
-
-HOSTLOADLIBES_mpssd := -lpthread
-
-install:
- install mpssd /usr/sbin/mpssd
- install micctrl /usr/sbin/micctrl
-endif
diff --git a/Documentation/misc-devices/Makefile b/Documentation/misc-devices/Makefile
deleted file mode 100644
index e2b7aa4c9e21..000000000000
--- a/Documentation/misc-devices/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := mei
diff --git a/Documentation/misc-devices/mei/Makefile b/Documentation/misc-devices/mei/Makefile
deleted file mode 100644
index d758047d1b6d..000000000000
--- a/Documentation/misc-devices/mei/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := mei-amt-version
-HOSTCFLAGS_mei-amt-version.o += -I$(objtree)/usr/include
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index a7697783ac4c..c6beb5f1637f 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -10,8 +10,6 @@ LICENSE.qlge
- GPLv2 for QLogic Linux qlge NIC Driver
LICENSE.qlcnic
- GPLv2 for QLogic Linux qlcnic NIC Driver
-Makefile
- - Makefile for docsrc.
PLIP.txt
- PLIP: The Parallel Line Internet Protocol device driver
README.ipw2100
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
deleted file mode 100644
index 4c5d7c485439..000000000000
--- a/Documentation/networking/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := timestamping
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd..a20b2fae942b 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -29,8 +29,8 @@ A: There are always two trees (git repositories) in play. Both are driven
Linus, and net-next is where the new code goes for the future release.
You can find the trees here:
- http://git.kernel.org/?p=linux/kernel/git/davem/net.git
- http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
Q: How often do changes from these trees make it to the mainline Linus tree?
@@ -76,7 +76,7 @@ Q: So where are we now in this cycle?
A: Load the mainline (Linus) page here:
- http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
and note the top of the "tags" section. If it is rc1, it is early
in the dev cycle. If it was tagged rc7 a week ago, then a release
@@ -123,7 +123,7 @@ A: Normally Greg Kroah-Hartman collects stable commits himself, but
It contains the patches which Dave has selected, but not yet handed
off to Greg. If Greg already has the patch, then it will be here:
- http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+ https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
A quick way to find whether the patch is in this stable-queue is
to simply clone the repo, and then git grep the mainline commit ID, e.g.
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 4fb51d32fccc..399e4e866a9c 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -33,24 +33,6 @@ nf_conntrack_events - BOOLEAN
If this option is enabled, the connection tracking code will
provide userspace with connection tracking events via ctnetlink.
-nf_conntrack_events_retry_timeout - INTEGER (seconds)
- default 15
-
- This option is only relevant when "reliable connection tracking
- events" are used. Normally, ctnetlink is "lossy", that is,
- events are normally dropped when userspace listeners can't keep up.
-
- Userspace can request "reliable event mode". When this mode is
- active, the conntrack will only be destroyed after the event was
- delivered. If event delivery fails, the kernel periodically
- re-tries to send the event to userspace.
-
- This is the maximum interval the kernel should use when re-trying
- to deliver the destroy event.
-
- A higher number means there will be fewer delivery retries and it
- will take longer for a backlog to be processed.
-
nf_conntrack_expect_max - INTEGER
Maximum size of expectation table. Default value is
nf_conntrack_buckets / 256. Minimum is 1.
diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile
deleted file mode 100644
index 8c20dfaa4d6e..000000000000
--- a/Documentation/networking/timestamping/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# To compile, from the source root
-#
-# make headers_install
-# make M=documentation
-
-# List of programs to build
-hostprogs-y := hwtstamp_config timestamping txtimestamp
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
-HOSTCFLAGS_txtimestamp.o += -I$(objtree)/usr/include
-HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include
diff --git a/Documentation/pcmcia/Makefile b/Documentation/pcmcia/Makefile
deleted file mode 100644
index 47a8fa162683..000000000000
--- a/Documentation/pcmcia/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# List of programs to build
-hostprogs-y := crc32hash
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_crc32hash.o += -I$(objtree)/usr/include
diff --git a/Documentation/pcmcia/devicetable.txt b/Documentation/pcmcia/devicetable.txt
index 199afd100cf2..5f3e00ab54c4 100644
--- a/Documentation/pcmcia/devicetable.txt
+++ b/Documentation/pcmcia/devicetable.txt
@@ -27,7 +27,7 @@ pcmcia:m0149cC1ABf06pfn00fn00pa725B842DpbF1EFEE84pc0877B627pd00000000
The hex value after "pa" is the hash of product ID string 1, after "pb" for
string 2 and so on.
-Alternatively, you can use crc32hash (see Documentation/pcmcia/crc32hash.c)
+Alternatively, you can use crc32hash (see tools/pcmcia/crc32hash.c)
to determine the crc32 hash. Simply pass the string you want to evaluate
as argument to this program, e.g.:
-$ ./crc32hash "Dual Speed"
+$ tools/pcmcia/crc32hash "Dual Speed"
diff --git a/Documentation/prctl/Makefile b/Documentation/prctl/Makefile
deleted file mode 100644
index 44de3080c7f2..000000000000
--- a/Documentation/prctl/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-ifndef CROSS_COMPILE
-# List of programs to build
-hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_disable-tsc-ctxt-sw-stress-test.o += -I$(objtree)/usr/include
-HOSTCFLAGS_disable-tsc-on-off-stress-test.o += -I$(objtree)/usr/include
-HOSTCFLAGS_disable-tsc-test.o += -I$(objtree)/usr/include
-endif
diff --git a/Documentation/ptp/Makefile b/Documentation/ptp/Makefile
deleted file mode 100644
index 293d6c09a11f..000000000000
--- a/Documentation/ptp/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# List of programs to build
-hostprogs-y := testptp
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_testptp.o += -I$(objtree)/usr/include
-HOSTLOADLIBES_testptp := -lrt
diff --git a/Documentation/scsi/g_NCR5380.txt b/Documentation/scsi/g_NCR5380.txt
index fd880150aeea..e2c187947e58 100644
--- a/Documentation/scsi/g_NCR5380.txt
+++ b/Documentation/scsi/g_NCR5380.txt
@@ -21,16 +21,6 @@ NCR53c400 card, the Trantor T130B in its default configuration:
The NCR53c400 does not support DMA but it does have Pseudo-DMA which is
supported by the driver.
-If the default configuration does not work for you, you can use the kernel
-command lines (eg using the lilo append command):
- ncr5380=addr,irq
- ncr53c400=addr,irq
- ncr53c400a=addr,irq
- dtc3181e=addr,irq
-
-The driver does not probe for any addresses or ports other than those in
-the OVERRIDE or given to the kernel as above.
-
This driver provides some information on what it has detected in
/proc/scsi/g_NCR5380/x where x is the scsi card number as detected at boot
time. More info to come in the future.
@@ -38,6 +28,16 @@ time. More info to come in the future.
This driver works as a module.
When included as a module, parameters can be passed on the insmod/modprobe
command line:
+ irq=xx[,...] the interrupt(s)
+ base=xx[,...] the port or base address(es) (for port or memory mapped, resp.)
+ card=xx[,...] card type(s):
+ 0 = NCR5380,
+ 1 = NCR53C400,
+ 2 = NCR53C400A,
+ 3 = Domex Technology Corp 3181E (DTC3181E)
+ 4 = Hewlett Packard C2502
+
+These old-style parameters can support only one card:
ncr_irq=xx the interrupt
ncr_addr=xx the port or base address (for port or memory
mapped, resp.)
@@ -46,11 +46,19 @@ command line:
ncr_53c400a=1 to set up for a NCR53C400A board
dtc_3181e=1 to set up for a Domex Technology Corp 3181E board
hp_c2502=1 to set up for a Hewlett Packard C2502 board
+
e.g.
-modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+OLD: modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
+NEW: modprobe g_NCR5380 irq=5 base=0x350 card=0
for a port mapped NCR5380 board or
-modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
- for a memory mapped NCR53C400 board with interrupts disabled.
+
+OLD: modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
+NEW: modprobe g_NCR5380 irq=255 base=0xc8000 card=1
+ for a memory mapped NCR53C400 board with interrupts disabled or
+
+NEW: modprobe g_NCR5380 irq=0,7 base=0x240,0x300 card=3,4
+ for two cards: DTC3181 (in non-PnP mode) at 0x240 with no IRQ
+ and HP C2502 at 0x300 with IRQ 7
(255 should be specified for no or DMA interrupt, 254 to autoprobe for an
IRQ line if overridden on the command line.)
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
index 4644bf0d9832..8e4bb17d70eb 100644
--- a/Documentation/spi/00-INDEX
+++ b/Documentation/spi/00-INDEX
@@ -1,7 +1,5 @@
00-INDEX
- this file.
-Makefile
- - Makefile for the example sourcefiles.
butterfly
- AVR Butterfly SPI driver overview and pin configuration.
ep93xx_spi
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ee212a27772f..3be05fe0f1f9 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -4,12 +4,8 @@ highres.txt
- High resolution timers and dynamic ticks design notes
hpet.txt
- High Precision Event Timer Driver for Linux
-hpet_example.c
- - sample hpet timer test program
hrtimers.txt
- subsystem for high-resolution kernel timers
-Makefile
- - Build and link hpet_example
NO_HZ.txt
- Summary of the different methods for the scheduler clock-interrupts management.
timekeeping.txt
diff --git a/Documentation/timers/Makefile b/Documentation/timers/Makefile
deleted file mode 100644
index 6c09ee6ca721..000000000000
--- a/Documentation/timers/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-$(CONFIG_X86) := hpet_example
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index a484d2c109d7..895345ec513b 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -25,4 +25,4 @@ arch/x86/kernel/hpet.c.
The driver provides a userspace API which resembles the API found in the
RTC driver framework. An example user space program is provided in
-file:Documentation/timers/hpet_example.c
+file:samples/timers/hpet_example.c
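
To sketch what that userspace API looks like (an illustrative fragment only,
assuming the ioctls from <linux/hpet.h>; the full program is the sample file
referenced above), a client can open /dev/hpet, program an interrupt frequency,
enable the interrupt and then wait for it to fire:

    #include <fcntl.h>
    #include <poll.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/hpet.h>

    int main(void)
    {
            unsigned long data;
            struct pollfd pfd;
            int fd = open("/dev/hpet", O_RDONLY);

            if (fd < 0)
                    return 1;

            /* Assumed usage: ask for a 1 Hz interrupt, then enable it */
            if (ioctl(fd, HPET_IRQFREQ, 1) < 0 ||
                ioctl(fd, HPET_IE_ON, 0) < 0) {
                    close(fd);
                    return 1;
            }

            pfd.fd = fd;
            pfd.events = POLLIN;
            poll(&pfd, 1, -1);              /* block until the timer fires */
            read(fd, &data, sizeof(data));  /* interrupt count since last read */

            ioctl(fd, HPET_IE_OFF, 0);
            close(fd);
            return 0;
    }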
diff --git a/Documentation/vDSO/Makefile b/Documentation/vDSO/Makefile
deleted file mode 100644
index b12e98770e1f..000000000000
--- a/Documentation/vDSO/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-ifndef CROSS_COMPILE
-# vdso_test won't build for glibc < 2.16, so disable it
-# hostprogs-y := vdso_test
-hostprogs-$(CONFIG_X86) := vdso_standalone_test_x86
-vdso_standalone_test_x86-objs := vdso_standalone_test_x86.o parse_vdso.o
-vdso_test-objs := parse_vdso.o vdso_test.o
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99
-HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector
-HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib
-ifeq ($(CONFIG_X86_32),y)
-HOSTLOADLIBES_vdso_standalone_test_x86 += -lgcc_s
-endif
-endif
diff --git a/Documentation/watchdog/Makefile b/Documentation/watchdog/Makefile
deleted file mode 100644
index 6018f45f2471..000000000000
--- a/Documentation/watchdog/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := src
diff --git a/Documentation/watchdog/src/Makefile b/Documentation/watchdog/src/Makefile
deleted file mode 100644
index 4a892c304983..000000000000
--- a/Documentation/watchdog/src/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-# List of programs to build
-hostprogs-y := watchdog-simple watchdog-test
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt
index b3a701f48118..0e62ba33b7fb 100644
--- a/Documentation/watchdog/watchdog-api.txt
+++ b/Documentation/watchdog/watchdog-api.txt
@@ -37,7 +37,7 @@ activates as soon as /dev/watchdog is opened and will reboot unless
the watchdog is pinged within a certain time, this time is called the
timeout or margin. The simplest way to ping the watchdog is to write
some data to the device. So a very simple watchdog daemon would look
-like this source file: see Documentation/watchdog/src/watchdog-simple.c
+like this source file: see samples/watchdog/watchdog-simple.c
A more advanced driver could for example check that a HTTP server is
still responding before doing the write call to ping the watchdog.
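
The daemon that the hunk above refers to is easy to picture from that description alone. Below is a minimal sketch of such a pinger, written here for illustration only (it approximates what samples/watchdog/watchdog-simple.c does, but is not the file from the tree); the 10-second sleep is an assumed interval that simply has to stay below the configured timeout/margin:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);

        if (fd < 0)
                return 1;               /* no watchdog device, or no permission */

        for (;;) {
                /* any write counts as a ping and restarts the timeout */
                if (write(fd, "\0", 1) < 0)
                        break;
                sleep(10);              /* assumed interval; keep it below the margin */
        }
        return 0;
}
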
diff --git a/Documentation/watchdog/wdt.txt b/Documentation/watchdog/wdt.txt
index 061c2e35384f..ed2f0b860869 100644
--- a/Documentation/watchdog/wdt.txt
+++ b/Documentation/watchdog/wdt.txt
@@ -47,4 +47,4 @@ The external event interfaces on the WDT boards are not currently supported.
Minor numbers are however allocated for it.
-Example Watchdog Driver: see Documentation/watchdog/src/watchdog-simple.c
+Example Watchdog Driver: see samples/watchdog/watchdog-simple.c
diff --git a/MAINTAINERS b/MAINTAINERS
index f18b5467e37f..e5c17a951b7d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -316,6 +316,14 @@ W: https://01.org/linux-acpi
S: Supported
F: drivers/acpi/fan.c
+ACPI FOR ARM64 (ACPI/arm64)
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Hanjun Guo <hanjun.guo@linaro.org>
+M: Sudeep Holla <sudeep.holla@arm.com>
+L: linux-acpi@vger.kernel.org
+S: Maintained
+F: drivers/acpi/arm64
+
ACPI THERMAL DRIVER
M: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
@@ -1434,6 +1442,7 @@ F: drivers/cpufreq/mvebu-cpufreq.c
F: arch/arm/configs/mvebu_*_defconfig
ARM/Marvell Berlin SoC support
+M: Jisheng Zhang <jszhang@marvell.com>
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -2545,15 +2554,18 @@ S: Supported
F: drivers/net/ethernet/broadcom/genet/
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Dept-HSGLinuxNICDev@qlogic.com
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Harish Patil <harish.patil@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2.*
F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M: Ariel Elior <ariel.elior@qlogic.com>
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <ariel.elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
@@ -2760,7 +2772,9 @@ S: Supported
F: drivers/scsi/bfa/
BROCADE BNA 10 GIGABIT ETHERNET DRIVER
-M: Rasesh Mody <rasesh.mody@qlogic.com>
+M: Rasesh Mody <rasesh.mody@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/brocade/bna/
@@ -4614,8 +4628,9 @@ F: sound/usb/misc/ua101.c
EXTENSIBLE FIRMWARE INTERFACE (EFI)
M: Matt Fleming <matt@codeblueprint.co.uk>
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
L: linux-efi@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
F: Documentation/efi-stub.txt
F: arch/ia64/kernel/efi.c
@@ -5280,6 +5295,12 @@ M: Joe Perches <joe@perches.com>
S: Maintained
F: scripts/get_maintainer.pl
+GENWQE (IBM Generic Workqueue Card)
+M: Frank Haverkamp <haver@linux.vnet.ibm.com>
+M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+S: Supported
+F: drivers/misc/genwqe/
+
GFS2 FILE SYSTEM
M: Steven Whitehouse <swhiteho@redhat.com>
M: Bob Peterson <rpeterso@redhat.com>
@@ -6125,6 +6146,12 @@ M: Stanislaw Gruszka <stf_xl@wp.pl>
S: Maintained
F: drivers/usb/atm/ueagle-atm.c
+IMGTEC ASCII LCD DRIVER
+M: Paul Burton <paul.burton@imgtec.com>
+S: Maintained
+F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
+F: drivers/auxdisplay/img-ascii-lcd.c
+
INA209 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
@@ -6436,6 +6463,7 @@ F: include/linux/mei_cl_bus.h
F: drivers/misc/mei/*
F: drivers/watchdog/mei_wdt.c
F: Documentation/misc-devices/mei/*
+F: samples/mei/*
INTEL MIC DRIVERS (mic)
M: Sudeep Dutt <sudeep.dutt@intel.com>
@@ -6622,10 +6650,10 @@ S: Maintained
F: drivers/firmware/iscsi_ibft*
ISCSI
-M: Mike Christie <michaelc@cs.wisc.edu>
+M: Lee Duncan <lduncan@suse.com>
+M: Chris Leech <cleech@redhat.com>
L: open-iscsi@googlegroups.com
-W: www.open-iscsi.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mnc/linux-2.6-iscsi.git
+W: www.open-iscsi.com
S: Maintained
F: drivers/scsi/*iscsi*
F: include/scsi/*iscsi*
@@ -7203,17 +7231,11 @@ F: drivers/lightnvm/
F: include/linux/lightnvm.h
F: include/uapi/linux/lightnvm.h
-LINUX FOR IBM pSERIES (RS/6000)
-M: Paul Mackerras <paulus@au.ibm.com>
-W: http://www.ibm.com/linux/ltc/projects/ppc
-S: Supported
-F: arch/powerpc/boot/rs6000.h
-
LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
M: Paul Mackerras <paulus@samba.org>
M: Michael Ellerman <mpe@ellerman.id.au>
-W: http://www.penguinppc.org/
+W: https://github.com/linuxppc/linux/wiki
L: linuxppc-dev@lists.ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
@@ -7228,6 +7250,7 @@ F: drivers/net/ethernet/ibm/ibmvnic.*
F: drivers/pci/hotplug/pnv_php.c
F: drivers/pci/hotplug/rpa*
F: drivers/scsi/ibmvscsi/
+F: tools/testing/selftests/powerpc
N: opal
N: /pmac
N: powermac
@@ -7284,9 +7307,8 @@ F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
LINUX FOR POWERPC PA SEMI PWRFICIENT
-M: Olof Johansson <olof@lixom.net>
L: linuxppc-dev@lists.ozlabs.org
-S: Maintained
+S: Orphan
F: arch/powerpc/platforms/pasemi/
F: drivers/*/*pasemi*
F: drivers/*/*/*pasemi*
@@ -7829,6 +7851,13 @@ F: Documentation/scsi/megaraid.txt
F: drivers/scsi/megaraid.*
F: drivers/scsi/megaraid/
+MELFAS MIP4 TOUCHSCREEN DRIVER
+M: Sangwon Jee <jeesw@melfas.com>
+W: http://www.melfas.com
+S: Supported
+F: drivers/input/touchscreen/melfas_mip4.c
+F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
+
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Tariq Toukan <tariqt@mellanox.com>
L: netdev@vger.kernel.org
@@ -8085,6 +8114,7 @@ S: Maintained
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
+M: Jessica Yu <jeyu@redhat.com>
M: Rusty Russell <rusty@rustcorp.com.au>
S: Maintained
F: include/linux/module.h
@@ -8198,7 +8228,7 @@ F: include/linux/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-mmc@vger.kernel.org
-T: git git://git.linaro.org/people/ulf.hansson/mmc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
S: Maintained
F: Documentation/devicetree/bindings/mmc/
F: drivers/mmc/
@@ -8493,11 +8523,10 @@ F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra <manish.chopra@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Rahul Verma <rahul.verma@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
-W: http://www.qlogic.com
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
@@ -9020,15 +9049,13 @@ S: Maintained
F: drivers/net/wireless/intersil/p54/
PA SEMI ETHERNET DRIVER
-M: Olof Johansson <olof@lixom.net>
L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER
-M: Olof Johansson <olof@lixom.net>
L: linux-i2c@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/i2c/busses/i2c-pasemi.c
PADATA PARALLEL EXECUTION MECHANISM
@@ -9286,7 +9313,7 @@ S: Maintained
F: drivers/pci/host/*designware*
PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M: Joao Pinto <jpinto@synopsys.com>
+M: Jose Abreu <Jose.Abreu@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -9377,6 +9404,7 @@ W: http://lists.infradead.org/mailman/listinfo/linux-pcmcia
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git
S: Maintained
F: Documentation/pcmcia/
+F: tools/pcmcia/
F: drivers/pcmcia/
F: include/pcmcia/
@@ -9874,33 +9902,32 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Ron Mercer <ron.mercer@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Dept-GELinuxNICDev@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@qlogic.com>
-M: Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
-M: Dept-GELinuxNICDev@qlogic.com
-M: linux-driver@qlogic.com
+M: Harish Patil <harish.patil@cavium.com>
+M: Manish Chopra <manish.chopra@cavium.com>
+M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlge/
QLOGIC QL4xxx ETHERNET DRIVER
-M: Yuval Mintz <Yuval.Mintz@qlogic.com>
-M: Ariel Elior <Ariel.Elior@qlogic.com>
-M: everest-linux-l2@qlogic.com
+M: Yuval Mintz <Yuval.Mintz@cavium.com>
+M: Ariel Elior <Ariel.Elior@cavium.com>
+M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qed/
diff --git a/Makefile b/Makefile
index 27f97b53e6eb..a2650f9c6a25 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
-PATCHLEVEL = 8
+PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
@@ -623,6 +623,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
+KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
+endif
+
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
else
@@ -803,6 +808,10 @@ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
KBUILD_LDFLAGS_MODULE += $(LDFLAGS_BUILD_ID)
LDFLAGS_vmlinux += $(LDFLAGS_BUILD_ID)
+ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+LDFLAGS_vmlinux += $(call ld-option, --gc-sections,)
+endif
+
ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
@@ -927,9 +936,6 @@ vmlinux_prereq: $(vmlinux-deps) FORCE
ifdef CONFIG_HEADERS_CHECK
$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
endif
-ifdef CONFIG_BUILD_DOCSRC
- $(Q)$(MAKE) $(build)=Documentation
-endif
ifdef CONFIG_GDB_SCRIPTS
$(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py
endif
@@ -942,9 +948,12 @@ endif
include/generated/autoksyms.h: FORCE
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
-# Final link of vmlinux
- cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
-quiet_cmd_link-vmlinux = LINK $@
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Final link of vmlinux with optional arch pass after final link
+ cmd_link-vmlinux = \
+ $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
+ $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
+$(call if_changed,link-vmlinux)
@@ -1271,6 +1280,7 @@ $(clean-dirs):
vmlinuxclean:
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
+ $(Q)$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) clean)
clean: archclean vmlinuxclean
diff --git a/arch/Kconfig b/arch/Kconfig
index 180ea33164dc..659bdd079277 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -383,6 +383,24 @@ config GCC_PLUGIN_SANCOV
gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
by Dmitry Vyukov <dvyukov@google.com>.
+config GCC_PLUGIN_LATENT_ENTROPY
+ bool "Generate some entropy during boot and runtime"
+ depends on GCC_PLUGINS
+ help
+ By saying Y here the kernel will instrument some kernel code to
+ extract some entropy from both original and artificially created
+ program state. This will help especially embedded systems where
+	  there are few 'natural' sources of entropy normally. The cost
+ is some slowdown of the boot process (about 0.5%) and fork and
+ irq processing.
+
+ Note that entropy extracted this way is not cryptographically
+ secure!
+
+ This plugin was ported from grsecurity/PaX. More information at:
+ * https://grsecurity.net/
+ * https://pax.grsecurity.net/
+
config HAVE_CC_STACKPROTECTOR
bool
help
@@ -450,6 +468,27 @@ config CC_STACKPROTECTOR_STRONG
endchoice
+config THIN_ARCHIVES
+ bool
+ help
+ Select this if the architecture wants to use thin archives
+ instead of ld -r to create the built-in.o files.
+
+config LD_DEAD_CODE_DATA_ELIMINATION
+ bool
+ help
+ Select this if the architecture wants to do dead code and
+ data elimination with the linker by compiling with
+ -ffunction-sections -fdata-sections and linking with
+ --gc-sections.
+
+ This requires that the arch annotates or otherwise protects
+ its external entry points from being discarded. Linker scripts
+ must also merge .text.*, .data.*, and .bss.* correctly into
+ output sections. Care must be taken not to pull in unrelated
+ sections (e.g., '.text.init'). Typically '.' in section names
+ is used to distinguish them from label names / C identifiers.
+
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index ffd9cf5ec8c4..bf8475ce85ee 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,7 @@
generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
+generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
index 466e42e96bfa..94f587535dee 100644
--- a/arch/alpha/include/asm/uaccess.h
+++ b/arch/alpha/include/asm/uaccess.h
@@ -396,11 +396,12 @@ copy_to_user(void __user *to, const void *from, long n)
extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
+ long res = n;
if (likely(__access_ok((unsigned long)from, n, get_fs())))
- n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
- else
- memset(to, 0, n);
- return n;
+ res = __copy_from_user_inatomic(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
extern void __do_clear_user(void);
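
The change above tightens the copy_from_user() contract on alpha: on a partial copy, only the bytes that could not be transferred are cleared, rather than wiping the whole destination (or, on some paths, nothing at all). A stand-alone sketch of that tail-zeroing rule, with generic names invented here purely for illustration, looks like this:

#include <stddef.h>
#include <string.h>

/*
 * 'copied' is how many leading bytes the faulting user copy managed to
 * transfer; everything after that point is cleared so the caller never
 * sees stale kernel memory, and the return value is the number of bytes
 * that were NOT copied (0 on full success), as in the hunk above.
 */
static size_t zero_uncopied_tail(void *to, size_t n, size_t copied)
{
        size_t res = n - copied;

        if (res)
                memset((char *)to + copied, 0, res);
        return res;
}
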
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index 3ecac0106c8a..8ce13d7a2ad3 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -Wno-sign-compare
obj-y := entry.o traps.o process.o osf_sys.o irq.o \
irq_alpha.o signal.o setup.o ptrace.o time.o \
- alpha_ksyms.o systbls.o err_common.o io.o
+ systbls.o err_common.o io.o
obj-$(CONFIG_VGA_HOSE) += console.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
deleted file mode 100644
index f4c7ab6f43b0..000000000000
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * linux/arch/alpha/kernel/alpha_ksyms.c
- *
- * Export the alpha-specific functions that are needed for loadable
- * modules.
- */
-
-#include <linux/module.h>
-#include <asm/console.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpu.h>
-#include <asm/machvec.h>
-
-#include <linux/syscalls.h>
-
-/* these are C runtime functions with special calling conventions: */
-extern void __divl (void);
-extern void __reml (void);
-extern void __divq (void);
-extern void __remq (void);
-extern void __divlu (void);
-extern void __remlu (void);
-extern void __divqu (void);
-extern void __remqu (void);
-
-EXPORT_SYMBOL(alpha_mv);
-EXPORT_SYMBOL(callback_getenv);
-EXPORT_SYMBOL(callback_setenv);
-EXPORT_SYMBOL(callback_save_env);
-
-/* platform dependent support */
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strcpy);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(___memset);
-EXPORT_SYMBOL(__memsetw);
-EXPORT_SYMBOL(__constant_c_memset);
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(alpha_read_fp_reg);
-EXPORT_SYMBOL(alpha_read_fp_reg_s);
-EXPORT_SYMBOL(alpha_write_fp_reg);
-EXPORT_SYMBOL(alpha_write_fp_reg_s);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_tcpudp_magic);
-EXPORT_SYMBOL(ip_compute_csum);
-EXPORT_SYMBOL(ip_fast_csum);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#ifdef CONFIG_MATHEMU_MODULE
-extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
-extern long (*alpha_fp_emul) (unsigned long pc);
-EXPORT_SYMBOL(alpha_fp_emul_imprecise);
-EXPORT_SYMBOL(alpha_fp_emul);
-#endif
-
-/*
- * The following are specially called from the uaccess assembly stubs.
- */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-
-/*
- * SMP-specific symbols.
- */
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif /* CONFIG_SMP */
-
-/*
- * The following are special because they're not called
- * explicitly (the C compiler or assembler generates them in
- * response to division operations). Fortunately, their
- * interface isn't gonna change any time soon now, so it's OK
- * to leave it out of version control.
- */
-# undef memcpy
-# undef memset
-EXPORT_SYMBOL(__divl);
-EXPORT_SYMBOL(__divlu);
-EXPORT_SYMBOL(__divq);
-EXPORT_SYMBOL(__divqu);
-EXPORT_SYMBOL(__reml);
-EXPORT_SYMBOL(__remlu);
-EXPORT_SYMBOL(__remq);
-EXPORT_SYMBOL(__remqu);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
index d3398f6ab74c..b7d69604b6d2 100644
--- a/arch/alpha/kernel/machvec_impl.h
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -144,9 +144,11 @@
else beforehand. Fine. We'll do it ourselves. */
#if 0
#define ALIAS_MV(system) \
- struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+ struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv"))); \
+ EXPORT_SYMBOL(alpha_mv);
#else
#define ALIAS_MV(system) \
- asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+ asm(".global alpha_mv\nalpha_mv = " #system "_mv"); \
+ EXPORT_SYMBOL(alpha_mv);
#endif
#endif /* GENERIC */
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee81769899..940dfb406591 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
static inline int
read_int(struct task_struct *task, unsigned long addr, int * data)
{
- int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+ int copied = access_process_vm(task, addr, data, sizeof(int),
+ FOLL_FORCE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
static inline int
write_int(struct task_struct *task, unsigned long addr, int data)
{
- int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+ int copied = access_process_vm(task, addr, &data, sizeof(int),
+ FOLL_FORCE | FOLL_WRITE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
/* When I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+ FOLL_FORCE);
ret = -EIO;
if (copied != sizeof(tmp))
break;
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index b20af76f12c1..4811e54069fc 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -115,6 +115,7 @@ unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
#ifdef CONFIG_ALPHA_GENERIC
struct alpha_machine_vector alpha_mv;
+EXPORT_SYMBOL(alpha_mv);
#endif
#ifndef alpha_using_srm
diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S
index 8804bec2c644..6093addc931a 100644
--- a/arch/alpha/lib/callback_srm.S
+++ b/arch/alpha/lib/callback_srm.S
@@ -3,6 +3,7 @@
*/
#include <asm/console.h>
+#include <asm/export.h>
.text
#define HWRPB_CRB_OFFSET 0xc0
@@ -92,6 +93,10 @@ CALLBACK(reset_env, CCB_RESET_ENV, 4)
CALLBACK(save_env, CCB_SAVE_ENV, 1)
CALLBACK(pswitch, CCB_PSWITCH, 3)
CALLBACK(bios_emul, CCB_BIOS_EMUL, 5)
+
+EXPORT_SYMBOL(callback_getenv)
+EXPORT_SYMBOL(callback_setenv)
+EXPORT_SYMBOL(callback_save_env)
.data
__alpha_using_srm: # For use by bootpheader
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index 377f9e34eb97..b57f8007db14 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -48,6 +48,7 @@ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
(__force u64)saddr + (__force u64)daddr +
(__force u64)sum + ((len + proto) << 8));
}
+EXPORT_SYMBOL(csum_tcpudp_magic);
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto, __wsum sum)
@@ -144,6 +145,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
return (__force __sum16)~do_csum(iph,ihl*4);
}
+EXPORT_SYMBOL(ip_fast_csum);
/*
* computes the checksum of a memory block at buff, length len,
@@ -178,3 +180,4 @@ __sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~from64to16(do_csum(buff,len));
}
+EXPORT_SYMBOL(ip_compute_csum);
diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S
index a221ae266e29..263d7393c0e7 100644
--- a/arch/alpha/lib/clear_page.S
+++ b/arch/alpha/lib/clear_page.S
@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global clear_page
@@ -37,3 +37,4 @@ clear_page:
nop
.end clear_page
+ EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
index 8860316c1957..bf5b931866ba 100644
--- a/arch/alpha/lib/clear_user.S
+++ b/arch/alpha/lib/clear_user.S
@@ -24,6 +24,7 @@
* Clobbers:
* $1,$2,$3,$4,$5,$6
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
@@ -111,3 +112,4 @@ $exception:
ret $31, ($28), 1 # .. e1 :
.end __do_clear_user
+ EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S
index 9f3b97459cc6..2ee0bd0508c5 100644
--- a/arch/alpha/lib/copy_page.S
+++ b/arch/alpha/lib/copy_page.S
@@ -3,7 +3,7 @@
*
* Copy an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global copy_page
@@ -47,3 +47,4 @@ copy_page:
nop
.end copy_page
+ EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 6f3fab9eb434..509f62b65311 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -26,6 +26,8 @@
* $1,$2,$3,$4,$5,$6,$7
*/
+#include <asm/export.h>
+
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
@@ -124,22 +126,9 @@ $65:
bis $31,$31,$0
$41:
$35:
-$exitout:
- ret $31,($28),1
-
$exitin:
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- mov $0,$1
-$101:
- EXO ( ldq_u $2,0($6) )
- subq $1,1,$1
- mskbl $2,$6,$2
- EXO ( stq_u $2,0($6) )
- addq $6,1,$6
- bgt $1,$101
+$exitout:
ret $31,($28),1
.end __copy_user
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S
index 2c2acb96deb6..e74b4544b0cc 100644
--- a/arch/alpha/lib/csum_ipv6_magic.S
+++ b/arch/alpha/lib/csum_ipv6_magic.S
@@ -12,6 +12,7 @@
* added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
*/
+#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
@@ -113,3 +114,4 @@ csum_ipv6_magic:
ret # .. e1 :
.end csum_ipv6_magic
+ EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 5675dca8dbb1..b4ff3b683bcd 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -374,6 +374,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
}
return (__force __wsum)checksum;
}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -386,3 +387,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
set_fs(oldfs);
return checksum;
}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
index f9f5fe830e9f..4221b40167ee 100644
--- a/arch/alpha/lib/dec_and_lock.c
+++ b/arch/alpha/lib/dec_and_lock.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/export.h>
asm (".text \n\
.global _atomic_dec_and_lock \n\
@@ -39,3 +40,4 @@ static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
spin_unlock(lock);
return 0;
}
+EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S
index 2d1a0484a99e..1e33bd127621 100644
--- a/arch/alpha/lib/divide.S
+++ b/arch/alpha/lib/divide.S
@@ -45,6 +45,7 @@
* $28 - compare status
*/
+#include <asm/export.h>
#define halt .long 0
/*
@@ -151,6 +152,7 @@ ufunction:
addq $30,STACK,$30
ret $31,($23),1
.end ufunction
+EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -193,3 +195,4 @@ sfunction:
addq $30,STACK,$30
ret $31,($23),1
.end sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S
index adf4f7be0e2b..abe99e69a194 100644
--- a/arch/alpha/lib/ev6-clear_page.S
+++ b/arch/alpha/lib/ev6-clear_page.S
@@ -3,7 +3,7 @@
*
* Zero an entire page.
*/
-
+#include <asm/export.h>
.text
.align 4
.global clear_page
@@ -52,3 +52,4 @@ clear_page:
nop
.end clear_page
+ EXPORT_SYMBOL(clear_page)
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
index 4f42a16b7f53..05bef6b50598 100644
--- a/arch/alpha/lib/ev6-clear_user.S
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -43,6 +43,7 @@
* want to leave a hole (and we also want to avoid repeating lots of work)
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
99: x,##y; \
@@ -222,4 +223,4 @@ $exception: # Destination for exception recovery(?)
nop # .. E .. .. :
ret $31, ($28), 1 # L0 .. .. .. : L U L U
.end __do_clear_user
-
+ EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S
index b789db192754..77935061bddb 100644
--- a/arch/alpha/lib/ev6-copy_page.S
+++ b/arch/alpha/lib/ev6-copy_page.S
@@ -56,7 +56,7 @@
destination pages are in the dcache, but it is my guess that this is
less important than the dcache miss case. */
-
+#include <asm/export.h>
.text
.align 4
.global copy_page
@@ -201,3 +201,4 @@ copy_page:
nop
.end copy_page
+ EXPORT_SYMBOL(copy_page)
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index db42ffe9c350..be720b518af9 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -37,6 +37,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
+#include <asm/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
@@ -227,33 +228,12 @@ $dirtyentry:
bgt $0,$onebyteloop # U .. .. .. : U L U L
$zerolength:
+$exitin:
$exitout: # Destination for exception recovery(?)
nop # .. .. .. E
nop # .. .. E ..
nop # .. E .. ..
ret $31,($28),1 # L0 .. .. .. : L U L U
-$exitin:
-
- /* A stupid byte-by-byte zeroing of the rest of the output
- buffer. This cures security holes by never leaving
- random kernel data around to be copied elsewhere. */
-
- nop
- nop
- nop
- mov $0,$1
-
-$101:
- EXO ( stb $31,0($6) ) # L
- subq $1,1,$1 # E
- addq $6,1,$6 # E
- bgt $1,$101 # U
-
- nop
- nop
- nop
- ret $31,($28),1 # L0
-
.end __copy_user
-
+ EXPORT_SYMBOL(__copy_user)
diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S
index fc0bc399f872..de62627ac4fe 100644
--- a/arch/alpha/lib/ev6-csum_ipv6_magic.S
+++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S
@@ -52,6 +52,7 @@
* may cause additional delay in rare cases (load-load replay traps).
*/
+#include <asm/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
@@ -148,3 +149,4 @@ csum_ipv6_magic:
ret # L0 : L U L U
.end csum_ipv6_magic
+ EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S
index 2a82b9be93fa..d18dc0e96e3d 100644
--- a/arch/alpha/lib/ev6-divide.S
+++ b/arch/alpha/lib/ev6-divide.S
@@ -55,6 +55,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
+#include <asm/export.h>
#define halt .long 0
/*
@@ -205,6 +206,7 @@ ufunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end ufunction
+EXPORT_SYMBOL(ufunction)
/*
* Uhh.. Ugly signed division. I'd rather not have it at all, but
@@ -257,3 +259,4 @@ sfunction:
addq $30,STACK,$30 # E :
ret $31,($23),1 # L0 : L U U L
.end sfunction
+EXPORT_SYMBOL(sfunction)
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index 1a5f71b9d8b1..419adc53ccb4 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -27,7 +27,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -189,3 +189,4 @@ $not_found:
ret # L0 :
.end memchr
+ EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S
index 52b37b0f2af5..b19798b2efc0 100644
--- a/arch/alpha/lib/ev6-memcpy.S
+++ b/arch/alpha/lib/ev6-memcpy.S
@@ -19,7 +19,7 @@
* Temp usage notes:
* $1,$2, - scratch
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -242,6 +242,7 @@ $nomoredata:
nop # E :
.end memcpy
+ EXPORT_SYMBOL(memcpy)
/* For backwards module compatibility. */
__memcpy = memcpy
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 356bb2fdd705..fed21c6893e8 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -26,7 +26,7 @@
* as fixes will need to be made in multiple places. The performance gain
* is worth it.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -229,6 +229,7 @@ end_b:
nop
ret $31,($26),1 # L0 :
.end ___memset
+ EXPORT_SYMBOL(___memset)
/*
* This is the original body of code, prior to replication and
@@ -406,6 +407,7 @@ end:
nop
ret $31,($26),1 # L0 :
.end __constant_c_memset
+ EXPORT_SYMBOL(__constant_c_memset)
/*
* This is a replicant of the __constant_c_memset code, rescheduled
@@ -594,6 +596,9 @@ end_w:
ret $31,($26),1 # L0 :
.end __memsetw
+ EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
+ EXPORT_SYMBOL(memset)
+ EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S
index c426fe3ed72f..b69f60419be1 100644
--- a/arch/alpha/lib/ev67-strcat.S
+++ b/arch/alpha/lib/ev67-strcat.S
@@ -19,7 +19,7 @@
* string once.
*/
-
+#include <asm/export.h>
.text
.align 4
@@ -52,3 +52,4 @@ $found: cttz $2, $3 # U0 :
br __stxcpy # L0 :
.end strcat
+ EXPORT_SYMBOL(strcat)
diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S
index fbb7b4ffade9..ea8f2f35db9c 100644
--- a/arch/alpha/lib/ev67-strchr.S
+++ b/arch/alpha/lib/ev67-strchr.S
@@ -15,7 +15,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -86,3 +86,4 @@ $found: negq t0, t1 # E : clear all but least set bit
ret # L0 :
.end strchr
+ EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S
index 503928072523..736fd41884a8 100644
--- a/arch/alpha/lib/ev67-strlen.S
+++ b/arch/alpha/lib/ev67-strlen.S
@@ -17,7 +17,7 @@
* U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -47,3 +47,4 @@ $found:
ret $31, ($26) # L0 :
.end strlen
+ EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S
index 4ae716cd2bfb..cd35cbade73a 100644
--- a/arch/alpha/lib/ev67-strncat.S
+++ b/arch/alpha/lib/ev67-strncat.S
@@ -20,7 +20,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
-
+#include <asm/export.h>
.text
.align 4
@@ -92,3 +92,4 @@ $zerocount:
ret # L0 :
.end strncat
+ EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S
index dd0d8c6b9f59..747455f0328c 100644
--- a/arch/alpha/lib/ev67-strrchr.S
+++ b/arch/alpha/lib/ev67-strrchr.S
@@ -18,7 +18,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -107,3 +107,4 @@ $eos:
nop
.end strrchr
+ EXPORT_SYMBOL(strrchr)
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 05017ba34c3c..4aa6dbfa14ee 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,6 +4,9 @@
* (C) Copyright 1998 Linus Torvalds
*/
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
#else
@@ -52,6 +55,7 @@ alpha_read_fp_reg (unsigned long reg)
}
return val;
}
+EXPORT_SYMBOL(alpha_read_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val));
@@ -97,6 +101,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val)
case 31: LDT(31, val); break;
}
}
+EXPORT_SYMBOL(alpha_write_fp_reg);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val));
@@ -146,6 +151,7 @@ alpha_read_fp_reg_s (unsigned long reg)
}
return val;
}
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val));
@@ -191,3 +197,4 @@ alpha_write_fp_reg_s (unsigned long reg, unsigned long val)
case 31: LDS(31, val); break;
}
}
+EXPORT_SYMBOL(alpha_write_fp_reg_s);
diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S
index 14427eeb555e..c13d3eca2e05 100644
--- a/arch/alpha/lib/memchr.S
+++ b/arch/alpha/lib/memchr.S
@@ -31,7 +31,7 @@ For correctness consider that:
- only minimum number of quadwords may be accessed
- the third argument is an unsigned long
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -162,3 +162,4 @@ $not_found:
ret # .. e1 :
.end memchr
+ EXPORT_SYMBOL(memchr)
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
index 64083fc73238..57d9291ad172 100644
--- a/arch/alpha/lib/memcpy.c
+++ b/arch/alpha/lib/memcpy.c
@@ -16,6 +16,7 @@
*/
#include <linux/types.h>
+#include <linux/export.h>
/*
* This should be done in one go with ldq_u*2/mask/stq_u. Do it
@@ -158,6 +159,4 @@ void * memcpy(void * dest, const void *src, size_t n)
__memcpy_unaligned_up ((unsigned long) dest, (unsigned long) src, n);
return dest;
}
-
-/* For backward modules compatibility, define __memcpy. */
-asm("__memcpy = memcpy; .globl __memcpy");
+EXPORT_SYMBOL(memcpy);
diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S
index eb3b6e02242f..6872c85cb5e5 100644
--- a/arch/alpha/lib/memmove.S
+++ b/arch/alpha/lib/memmove.S
@@ -6,7 +6,7 @@
* This is hand-massaged output from the original memcpy.c. We defer to
* memcpy whenever possible; the backwards copy loops are not unrolled.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -179,3 +179,4 @@ $egress:
nop
.end memmove
+ EXPORT_SYMBOL(memmove)
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 76ccc6d1f364..89a26f5e89de 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -13,7 +13,7 @@
* The scheduling comments are according to the EV5 documentation (and done by
* hand, so they might well be incorrect, please do tell me about it..)
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
.text
@@ -106,6 +106,8 @@ within_one_quad:
end:
ret $31,($26),1 /* E1 */
.end ___memset
+EXPORT_SYMBOL(___memset)
+EXPORT_SYMBOL(__constant_c_memset)
.align 5
.ent __memsetw
@@ -122,6 +124,9 @@ __memsetw:
br __constant_c_memset /* .. E1 */
.end __memsetw
+EXPORT_SYMBOL(__memsetw)
memset = ___memset
__memset = ___memset
+ EXPORT_SYMBOL(memset)
+ EXPORT_SYMBOL(__memset)
diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S
index 393f50384878..249837b03d4b 100644
--- a/arch/alpha/lib/strcat.S
+++ b/arch/alpha/lib/strcat.S
@@ -4,6 +4,7 @@
*
* Append a null-terminated string from SRC to DST.
*/
+#include <asm/export.h>
.text
@@ -50,3 +51,4 @@ $found: negq $2, $3 # clear all but least set bit
br __stxcpy
.end strcat
+EXPORT_SYMBOL(strcat);
diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S
index 011a175e8329..7412a173ea39 100644
--- a/arch/alpha/lib/strchr.S
+++ b/arch/alpha/lib/strchr.S
@@ -5,7 +5,7 @@
* Return the address of a given character within a null-terminated
* string, or null if it is not found.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -68,3 +68,4 @@ $retnull:
ret # .. e1 :
.end strchr
+ EXPORT_SYMBOL(strchr)
diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S
index e0728e4ad21f..98deae1e4d08 100644
--- a/arch/alpha/lib/strcpy.S
+++ b/arch/alpha/lib/strcpy.S
@@ -5,7 +5,7 @@
* Copy a null-terminated string from SRC to DST. Return a pointer
* to the null-terminator in the source.
*/
-
+#include <asm/export.h>
.text
.align 3
@@ -21,3 +21,4 @@ strcpy:
br __stxcpy # do the copy
.end strcpy
+ EXPORT_SYMBOL(strcpy)
diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S
index fe63353de152..79c416f71bac 100644
--- a/arch/alpha/lib/strlen.S
+++ b/arch/alpha/lib/strlen.S
@@ -11,7 +11,7 @@
* do this instead of the 9 instructions that
* binary search needs).
*/
-
+#include <asm/export.h>
.set noreorder
.set noat
@@ -55,3 +55,4 @@ done: subq $0, $16, $0
ret $31, ($26)
.end strlen
+ EXPORT_SYMBOL(strlen)
diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S
index a8278163c972..6c29ea60869a 100644
--- a/arch/alpha/lib/strncat.S
+++ b/arch/alpha/lib/strncat.S
@@ -9,7 +9,7 @@
* past count, whereas libc may write to count+1. This follows the generic
* implementation in lib/string.c and is, IMHO, more sensible.
*/
-
+#include <asm/export.h>
.text
.align 3
@@ -82,3 +82,4 @@ $zerocount:
ret
.end strncat
+ EXPORT_SYMBOL(strncat)
diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S
index a46f7f3ad8c7..e102cf1567dd 100644
--- a/arch/alpha/lib/strncpy.S
+++ b/arch/alpha/lib/strncpy.S
@@ -10,7 +10,7 @@
* version has cropped that bit o' nastiness as well as assuming that
* __stxncpy is in range of a branch.
*/
-
+#include <asm/export.h>
.set noat
.set noreorder
@@ -79,3 +79,4 @@ $zerolen:
ret
.end strncpy
+ EXPORT_SYMBOL(strncpy)
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S
index 1970dc07cfd1..4bc6cb4b9812 100644
--- a/arch/alpha/lib/strrchr.S
+++ b/arch/alpha/lib/strrchr.S
@@ -5,7 +5,7 @@
* Return the address of the last occurrence of a given character
* within a null-terminated string, or null if it is not found.
*/
-
+#include <asm/export.h>
#include <asm/regdef.h>
.set noreorder
@@ -85,3 +85,4 @@ $retnull:
ret # .. e1 :
.end strrchr
+ EXPORT_SYMBOL(strrchr)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index ecd12379e2cd..bd204bfa29ed 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,8 @@ config ARC
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZMA
config MIGHT_HAVE_PCI
bool
@@ -186,14 +188,6 @@ if SMP
config ARC_HAS_COH_CACHES
def_bool n
-config ARC_MCIP
- bool "ARConnect Multicore IP (MCIP) Support "
- depends on ISA_ARCV2
- help
- This IP block enables SMP in ARC-HS38 cores.
- It provides for cross-core interrupts, multi-core debug
- hardware semaphores, shared memory,....
-
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
@@ -211,6 +205,15 @@ config ARC_SMP_HALT_ON_RESET
endif #SMP
+config ARC_MCIP
+ bool "ARConnect Multicore IP (MCIP) Support "
+ depends on ISA_ARCV2
+ default y if SMP
+ help
+ This IP block enables SMP in ARC-HS38 cores.
+ It provides for cross-core interrupts, multi-core debug
+ hardware semaphores, shared memory,....
+
menuconfig ARC_CACHE
bool "Enable Cache Support"
default y
@@ -537,14 +540,6 @@ config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
default n
-config ARC_DBG_TLB_MISS_COUNT
- bool "Profile TLB Misses"
- default n
- select DEBUG_FS
- help
- Counts number of I and D TLB Misses and exports them via Debugfs
- The counters can be cleared via Debugfs as well
-
endif
config ARC_UBOOT_SUPPORT
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aa82d13d4213..864adad52280 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -50,9 +50,6 @@ atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
cflags-$(atleast_gcc44) += -fsection-anchors
-cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
-cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
-
ifdef CONFIG_ISA_ARCV2
ifndef CONFIG_ARC_HAS_LL64
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index e597cb34c16a..f94cf151e06a 100644
--- a/arch/arc/boot/Makefile
+++ b/arch/arc/boot/Makefile
@@ -14,9 +14,15 @@ UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
suffix-y := bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
-targets += uImage uImage.bin uImage.gz
-extra-y += vmlinux.bin vmlinux.bin.gz
+targets += uImage
+targets += uImage.bin
+targets += uImage.gz
+targets += uImage.lzma
+extra-y += vmlinux.bin
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
@@ -24,12 +30,18 @@ $(obj)/vmlinux.bin: vmlinux FORCE
$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,uimage,lzma)
+
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index db25c65155cb..7f3f9f63708c 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -349,10 +349,11 @@ struct cpuinfo_arc {
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
+ const char *details, *name;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
+ unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index fb781e34f322..b3410ff6a62d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,7 +53,7 @@ extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
-extern int ioc_exists;
+extern int ioc_enable;
extern unsigned long perip_base, perip_end;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
index 7096f97a1434..aa2d6da9d187 100644
--- a/arch/arc/include/asm/elf.h
+++ b/arch/arc/include/asm/elf.h
@@ -54,7 +54,7 @@ extern int elf_check_arch(const struct elf32_hdr *);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (2UL * TASK_SIZE / 3)
/*
* When the program starts, a1 contains a pointer to a function to be
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 847e3bbe387f..c8fbe4114bad 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -55,6 +55,22 @@ struct mcip_cmd {
#define IDU_M_DISTRI_DEST 0x2
};
+struct mcip_bcr {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:8,
+ idu:1, llm:1, num_cores:6,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
+ msg:1, sem:1, ipi:1, pad:1,
+ ver:8;
+#else
+ unsigned int ver:8,
+ pad:1, ipi:1, sem:1, msg:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
+ num_cores:6, llm:1, idu:1,
+ pad3:8;
+#endif
+};
+
/*
* MCIP programming model
*
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 518222bb3f8e..6e91d8b339c3 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -18,6 +18,7 @@
struct mod_arch_specific {
void *unw_info;
int unw_sec_idx;
+ const char *secstr;
};
#endif
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 48b37c693db3..cb954cdab070 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -27,11 +27,6 @@ struct id_to_str {
const char *str;
};
-struct cpuinfo_data {
- struct id_to_str info;
- int up_range;
-};
-
extern int root_mountflags, end_mem;
void setup_processor(void);
@@ -43,5 +38,6 @@ void __init setup_arch_memory(void);
#define IS_USED_RUN(v) ((v) ? "" : "(not used) ")
#define IS_USED_CFG(cfg) IS_USED_RUN(IS_ENABLED(cfg))
#define IS_AVAIL2(v, s, cfg) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
+#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index e56f9fcc5581..772b67ca56e7 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -17,6 +17,7 @@ int sys_clone_wrapper(int, int, int, int, int);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);
+int sys_arc_usr_cmpxchg(int *, int, int);
#include <asm-generic/syscalls.h>
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 41fa2ec9e02c..9a34136d84b2 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -27,18 +27,19 @@
#define NR_syscalls __NR_syscalls
+/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h) */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+
/* ARC specific syscall */
#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+#define __NR_arc_usr_cmpxchg (__NR_arch_specific_syscall + 4)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
__SYSCALL(__NR_arc_settls, sys_arc_settls)
__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
-
-
-/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
-#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_arc_usr_cmpxchg, sys_arc_usr_cmpxchg)
__SYSCALL(__NR_sysfs, sys_sysfs)
#undef __SYSCALL
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 72f9179b1a24..c424d5abc318 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -15,11 +15,12 @@
#include <asm/mcip.h>
#include <asm/setup.h>
-static char smp_cpuinfo_buf[128];
-static int idu_detected;
-
static DEFINE_RAW_SPINLOCK(mcip_lock);
+#ifdef CONFIG_SMP
+
+static char smp_cpuinfo_buf[128];
+
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
@@ -86,21 +87,7 @@ static void mcip_ipi_clear(int irq)
static void mcip_probe_n_setup(void)
{
- struct mcip_bcr {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad3:8,
- idu:1, llm:1, num_cores:6,
- iocoh:1, gfrc:1, dbg:1, pad2:1,
- msg:1, sem:1, ipi:1, pad:1,
- ver:8;
-#else
- unsigned int ver:8,
- pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, gfrc:1, iocoh:1,
- num_cores:6, llm:1, idu:1,
- pad3:8;
-#endif
- } mp;
+ struct mcip_bcr mp;
READ_BCR(ARC_REG_MCIP_BCR, mp);
@@ -114,7 +101,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.gfrc, "GFRC"));
cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
- idu_detected = mp.idu;
if (mp.dbg) {
__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
@@ -130,6 +116,8 @@ struct plat_smp_ops plat_smp_ops = {
.ipi_clear = mcip_ipi_clear,
};
+#endif
+
/***************************************************************************
* ARCv2 Interrupt Distribution Unit (IDU)
*
@@ -295,8 +283,11 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
/* Read IDU BCR to confirm nr_irqs */
int nr_irqs = of_irq_count(intc);
int i, irq;
+ struct mcip_bcr mp;
+
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
- if (!idu_detected)
+ if (!mp.idu)
panic("IDU not detected, but DeviceTree using it");
pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 9a2849756022..42e964db2967 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -30,17 +30,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstr, struct module *mod)
{
#ifdef CONFIG_ARC_DW2_UNWIND
- int i;
-
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
-
- for (i = 1; i < hdr->e_shnum; i++) {
- if (strcmp(secstr+sechdrs[i].sh_name, ".eh_frame") == 0) {
- mod->arch.unw_sec_idx = i;
- break;
- }
- }
+ mod->arch.secstr = secstr;
#endif
return 0;
}
@@ -59,29 +51,33 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
unsigned int relsec, /* sec index for relo sec */
struct module *module)
{
- int i, n;
+ int i, n, relo_type;
Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym_entry, *sym_sec;
- Elf32_Addr relocation;
- Elf32_Addr location;
- Elf32_Addr sec_to_patch;
- int relo_type;
-
- sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ Elf32_Addr relocation, location, tgt_addr;
+ unsigned int tgtsec;
+
+ /*
+ * @relsec has relocations e.g. .rela.init.text
+ * @tgtsec is section to patch e.g. .init.text
+ */
+ tgtsec = sechdrs[relsec].sh_info;
+ tgt_addr = sechdrs[tgtsec].sh_addr;
sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
- pr_debug("\n========== Module Sym reloc ===========================\n");
- pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("\nSection to fixup %s @%x\n",
+ module->arch.secstr + sechdrs[tgtsec].sh_name, tgt_addr);
pr_debug("=========================================================\n");
- pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("r_off\tr_add\tst_value ADDRESS VALUE\n");
pr_debug("=========================================================\n");
/* Loop thru entries in relocation section */
for (i = 0; i < n; i++) {
+ const char *s;
/* This is where to make the change */
- location = sec_to_patch + rel_entry[i].r_offset;
+ location = tgt_addr + rel_entry[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
@@ -89,10 +85,15 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
relocation = sym_entry->st_value + rel_entry[i].r_addend;
- pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
- rel_entry[i].r_offset, rel_entry[i].r_addend,
- sym_entry->st_value, location, relocation,
- strtab + sym_entry->st_name);
+ if (sym_entry->st_name == 0 && ELF_ST_TYPE (sym_entry->st_info) == STT_SECTION) {
+ s = module->arch.secstr + sechdrs[sym_entry->st_shndx].sh_name;
+ } else {
+ s = strtab + sym_entry->st_name;
+ }
+
+ pr_debug(" %x\t%x\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation, s);
/* This assumes modules are built with -mlong-calls
* so any branches/jumps are absolute 32 bit jmps
@@ -111,6 +112,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
goto relo_err;
}
+
+ if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
+ module->arch.unw_sec_idx = tgtsec;
+
return 0;
relo_err:
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index be1972bd2729..59aa43cb146e 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -41,6 +41,39 @@ SYSCALL_DEFINE0(arc_gettls)
return task_thread_info(current)->thr_ptr;
}
+SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+{
+ int uval;
+ int ret;
+
+ /*
+	 * This is only for old cores lacking LLOCK/SCOND, which by definition
+ * can't possibly be SMP. Thus doesn't need to be SMP safe.
+ * And this also helps reduce the overhead for serializing in
+ * the UP case
+ */
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ preempt_disable();
+
+ ret = __get_user(uval, uaddr);
+ if (ret)
+ goto done;
+
+ if (uval != expected)
+ ret = -EAGAIN;
+ else
+ ret = __put_user(new, uaddr);
+
+done:
+ preempt_enable();
+
+ return ret;
+}
+
void arch_cpu_idle(void)
{
/* sleep, but enable all interrupts before committing */
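
The new sys_arc_usr_cmpxchg() above is the kernel half of user-space atomics on ARC cores that lack LLOCK/SCOND: it compares *uaddr with 'expected' and, only on a match, stores 'new', returning 0 on success, -EAGAIN on a mismatch and -EFAULT for a bad pointer. A hedged sketch of how user space might wrap it follows; the syscall number comes from the uapi/asm/unistd.h hunk earlier in this patch, while the lock helper is an invented illustration, not code from the tree:

#include <errno.h>
#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>         /* assumed to provide __NR_arc_usr_cmpxchg on ARC */

/* 0 if *addr was 'expected' and is now 'newval'; otherwise errno (EAGAIN/EFAULT) */
static int user_cmpxchg(int *addr, int expected, int newval)
{
        if (syscall(__NR_arc_usr_cmpxchg, addr, expected, newval) == 0)
                return 0;
        return errno;
}

/* trivial lock built on top of it: spin (politely) until we flip 0 -> 1 */
static void user_spin_lock(int *lock)
{
        while (user_cmpxchg(lock, 0, 1) != 0)
                sched_yield();  /* uniprocessor-friendly: let the holder run */
}
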
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 3df7f9c72f42..0385df77a697 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -40,6 +40,29 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static const struct id_to_str arc_cpu_rel[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x34, "R4.10"},
+ { 0x35, "R4.11"},
+#else
+ { 0x51, "R2.0" },
+ { 0x52, "R2.1" },
+ { 0x53, "R3.0" },
+#endif
+ { 0x00, NULL }
+};
+
+static const struct id_to_str arc_cpu_nm[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
+ { 0x20, "ARC 600" },
+	{ 0x30, "ARC 770" },   /* 750 identified separately */
+#else
+ { 0x40, "ARC EM" },
+ { 0x50, "ARC HS38" },
+#endif
+ { 0x00, "Unknown" }
+};
+
static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
if (is_isa_arcompact()) {
@@ -92,11 +115,26 @@ static void read_arc_build_cfg_regs(void)
struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ const struct id_to_str *tbl;
+
FIX_PTR(cpu);
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
+ for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
+ if (cpu->core.family == tbl->id) {
+ cpu->details = tbl->str;
+ break;
+ }
+ }
+
+ for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
+ if ((cpu->core.family & 0xF0) == tbl->id)
+ break;
+ }
+ cpu->name = tbl->str;
+
READ_BCR(ARC_REG_TIMERS_BCR, timer);
cpu->extn.timer0 = timer.t0;
cpu->extn.timer1 = timer.t1;
@@ -111,6 +149,9 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
+ cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
+ IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
+
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
@@ -160,64 +201,38 @@ static void read_arc_build_cfg_regs(void)
cpu->extn.rtt = bcr.ver ? 1 : 0;
cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-}
-static const struct cpuinfo_data arc_cpu_tbl[] = {
-#ifdef CONFIG_ISA_ARCOMPACT
- { {0x20, "ARC 600" }, 0x2F},
- { {0x30, "ARC 700" }, 0x33},
- { {0x34, "ARC 700 R4.10"}, 0x34},
- { {0x35, "ARC 700 R4.11"}, 0x35},
-#else
- { {0x50, "ARC HS38 R2.0"}, 0x51},
- { {0x52, "ARC HS38 R2.1"}, 0x52},
- { {0x53, "ARC HS38 R3.0"}, 0x53},
-#endif
- { {0x00, NULL } }
-};
+ /* some hacks for lack of feature BCR info in old ARC700 cores */
+ if (is_isa_arcompact()) {
+ if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
+ cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+ else
+ cpu->isa.atomic = cpu->isa.atomic1;
+ cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+
+ /* there's no direct way to distinguish 750 vs. 770 */
+ if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
+ cpu->name = "ARC750";
+ }
+}
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- const struct cpuinfo_data *tbl;
- char *isa_nm;
- int i, be, atomic;
- int n = 0;
+ int i, n = 0;
FIX_PTR(cpu);
- if (is_isa_arcompact()) {
- isa_nm = "ARCompact";
- be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
-
- atomic = cpu->isa.atomic1;
- if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
- atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
- } else {
- isa_nm = "ARCv2";
- be = cpu->isa.be;
- atomic = cpu->isa.atomic;
- }
-
n += scnprintf(buf + n, len - n,
"\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
core->family, core->cpu_id, core->chip_id);
- for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
- if ((core->family >= tbl->info.id) &&
- (core->family <= tbl->up_range)) {
- n += scnprintf(buf + n, len - n,
- "processor [%d]\t: %s (%s ISA) %s\n",
- cpu_id, tbl->info.str, isa_nm,
- IS_AVAIL1(be, "[Big-Endian]"));
- break;
- }
- }
-
- if (tbl->info.id == 0)
- n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+ cpu_id, cpu->name, cpu->details,
+ is_isa_arcompact() ? "ARCompact" : "ARCv2",
+ IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
@@ -226,7 +241,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
- IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
@@ -253,7 +268,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL1(cpu->extn.swap, "swap "),
IS_AVAIL1(cpu->extn.minmax, "minmax "),
IS_AVAIL1(cpu->extn.crc, "crc "),
- IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
+ IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
@@ -272,9 +287,7 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
FIX_PTR(cpu);
- n += scnprintf(buf + n, len - n,
- "Vector Table\t: %#x\nPeripherals\t: %#lx:%#lx\n",
- cpu->vec_base, perip_base, perip_end);
+ n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
@@ -507,7 +520,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
* way to pass it w/o having to kmalloc/free a 2 byte string.
* Encode cpu-id as 0xFFcccc, which is decoded by show routine.
*/
- return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+ return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
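
[Illustrative sketch, not part of the patch above. The setup.c change replaces the old range table with two sentinel-terminated id-to-string tables (arc_cpu_rel[] and arc_cpu_nm[]) walked until either a match or the zero id is reached. A minimal self-contained version of that walk, with hypothetical names:]

#include <stdio.h>

struct id_to_str {
        int id;
        const char *str;
};

static const struct id_to_str demo_tbl[] = {
        { 0x51, "R2.0" },
        { 0x52, "R2.1" },
        { 0x00, "Unknown" },    /* zero-id sentinel doubles as the fallback */
};

static const char *lookup_name(int family)
{
        const struct id_to_str *tbl;

        for (tbl = &demo_tbl[0]; tbl->id != 0; tbl++)
                if (family == tbl->id)
                        break;

        return tbl->str;        /* sentinel entry is reached if nothing matched */
}

int main(void)
{
        printf("%s\n", lookup_name(0x52));      /* prints "R2.1" */
        printf("%s\n", lookup_name(0x99));      /* prints "Unknown" */
        return 0;
}
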
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 6cb3736b6b83..d347bbc086fe 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -107,13 +107,13 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
struct user_regs_struct uregs;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
- if (!err)
- set_current_blocked(&set);
-
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+ if (err)
+ return err;
+ set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
regs->lp_start = uregs.scratch.lp_start;
regs->lp_end = uregs.scratch.lp_end;
@@ -138,7 +138,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
regs->r0 = uregs.scratch.r0;
regs->sp = uregs.scratch.sp;
- return err;
+ return 0;
}
static inline int is_do_ss_needed(unsigned int magic)
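
[Illustrative sketch, not part of the patch above. The signal.c reordering makes restore_usr_regs() copy everything from user space first and only then apply state such as the blocked signal mask, so a failed copy leaves nothing half-committed. A self-contained sketch of that "copy, check, then commit" ordering, with hypothetical names and a trivial stand-in for __copy_from_user():]

#include <string.h>

struct regs { long bta; long sp; };

/* stand-in for __copy_from_user(): returns 0 on success */
static int copy_in(void *dst, const void *src, size_t len)
{
        if (!src)
                return -1;
        memcpy(dst, src, len);
        return 0;
}

static int restore_regs(struct regs *live, const struct regs *frame)
{
        struct regs tmp;
        int err;

        err = copy_in(&tmp, frame, sizeof(tmp));
        if (err)
                return err;     /* bail out before touching *live */

        *live = tmp;            /* commit only once every copy succeeded */
        return 0;
}

int main(void)
{
        struct regs live = { 0, 0 };
        struct regs frame = { 1, 2 };

        return restore_regs(&live, &frame);     /* 0: both fields committed */
}
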
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 934150e7ac48..82f9bc819f4a 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -237,113 +237,3 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
if (!user_mode(regs))
show_stacktrace(current, regs);
}
-
-#ifdef CONFIG_DEBUG_FS
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/debugfs.h>
-
-static struct dentry *test_dentry;
-static struct dentry *test_dir;
-static struct dentry *test_u32_dentry;
-
-static u32 clr_on_read = 1;
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
-u32 numitlb, numdtlb, num_pte_not_present;
-
-static int fill_display_data(char *kbuf)
-{
- size_t num = 0;
- num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
- num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
- num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
-
- if (clr_on_read)
- numitlb = numdtlb = num_pte_not_present = 0;
-
- return num;
-}
-
-static int tlb_stats_open(struct inode *inode, struct file *file)
-{
- file->private_data = (void *)__get_free_page(GFP_KERNEL);
- return 0;
-}
-
-/* called on user read(): display the counters */
-static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
- char __user *user_buf, /* user buffer */
- size_t len, /* length of buffer */
- loff_t *offset) /* offset in the file */
-{
- size_t num;
- char *kbuf = (char *)file->private_data;
-
- /* All of the data can he shoved in one iteration */
- if (*offset != 0)
- return 0;
-
- num = fill_display_data(kbuf);
-
- /* simple_read_from_buffer() is helper for copy to user space
- It copies up to @2 (num) bytes from kernel buffer @4 (kbuf) at offset
- @3 (offset) into the user space address starting at @1 (user_buf).
- @5 (len) is max size of user buffer
- */
- return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
-}
-
-/* called on user write : clears the counters */
-static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
- size_t length, loff_t *offset)
-{
- numitlb = numdtlb = num_pte_not_present = 0;
- return length;
-}
-
-static int tlb_stats_close(struct inode *inode, struct file *file)
-{
- free_page((unsigned long)(file->private_data));
- return 0;
-}
-
-static const struct file_operations tlb_stats_file_ops = {
- .read = tlb_stats_output,
- .write = tlb_stats_clear,
- .open = tlb_stats_open,
- .release = tlb_stats_close
-};
-#endif
-
-static int __init arc_debugfs_init(void)
-{
- test_dir = debugfs_create_dir("arc", NULL);
-
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
- &tlb_stats_file_ops);
-#endif
-
- test_u32_dentry =
- debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
-
- return 0;
-}
-
-module_init(arc_debugfs_init);
-
-static void __exit arc_debugfs_exit(void)
-{
- debugfs_remove(test_u32_dentry);
- debugfs_remove(test_dentry);
- debugfs_remove(test_dir);
-}
-module_exit(arc_debugfs_exit);
-
-#endif
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 97dddbefb86a..2b96cfc3be75 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -22,8 +22,8 @@
#include <asm/setup.h>
static int l2_line_sz;
-int ioc_exists;
-volatile int slc_enable = 1, ioc_enable = 1;
+static int ioc_exists;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -53,18 +53,15 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
- if (!is_isa_arcv2())
- return buf;
-
p = &cpuinfo_arc700[c].slc;
if (p->ver)
n += scnprintf(buf + n, len - n,
"SLC\t\t: %uK, %uB Line%s\n",
p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
- if (ioc_exists)
- n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
- IS_DISABLED_RUN(ioc_enable));
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
return buf;
}
@@ -113,8 +110,10 @@ static void read_decode_cache_bcr_arcv2(int cpu)
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
- if (cbcr.c && ioc_enable)
+ if (cbcr.c)
ioc_exists = 1;
+ else
+ ioc_enable = 0;
/* HS 2.0 didn't have AUX_VOL */
if (cpuinfo_arc700[cpu].core.family > 0x51) {
@@ -1002,7 +1001,7 @@ void arc_cache_init(void)
read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
}
- if (is_isa_arcv2() && ioc_exists) {
+ if (is_isa_arcv2() && ioc_enable) {
/* IO coherency base - 0x8z */
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 20afc65e22dc..60aab5a7522b 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -45,7 +45,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if ((is_isa_arcv2() && ioc_exists) ||
+ if ((is_isa_arcv2() && ioc_enable) ||
(attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
@@ -97,7 +97,7 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
int is_non_coh = 1;
is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
- (is_isa_arcv2() && ioc_exists);
+ (is_isa_arcv2() && ioc_enable);
if (PageHighMem(page) || !is_non_coh)
iounmap((void __force __iomem *)vaddr);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ec868a9081a1..bdb295e09160 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -793,16 +793,16 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
char super_pg[64] = "";
if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page%s, ",
+ scnprintf(super_pg, 64, "%dM Super Page %s",
p_mmu->s_pg_sz_m,
IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
+ "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
p_mmu->ver, p_mmu->pg_sz_k, super_pg,
p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));
+ IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
return buf;
}
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index f1967eeb32e7..b30e4e36bb00 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -237,15 +237,6 @@ ex_saved_reg1:
2:
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- and.f 0, r0, _PAGE_PRESENT
- bz 1f
- ld r3, [num_pte_not_present]
- add r3, r3, 1
- st r3, [num_pte_not_present]
-1:
-#endif
-
.endm
;-----------------------------------------------------------------
@@ -309,12 +300,6 @@ ENTRY(EV_TLBMissI)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numitlb]
- add r0, r0, 1
- st r0, [@numitlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA
LOAD_FAULT_PTE
@@ -349,12 +334,6 @@ ENTRY(EV_TLBMissD)
TLBMISS_FREEUP_REGS
-#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
- ld r0, [@numdtlb]
- add r0, r0, 1
- st r0, [@numdtlb]
-#endif
-
;----------------------------------------------------------------
; Get the PTE corresponding to V-addr accessed
; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index b3df1c60d465..386eee6de232 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -239,14 +239,25 @@
arm,primecell-periphid = <0x10480180>;
max-frequency = <100000000>;
bus-width = <4>;
+ cap-sd-highspeed;
cap-mmc-highspeed;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ /* All direction control is used */
+ st,sig-dir-cmd;
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-pin-fbclk;
+ full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
vqmmc-supply = <&vmmci>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&sdi0_default_mode>;
pinctrl-1 = <&sdi0_sleep_mode>;
- cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
+ /* GPIO218 MMC_CD */
+ cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -549,7 +560,7 @@
/* VMMCI level-shifter enable */
snowball_cfg3 {
pins = "GPIO217_AH12";
- ste,config = <&gpio_out_lo>;
+ ste,config = <&gpio_out_hi>;
};
/* VMMCI level-shifter voltage select */
snowball_cfg4 {
diff --git a/arch/arm/boot/dts/uniphier-pro5.dtsi b/arch/arm/boot/dts/uniphier-pro5.dtsi
index 2c49c3614bda..5357ea9c14b1 100644
--- a/arch/arm/boot/dts/uniphier-pro5.dtsi
+++ b/arch/arm/boot/dts/uniphier-pro5.dtsi
@@ -184,11 +184,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pro5-mio-clock";
+ compatible = "socionext,uniphier-pro5-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pro5-mio-reset";
+ compatible = "socionext,uniphier-pro5-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index 8789cd518933..950f07ba0337 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -197,11 +197,11 @@
};
&mio_clk {
- compatible = "socionext,uniphier-pxs2-mio-clock";
+ compatible = "socionext,uniphier-pxs2-sd-clock";
};
&mio_rst {
- compatible = "socionext,uniphier-pxs2-mio-reset";
+ compatible = "socionext,uniphier-pxs2-sd-reset";
};
&peri_clk {
diff --git a/arch/arm/boot/dts/vf500.dtsi b/arch/arm/boot/dts/vf500.dtsi
index a3824e61bd72..d7fdb2a7d97b 100644
--- a/arch/arm/boot/dts/vf500.dtsi
+++ b/arch/arm/boot/dts/vf500.dtsi
@@ -70,7 +70,7 @@
global_timer: timer@40002200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x40002200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
interrupt-parent = <&intc>;
clocks = <&clks VF610_CLK_PLATFORM_BUS>;
};
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 437d0740dec6..11f37ed1dbff 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -850,6 +850,7 @@ CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
CONFIG_PHY_HIX5HD2_SATA=y
+CONFIG_E1000E=y
CONFIG_PWM_STI=y
CONFIG_PWM_BCM2835=y
CONFIG_PWM_BRCMSTB=m
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 55e0e3ea9cb6..0745538b26d3 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -8,6 +8,7 @@ generic-y += early_ioremap.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += export.h
generic-y += ioctl.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a93c0f99acf7..1f59ea051bab 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -533,11 +533,12 @@ __clear_user(void __user *addr, unsigned long n)
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
- memset(to, 0, n);
- return n;
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
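
[Illustrative sketch, not part of the patch above. The copy_from_user() change zeroes only the bytes that were not copied: res is the uncopied remainder, so the cleared region starts at to + (n - res) and is res bytes long. A self-contained sketch of that arithmetic, with a hypothetical helper that takes the successfully copied byte count as a parameter instead of learning it from a fault:]

#include <stddef.h>
#include <string.h>

static size_t copy_with_tail_zero(void *to, const void *from, size_t n,
                                  size_t copied_ok)
{
        size_t res = n - copied_ok;     /* bytes the copy failed to transfer */

        memcpy(to, from, copied_ok);    /* stand-in for the partial user copy */
        if (res)
                memset((char *)to + (n - res), 0, res); /* zero only the tail */

        return res;                     /* 0 on full success, like the original */
}

int main(void)
{
        char dst[8];
        const char src[8] = "abcdefg";

        /* 5 bytes "copied", so the last 3 bytes of dst are zeroed */
        return copy_with_tail_zero(dst, src, sizeof(dst), 5) == 3 ? 0 : 1;
}
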
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index ad325a8c7e1e..68c2c097cffe 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -33,7 +33,7 @@ endif
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
-obj-$(CONFIG_MODULES) += armksyms.o module.o
+obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
deleted file mode 100644
index 7e45f69a0ddc..000000000000
--- a/arch/arm/kernel/armksyms.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * linux/arch/arm/kernel/armksyms.c
- *
- * Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/cryptohash.h>
-#include <linux/delay.h>
-#include <linux/in6.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/arm-smccc.h>
-
-#include <asm/checksum.h>
-#include <asm/ftrace.h>
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler... (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __ucmpdi2(void);
-extern void __udivsi3(void);
-extern void __umodsi3(void);
-extern void __do_div64(void);
-extern void __bswapsi2(void);
-extern void __bswapdi2(void);
-
-extern void __aeabi_idiv(void);
-extern void __aeabi_idivmod(void);
-extern void __aeabi_lasr(void);
-extern void __aeabi_llsl(void);
-extern void __aeabi_llsr(void);
-extern void __aeabi_lmul(void);
-extern void __aeabi_uidiv(void);
-extern void __aeabi_uidivmod(void);
-extern void __aeabi_ulcmp(void);
-
-extern void fpundefinstr(void);
-
-void mmioset(void *, unsigned int, size_t);
-void mmiocpy(void *, const void *, size_t);
-
- /* platform dependent support */
-EXPORT_SYMBOL(arm_delay_ops);
-
- /* networking */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_ipv6_magic);
-
- /* io */
-#ifndef __raw_readsb
-EXPORT_SYMBOL(__raw_readsb);
-#endif
-#ifndef __raw_readsw
-EXPORT_SYMBOL(__raw_readsw);
-#endif
-#ifndef __raw_readsl
-EXPORT_SYMBOL(__raw_readsl);
-#endif
-#ifndef __raw_writesb
-EXPORT_SYMBOL(__raw_writesb);
-#endif
-#ifndef __raw_writesw
-EXPORT_SYMBOL(__raw_writesw);
-#endif
-#ifndef __raw_writesl
-EXPORT_SYMBOL(__raw_writesl);
-#endif
-
- /* string / mem functions */
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);
-
-EXPORT_SYMBOL(mmioset);
-EXPORT_SYMBOL(mmiocpy);
-
-#ifdef CONFIG_MMU
-EXPORT_SYMBOL(copy_page);
-
-EXPORT_SYMBOL(arm_copy_from_user);
-EXPORT_SYMBOL(arm_copy_to_user);
-EXPORT_SYMBOL(arm_clear_user);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(__get_user_64t_1);
-EXPORT_SYMBOL(__get_user_64t_2);
-EXPORT_SYMBOL(__get_user_64t_4);
-EXPORT_SYMBOL(__get_user_32t_8);
-#endif
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-#endif
-
- /* gcc lib functions */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__do_div64);
-EXPORT_SYMBOL(__bswapsi2);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_AEABI
-EXPORT_SYMBOL(__aeabi_idiv);
-EXPORT_SYMBOL(__aeabi_idivmod);
-EXPORT_SYMBOL(__aeabi_lasr);
-EXPORT_SYMBOL(__aeabi_llsl);
-EXPORT_SYMBOL(__aeabi_llsr);
-EXPORT_SYMBOL(__aeabi_lmul);
-EXPORT_SYMBOL(__aeabi_uidiv);
-EXPORT_SYMBOL(__aeabi_uidivmod);
-EXPORT_SYMBOL(__aeabi_ulcmp);
-#endif
-
- /* bitops */
-EXPORT_SYMBOL(_set_bit);
-EXPORT_SYMBOL(_test_and_set_bit);
-EXPORT_SYMBOL(_clear_bit);
-EXPORT_SYMBOL(_test_and_clear_bit);
-EXPORT_SYMBOL(_change_bit);
-EXPORT_SYMBOL(_test_and_change_bit);
-EXPORT_SYMBOL(_find_first_zero_bit_le);
-EXPORT_SYMBOL(_find_next_zero_bit_le);
-EXPORT_SYMBOL(_find_first_bit_le);
-EXPORT_SYMBOL(_find_next_bit_le);
-
-#ifdef __ARMEB__
-EXPORT_SYMBOL(_find_first_zero_bit_be);
-EXPORT_SYMBOL(_find_next_zero_bit_be);
-EXPORT_SYMBOL(_find_first_bit_be);
-EXPORT_SYMBOL(_find_next_bit_be);
-#endif
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
-EXPORT_SYMBOL(__gnu_mcount_nc);
-#endif
-
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-EXPORT_SYMBOL(__pv_phys_pfn_offset);
-EXPORT_SYMBOL(__pv_offset);
-#endif
-
-#ifdef CONFIG_HAVE_ARM_SMCCC
-EXPORT_SYMBOL(arm_smccc_smc);
-EXPORT_SYMBOL(arm_smccc_hvc);
-#endif
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index c73c4030ca5d..b629d3f11c3d 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -7,6 +7,7 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
+#include <asm/export.h>
#include "entry-header.S"
@@ -153,6 +154,7 @@ ENTRY(mcount)
__mcount _old
#endif
ENDPROC(mcount)
+EXPORT_SYMBOL(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
@@ -205,6 +207,7 @@ UNWIND(.fnstart)
#endif
UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
+EXPORT_SYMBOL(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04286fd9e09c..f41cee4c5746 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -22,6 +22,7 @@
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>
+#include <asm/export.h>
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
@@ -727,6 +728,8 @@ __pv_phys_pfn_offset:
__pv_offset:
.quad 0
.size __pv_offset, . -__pv_offset
+EXPORT_SYMBOL(__pv_phys_pfn_offset)
+EXPORT_SYMBOL(__pv_offset)
#endif
#include "head-common.S"
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 2e48b674aab1..37669e7e13af 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -16,6 +16,7 @@
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
+#include <asm/export.h>
/*
* Wrap c macros in asm macros to delay expansion until after the
@@ -51,6 +52,7 @@ UNWIND( .fnend)
ENTRY(arm_smccc_smc)
SMCCC SMCCC_SMC
ENDPROC(arm_smccc_smc)
+EXPORT_SYMBOL(arm_smccc_smc)
/*
* void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -60,3 +62,4 @@ ENDPROC(arm_smccc_smc)
ENTRY(arm_smccc_hvc)
SMCCC SMCCC_HVC
ENDPROC(arm_smccc_hvc)
+EXPORT_SYMBOL(arm_smccc_hvc)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 03e9273f1876..08bb84f2ad58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1312,6 +1312,13 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+ kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+ if (err) {
+ kvm_err("Cannot map bss section\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index b05e95840651..a7e7de89bd75 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsl)
ENDPROC(__ashldi3)
ENDPROC(__aeabi_llsl)
+EXPORT_SYMBOL(__ashldi3)
+EXPORT_SYMBOL(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 275d7d2341a4..490336e42518 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_lasr)
ENDPROC(__ashrdi3)
ENDPROC(__aeabi_lasr)
+EXPORT_SYMBOL(__ashrdi3)
+EXPORT_SYMBOL(__aeabi_lasr)
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 7d807cfd8ef5..df06638b327c 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,5 +1,6 @@
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
.macro bitop, name, instr
@@ -25,6 +26,7 @@ UNWIND( .fnstart )
bx lr
UNWIND( .fnend )
ENDPROC(\name )
+EXPORT_SYMBOL(\name )
.endm
.macro testop, name, instr, store
@@ -55,6 +57,7 @@ UNWIND( .fnstart )
2: bx lr
UNWIND( .fnend )
ENDPROC(\name )
+EXPORT_SYMBOL(\name )
.endm
#else
.macro bitop, name, instr
@@ -74,6 +77,7 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
+EXPORT_SYMBOL(\name )
.endm
/**
@@ -102,5 +106,6 @@ UNWIND( .fnstart )
ret lr
UNWIND( .fnend )
ENDPROC(\name )
+EXPORT_SYMBOL(\name )
.endm
#endif
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index 07cda737bb11..f05f78247304 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,5 +1,6 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#if __LINUX_ARM_ARCH__ >= 6
ENTRY(__bswapsi2)
@@ -35,3 +36,5 @@ ENTRY(__bswapdi2)
ret lr
ENDPROC(__bswapdi2)
#endif
+EXPORT_SYMBOL(__bswapsi2)
+EXPORT_SYMBOL(__bswapdi2)
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index e936352ccb00..b566154f5cf4 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
.text
@@ -50,6 +51,9 @@ USER( strnebt r2, [r0])
UNWIND(.fnend)
ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_clear_user)
+#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 1512bebfbf1b..63e4c1ed0225 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
/*
* Prototype:
@@ -94,16 +95,14 @@ ENTRY(arm_copy_from_user)
#include "copy_template.S"
ENDPROC(arm_copy_from_user)
+EXPORT_SYMBOL(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
copy_abort_preamble
- ldmfd sp!, {r1, r2}
- sub r3, r0, r1
- rsb r1, r3, r2
- str r1, [sp]
- bl __memzero
- ldr r0, [sp], #4
+ ldmfd sp!, {r1, r2, r3}
+ sub r0, r0, r1
+ rsb r0, r0, r2
copy_abort_end
.popsection
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ee2f6706f86..d97851d4af7a 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -13,6 +13,7 @@
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
+#include <asm/export.h>
#define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 ))
@@ -45,3 +46,4 @@ ENTRY(copy_page)
PLD( beq 2b )
ldmfd sp!, {r4, pc} @ 3
ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index caf5019d8161..592c179112d1 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
/*
* Prototype:
@@ -99,6 +100,9 @@ WEAK(arm_copy_to_user)
ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
+EXPORT_SYMBOL(arm_copy_to_user)
+#endif
.pushsection .text.fixup,"ax"
.align 0
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 3ac6ef01bc43..68603b5ee537 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
@@ -30,4 +31,4 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, #0
ldmfd sp!, {pc}
ENDPROC(__csum_ipv6_magic)
-
+EXPORT_SYMBOL(__csum_ipv6_magic)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 984e0f29d548..830b20e81c37 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
@@ -140,3 +141,4 @@ ENTRY(csum_partial)
bne 4b
b .Lless4
ENDPROC(csum_partial)
+EXPORT_SYMBOL(csum_partial)
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S
index d03fc71fc88c..9c3383fed129 100644
--- a/arch/arm/lib/csumpartialcopy.S
+++ b/arch/arm/lib/csumpartialcopy.S
@@ -49,5 +49,6 @@
#define FN_ENTRY ENTRY(csum_partial_copy_nocheck)
#define FN_EXIT ENDPROC(csum_partial_copy_nocheck)
+#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index 10b45909610c..8b94d20e51d1 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <asm/assembler.h>
+#include <asm/export.h>
/*
* unsigned int
@@ -331,3 +332,4 @@ FN_ENTRY
mov r5, r4, get_byte_1
b .Lexit
FN_EXIT
+FN_EXPORT
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1712f132b80d..5d495edf3d83 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -73,6 +73,7 @@
#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
+#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user)
#include "csumpartialcopygeneric.S"
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 2cef11884857..69aad80a3af4 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/export.h>
#include <linux/timex.h>
/*
@@ -34,6 +35,7 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = {
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
};
+EXPORT_SYMBOL(arm_delay_ops);
static const struct delay_timer *delay_timer;
static bool delay_calibrated;
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index a9eafe4981eb..0c9e1c18fc9e 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -15,6 +15,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -210,3 +211,4 @@ Ldiv0_64:
UNWIND(.fnend)
ENDPROC(__do_div64)
+EXPORT_SYMBOL(__do_div64)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 7848780e8834..26302b8cd38f 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -15,6 +15,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
/*
@@ -37,6 +38,7 @@ ENTRY(_find_first_zero_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_le)
+EXPORT_SYMBOL(_find_first_zero_bit_le)
/*
* Purpose : Find next 'zero' bit
@@ -57,6 +59,7 @@ ENTRY(_find_next_zero_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_le)
+EXPORT_SYMBOL(_find_next_zero_bit_le)
/*
* Purpose : Find a 'one' bit
@@ -78,6 +81,7 @@ ENTRY(_find_first_bit_le)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_le)
+EXPORT_SYMBOL(_find_first_bit_le)
/*
* Purpose : Find next 'one' bit
@@ -97,6 +101,7 @@ ENTRY(_find_next_bit_le)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_le)
+EXPORT_SYMBOL(_find_next_bit_le)
#ifdef __ARMEB__
@@ -116,6 +121,7 @@ ENTRY(_find_first_zero_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_zero_bit_be)
+EXPORT_SYMBOL(_find_first_zero_bit_be)
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -133,6 +139,7 @@ ENTRY(_find_next_zero_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_zero_bit_be)
+EXPORT_SYMBOL(_find_next_zero_bit_be)
ENTRY(_find_first_bit_be)
teq r1, #0
@@ -150,6 +157,7 @@ ENTRY(_find_first_bit_be)
3: mov r0, r1 @ no free bits
ret lr
ENDPROC(_find_first_bit_be)
+EXPORT_SYMBOL(_find_first_bit_be)
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -166,6 +174,7 @@ ENTRY(_find_next_bit_be)
add r2, r2, #1 @ align bit pointer
b 2b @ loop for next bit
ENDPROC(_find_next_bit_be)
+EXPORT_SYMBOL(_find_next_bit_be)
#endif
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 8ecfd15c3a02..9d09a38e73af 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -31,6 +31,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
+#include <asm/export.h>
ENTRY(__get_user_1)
check_uaccess r0, 1, r1, r2, __get_user_bad
@@ -38,6 +39,7 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +60,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +68,7 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad
@@ -78,6 +82,7 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
@@ -91,6 +96,7 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
+EXPORT_SYMBOL(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +104,7 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
+EXPORT_SYMBOL(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +121,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
+EXPORT_SYMBOL(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +129,7 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
+EXPORT_SYMBOL(__get_user_64t_4)
#endif
__get_user_bad8:
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index c31b2f3153f1..3dff7a3a2aef 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.Linsb_align: rsb ip, ip, #4
cmp ip, r2
@@ -121,3 +122,4 @@ ENTRY(__raw_readsb)
ldmfd sp!, {r4 - r6, pc}
ENDPROC(__raw_readsb)
+EXPORT_SYMBOL(__raw_readsb)
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 2ed86fa5465f..bfd39682325b 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
ENTRY(__raw_readsl)
teq r2, #0 @ do we have to check for the zero len?
@@ -77,3 +78,4 @@ ENTRY(__raw_readsl)
strb r3, [r1, #0]
ret lr
ENDPROC(__raw_readsl)
+EXPORT_SYMBOL(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 413da9914529..b3af3db6caac 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.Linsw_bad_alignment:
adr r0, .Linsw_bad_align_msg
@@ -103,4 +104,4 @@ ENTRY(__raw_readsw)
ldmfd sp!, {r4, r5, r6, pc}
-
+EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index d9a45e9692ae..3c7a7a40b33e 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.macro pack, rd, hw1, hw2
#ifndef __ARMEB__
@@ -129,3 +130,4 @@ ENTRY(__raw_readsw)
strneb ip, [r1]
ldmfd sp!, {r4, pc}
ENDPROC(__raw_readsw)
+EXPORT_SYMBOL(__raw_readsw)
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index a46bbc9b168b..fa3633594415 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -92,3 +93,4 @@ ENTRY(__raw_writesb)
ldmfd sp!, {r4, r5, pc}
ENDPROC(__raw_writesb)
+EXPORT_SYMBOL(__raw_writesb)
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index 4ea2435988c1..98ed6aec0b47 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
ENTRY(__raw_writesl)
teq r2, #0 @ do we have to check for the zero len?
@@ -65,3 +66,4 @@ ENTRY(__raw_writesl)
bne 6b
ret lr
ENDPROC(__raw_writesl)
+EXPORT_SYMBOL(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 121789eb6802..577184c082bb 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.Loutsw_bad_alignment:
adr r0, .Loutsw_bad_align_msg
@@ -124,3 +125,4 @@ ENTRY(__raw_writesw)
strne ip, [r0]
ldmfd sp!, {r4, r5, r6, pc}
+EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index 269f90c51ad2..e335f489d1fc 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.macro outword, rd
#ifndef __ARMEB__
@@ -98,3 +99,4 @@ ENTRY(__raw_writesw)
strneh ip, [r0]
ret lr
ENDPROC(__raw_writesw)
+EXPORT_SYMBOL(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index 9397b2e532af..f541bc013bff 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -36,6 +36,7 @@ Boston, MA 02111-1307, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
.macro ARM_DIV_BODY dividend, divisor, result, curbit
@@ -238,6 +239,8 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__udivsi3)
ENDPROC(__aeabi_uidiv)
+EXPORT_SYMBOL(__udivsi3)
+EXPORT_SYMBOL(__aeabi_uidiv)
ENTRY(__umodsi3)
UNWIND(.fnstart)
@@ -256,6 +259,7 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__umodsi3)
+EXPORT_SYMBOL(__umodsi3)
#ifdef CONFIG_ARM_PATCH_IDIV
.align 3
@@ -303,6 +307,8 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__divsi3)
ENDPROC(__aeabi_idiv)
+EXPORT_SYMBOL(__divsi3)
+EXPORT_SYMBOL(__aeabi_idiv)
ENTRY(__modsi3)
UNWIND(.fnstart)
@@ -327,6 +333,7 @@ UNWIND(.fnstart)
UNWIND(.fnend)
ENDPROC(__modsi3)
+EXPORT_SYMBOL(__modsi3)
#ifdef CONFIG_AEABI
@@ -343,6 +350,7 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_uidivmod)
+EXPORT_SYMBOL(__aeabi_uidivmod)
ENTRY(__aeabi_idivmod)
UNWIND(.fnstart)
@@ -356,6 +364,7 @@ UNWIND(.save {r0, r1, ip, lr} )
UNWIND(.fnend)
ENDPROC(__aeabi_idivmod)
+EXPORT_SYMBOL(__aeabi_idivmod)
#endif
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index 922dcd88b02b..e40833981417 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -28,6 +28,7 @@ Boston, MA 02110-1301, USA. */
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define al r1
@@ -52,3 +53,5 @@ ENTRY(__aeabi_llsr)
ENDPROC(__lshrdi3)
ENDPROC(__aeabi_llsr)
+EXPORT_SYMBOL(__lshrdi3)
+EXPORT_SYMBOL(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 74a5bed6d999..44182bf686a5 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
.align 5
@@ -24,3 +25,4 @@ ENTRY(memchr)
2: movne r0, #0
ret lr
ENDPROC(memchr)
+EXPORT_SYMBOL(memchr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 64111bd4440b..1be5b6ddf37c 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
#define LDR1W_SHIFT 0
#define STR1W_SHIFT 0
@@ -68,3 +69,5 @@ ENTRY(memcpy)
ENDPROC(memcpy)
ENDPROC(mmiocpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(mmiocpy)
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 69a9d47fc5ab..71dcc5400d02 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
.text
@@ -225,3 +226,4 @@ ENTRY(memmove)
18: backward_copy_shift push=24 pull=8
ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 3c65e3bd790f..7b72044cba62 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
.text
.align 5
@@ -135,3 +136,5 @@ UNWIND( .fnstart )
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(mmioset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 0eded952e089..6dec26ed5bcc 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -10,6 +10,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
+#include <asm/export.h>
.text
.align 5
@@ -135,3 +136,4 @@ UNWIND( .fnstart )
ret lr @ 1
UNWIND( .fnend )
ENDPROC(__memzero)
+EXPORT_SYMBOL(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index 204305956925..b8f12388ccac 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -46,3 +47,5 @@ ENTRY(__aeabi_lmul)
ENDPROC(__muldi3)
ENDPROC(__aeabi_lmul)
+EXPORT_SYMBOL(__muldi3)
+EXPORT_SYMBOL(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 38d660d3705f..11de126e2ed6 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -31,6 +31,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/domain.h>
+#include <asm/export.h>
ENTRY(__put_user_1)
check_uaccess r0, 1, r1, ip, __put_user_bad
@@ -38,6 +39,7 @@ ENTRY(__put_user_1)
mov r0, #0
ret lr
ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
check_uaccess r0, 2, r1, ip, __put_user_bad
@@ -62,6 +64,7 @@ ENTRY(__put_user_2)
mov r0, #0
ret lr
ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
check_uaccess r0, 4, r1, ip, __put_user_bad
@@ -69,6 +72,7 @@ ENTRY(__put_user_4)
mov r0, #0
ret lr
ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
check_uaccess r0, 8, r1, ip, __put_user_bad
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
mov r0, #0
ret lr
ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
__put_user_bad:
mov r0, #-EFAULT
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 013d64c71e8d..7301f6e6046c 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
.align 5
@@ -25,3 +26,4 @@ ENTRY(strchr)
subeq r0, r0, #1
ret lr
ENDPROC(strchr)
+EXPORT_SYMBOL(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index 3cec1c7482c4..aaf9fd98b754 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -11,6 +11,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
.text
.align 5
@@ -24,3 +25,4 @@ ENTRY(strrchr)
mov r0, r3
ret lr
ENDPROC(strrchr)
+EXPORT_SYMBOL(strrchr)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 6bd1089b07e0..1626e3a551a1 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -19,6 +19,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
+#include <linux/export.h>
#include <asm/current.h>
#include <asm/page.h>
@@ -156,6 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
+EXPORT_SYMBOL(arm_copy_to_user);
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
@@ -213,6 +215,7 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n)
}
return n;
}
+EXPORT_SYMBOL(arm_clear_user);
#if 0
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index ad4a6309141a..127a91af46f3 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
#ifdef __ARMEB__
#define xh r0
@@ -35,6 +36,7 @@ ENTRY(__ucmpdi2)
ret lr
ENDPROC(__ucmpdi2)
+EXPORT_SYMBOL(__ucmpdi2)
#ifdef CONFIG_AEABI
@@ -48,6 +50,7 @@ ENTRY(__aeabi_ulcmp)
ret lr
ENDPROC(__aeabi_ulcmp)
+EXPORT_SYMBOL(__aeabi_ulcmp)
#endif
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index cab128913e72..737450fe790c 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -32,7 +32,6 @@ endif
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
-obj-y += ssi-fiq-ksym.o
endif
# i.MX21 based machines
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 0df062d8b2c9..b54db47f6f32 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -408,7 +408,7 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
{
struct clk *clk;
- int i;
+ int i, ret;
imx6q_pu_domain.reg = pu_reg;
@@ -430,13 +430,22 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
return 0;
- pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
- return of_genpd_add_provider_onecell(dev->of_node,
+ for (i = 0; i < ARRAY_SIZE(imx_gpc_domains); i++)
+ pm_genpd_init(imx_gpc_domains[i], NULL, false);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node,
&imx_gpc_onecell_data);
+ if (ret)
+ goto power_off;
+
+ return 0;
+power_off:
+ imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
clk_err:
while (i--)
clk_put(imx6q_pu_domain.clk[i]);
+ imx6q_pu_domain.reg = NULL;
return -EINVAL;
}
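
[Illustrative sketch, not part of the patch above. The gpc.c change adds a goto-based unwind path: a failure after the power domains are initialised branches to a label that undoes the earlier steps in reverse order before returning the error. A minimal self-contained version of that pattern, with hypothetical names:]

#include <stdio.h>

static int do_step(int fail) { return fail ? -1 : 0; }
static void undo_step(const char *what) { printf("undo %s\n", what); }

static int init_chain(void)
{
        int ret;

        ret = do_step(0);
        if (ret)
                goto out;

        ret = do_step(0);
        if (ret)
                goto undo_first;

        ret = do_step(1);               /* pretend the last step fails */
        if (ret)
                goto undo_second;

        return 0;

undo_second:
        undo_step("second step");
undo_first:
        undo_step("first step");
out:
        return ret;
}

int main(void)
{
        printf("init_chain() = %d\n", init_chain());
        return 0;
}
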
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 97fd25105e2c..45801b27ee5c 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -173,7 +173,7 @@ static void __init imx6q_enet_phy_init(void)
ksz9021rn_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_KSZ9031, MICREL_PHY_ID_MASK,
ksz9031rn_phy_fixup);
- phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_AR8031, 0xffffffef,
ar8031_phy_fixup);
phy_register_fixup_for_uid(PHY_ID_AR8035, 0xffffffef,
ar8035_phy_fixup);
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c
deleted file mode 100644
index 792090f9a032..000000000000
--- a/arch/arm/mach-imx/ssi-fiq-ksym.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Exported ksyms for the SSI FIQ handler
- *
- * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-
-#include <linux/platform_data/asoc-imx-ssi.h>
-
-EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer);
-EXPORT_SYMBOL(imx_ssi_fiq_start);
-EXPORT_SYMBOL(imx_ssi_fiq_end);
-EXPORT_SYMBOL(imx_ssi_fiq_base);
-
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S
index a8b93c5f29b5..fd7917f1c204 100644
--- a/arch/arm/mach-imx/ssi-fiq.S
+++ b/arch/arm/mach-imx/ssi-fiq.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/export.h>
/*
* r8 = bit 0-15: tx offset, bit 16-31: tx buffer size
@@ -144,4 +145,8 @@ imx_ssi_fiq_tx_buffer:
.word 0x0
.L_imx_ssi_fiq_end:
imx_ssi_fiq_end:
-
+EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer)
+EXPORT_SYMBOL(imx_ssi_fiq_start)
+EXPORT_SYMBOL(imx_ssi_fiq_end)
+EXPORT_SYMBOL(imx_ssi_fiq_base)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index f9b6bd306cfe..541647f57192 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -23,6 +23,7 @@ config MACH_MVEBU_V7
select CACHE_L2X0
select ARM_CPU_SUSPEND
select MACH_MVEBU_ANY
+ select MVEBU_CLK_COREDIV
config MACH_ARMADA_370
bool "Marvell Armada 370 boards"
@@ -32,7 +33,6 @@ config MACH_ARMADA_370
select CPU_PJ4B
select MACH_MVEBU_V7
select PINCTRL_ARMADA_370
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 370 SoC with device tree.
@@ -50,7 +50,6 @@ config MACH_ARMADA_375
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_375
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 375 SoC with device tree.
@@ -68,7 +67,6 @@ config MACH_ARMADA_38X
select HAVE_SMP
select MACH_MVEBU_V7
select PINCTRL_ARMADA_38X
- select MVEBU_CLK_COREDIV
help
Say 'Y' here if you want your kernel to support boards based
on the Marvell Armada 380/385 SoC with device tree.
diff --git a/arch/arm/mach-uniphier/Kconfig b/arch/arm/mach-uniphier/Kconfig
index 82dddee3a469..3930fbba30b4 100644
--- a/arch/arm/mach-uniphier/Kconfig
+++ b/arch/arm/mach-uniphier/Kconfig
@@ -1,6 +1,7 @@
config ARCH_UNIPHIER
bool "Socionext UniPhier SoCs"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARM_AMBA
select ARM_GLOBAL_TIMER
select ARM_GIC
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
config RANDOMIZE_MODULE_REGION_FULL
bool "Randomize the module region independently from the core kernel"
- depends on RANDOMIZE_BASE
+ depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
default y
help
Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index cfbdf02ef566..101794f5ce10 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -190,6 +190,7 @@ config ARCH_THUNDER
config ARCH_UNIPHIER
bool "Socionext UniPhier SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
help
This enables support for Socionext UniPhier SoC family.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie -Bsymbolic
+LDFLAGS_vmlinux += -pie -shared -Bsymbolic
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
index 2d7872a36b91..b09f3bc5c6c1 100644
--- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -164,6 +164,8 @@
nand-ecc-mode = "hw";
nand-ecc-strength = <8>;
nand-ecc-step-size = <512>;
+ nand-bus-width = <16>;
+ brcm,nand-oob-sector-size = <16>;
#address-cells = <1>;
#size-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 58635f7f4668..97d331ec2500 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -123,6 +123,7 @@
<1 14 0xf08>, /* Physical Non-Secure PPI */
<1 11 0xf08>, /* Virtual PPI */
<1 10 0xf08>; /* Hypervisor PPI */
+ fsl,erratum-a008585;
};
pmu {
@@ -502,10 +503,11 @@
};
sata: sata@3200000 {
- compatible = "fsl,ls1043a-ahci", "fsl,ls1021a-ahci";
+ compatible = "fsl,ls1043a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>;
interrupts = <0 69 0x4>;
clocks = <&clockgen 4 0>;
+ dma-coherent;
};
msi1: msi-controller1@1571000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index d1059765dfee..7f0dc13b4087 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -195,6 +195,7 @@
<1 14 4>, /* Physical Non-Secure PPI, active-low */
<1 11 4>, /* Virtual PPI, active-low */
<1 10 4>; /* Hypervisor PPI, active-low */
+ fsl,erratum-a008585;
};
pmu {
@@ -683,6 +684,7 @@
reg = <0x0 0x3200000 0x0 0x10000>;
interrupts = <0 133 0x4>; /* Level high type */
clocks = <&clockgen 4 3>;
+ dma-coherent;
};
sata1: sata@3210000 {
@@ -691,6 +693,7 @@
reg = <0x0 0x3210000 0x0 0x10000>;
interrupts = <0 136 0x4>; /* Level high type */
clocks = <&clockgen 4 3>;
+ dma-coherent;
};
usb0: usb3@3100000 {
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index e5e3ed678b6f..602e2c2e9a4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -131,7 +131,7 @@
#address-cells = <0x1>;
#size-cells = <0x0>;
cell-index = <1>;
- clocks = <&cpm_syscon0 0 3>;
+ clocks = <&cpm_syscon0 1 21>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 46cdddfcea6c..e5eeca2c2456 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -116,7 +116,6 @@
cap-mmc-highspeed;
clock-frequency = <150000000>;
disable-wp;
- keep-power-in-suspend;
non-removable;
num-slots = <1>;
vmmc-supply = <&vcc_io>;
@@ -258,8 +257,6 @@
};
vcc_sd: SWITCH_REG1 {
- regulator-always-on;
- regulator-boot-on;
regulator-name = "vcc_sd";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index 5797933ef80e..ea0a8eceefd4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -152,8 +152,6 @@
gpio = <&gpio3 11 GPIO_ACTIVE_LOW>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
vin-supply = <&vcc_io>;
};
@@ -201,7 +199,6 @@
bus-width = <8>;
cap-mmc-highspeed;
disable-wp;
- keep-power-in-suspend;
mmc-pwrseq = <&emmc_pwrseq>;
mmc-hs200-1_2v;
mmc-hs200-1_8v;
@@ -350,7 +347,6 @@
clock-freq-min-max = <400000 50000000>;
cap-sd-highspeed;
card-detect-delay = <200>;
- keep-power-in-suspend;
num-slots = <1>;
pinctrl-names = "default";
pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index 08fd7cf7769c..56a1b2e92cf3 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -257,18 +257,18 @@
reg = <0x59801000 0x400>;
};
- mioctrl@59810000 {
- compatible = "socionext,uniphier-mioctrl",
+ sdctrl@59810000 {
+ compatible = "socionext,uniphier-ld20-sdctrl",
"simple-mfd", "syscon";
reg = <0x59810000 0x800>;
- mio_clk: clock {
- compatible = "socionext,uniphier-ld20-mio-clock";
+ sd_clk: clock {
+ compatible = "socionext,uniphier-ld20-sd-clock";
#clock-cells = <1>;
};
- mio_rst: reset {
- compatible = "socionext,uniphier-ld20-mio-reset";
+ sd_rst: reset {
+ compatible = "socionext,uniphier-ld20-sd-reset";
#reset-cells = <1>;
};
};
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..a27c3245ba21 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
u16 capability;
int def_scope; /* default scope */
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- void (*enable)(void *); /* Called on all active CPUs */
+ int (*enable)(void *); /* Called on all active CPUs */
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H
+#include <linux/sched.h>
+
extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd9d5fd788f5..f5ea0ba70f07 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+ kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ba62df8c6e35..b71086d25195 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -217,7 +217,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(kaddr) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
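Editor's note: the memory.h hunk above only renames the macro parameter from kaddr to page, so that the body's use of page binds to the argument instead of whatever variable happens to be in scope at the call site. A minimal, hypothetical userspace sketch of that macro-hygiene pitfall (names and values are illustrative, not kernel code):

#include <stdio.h>

/* Buggy: parameter is named "kaddr" but the body uses "page", so
 * "page" silently binds to a caller-side variable and the macro only
 * works by accident. */
#define BAD_DOUBLE(kaddr)  ((page) * 2)
/* Fixed: parameter name matches the body. */
#define GOOD_DOUBLE(page)  ((page) * 2)

int main(void)
{
	int page = 21;		/* captured by BAD_DOUBLE regardless of its argument */

	printf("%d %d\n", BAD_DOUBLE(5), GOOD_DOUBLE(5));	/* prints "42 10" */
	return 0;
}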
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
+#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f59288c..5394c8405e66 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
\
switch (size) { \
case 1: \
- do { \
- asm ("//__per_cpu_" #op "_1\n" \
- "ldxrb %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "1: ldxrb %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrb %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u8 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrb %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 2: \
- do { \
- asm ("//__per_cpu_" #op "_2\n" \
- "ldxrh %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "1: ldxrh %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrh %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u16 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrh %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 4: \
- do { \
- asm ("//__per_cpu_" #op "_4\n" \
- "ldxr %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "1: ldxr %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxr %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u32 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 8: \
- do { \
- asm ("//__per_cpu_" #op "_8\n" \
- "ldxr %[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "1: ldxr %[ret], %[ptr]\n" \
#asm_op " %[ret], %[ret], %[val]\n" \
- "stxr %w[loop], %[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u64 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
default: \
BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
switch (size) {
case 1:
- do {
- asm ("//__percpu_xchg_1\n"
- "ldxrb %w[ret], %[ptr]\n"
- "stxrb %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u8 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_1\n"
+ "1: ldxrb %w[ret], %[ptr]\n"
+ " stxrb %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
break;
case 2:
- do {
- asm ("//__percpu_xchg_2\n"
- "ldxrh %w[ret], %[ptr]\n"
- "stxrh %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u16 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_2\n"
+ "1: ldxrh %w[ret], %[ptr]\n"
+ " stxrh %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
break;
case 4:
- do {
- asm ("//__percpu_xchg_4\n"
- "ldxr %w[ret], %[ptr]\n"
- "stxr %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u32 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_4\n"
+ "1: ldxr %w[ret], %[ptr]\n"
+ " stxr %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
break;
case 8:
- do {
- asm ("//__percpu_xchg_8\n"
- "ldxr %[ret], %[ptr]\n"
- "stxr %w[loop], %[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u64 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_8\n"
+ "1: ldxr %[ret], %[ptr]\n"
+ " stxr %w[loop], %[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
break;
default:
BUILD_BUG();
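Editor's note: the percpu.h rewrite above keeps the whole ldxr/stxr retry sequence, including the cbnz back-branch, inside a single asm block, so the compiler cannot schedule other memory accesses between the exclusive load and store (which could clear the exclusive monitor and leave the old C-level do/while spinning). A rough userspace analogue of such a retry loop, using C11 atomics rather than the kernel's exclusives (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

/* Keep retrying the read-modify-write until the atomic update sticks;
 * on arm64 the kernel keeps the equivalent loop entirely in one asm
 * block so nothing can intervene between load- and store-exclusive. */
static long fetch_add_retry(_Atomic long *ptr, long val)
{
	long old = atomic_load_explicit(ptr, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(ptr, &old, old + val,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* old is reloaded on failure; retry */
	return old + val;
}

int main(void)
{
	_Atomic long counter = 40;

	printf("%ld\n", fetch_add_retry(&counter, 2));	/* 42 */
	return 0;
}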
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)v; \
- asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \
+ asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
static inline void config_sctlr_el1(u32 clear, u32 set)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c47257c91b77..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
@@ -278,14 +286,16 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
kasan_check_write(to, n);
if (access_ok(VERIFY_READ, from, n)) {
check_object_size(to, n, false);
- n = __arch_copy_from_user(to, from, n);
- } else /* security hole - plug it */
- memset(to, 0, n);
- return n;
+ res = __arch_copy_from_user(to, from, n);
+ }
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
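Editor's note: untagged_addr() above uses sign_extend64(addr, 55) to strip an ARMv8 top-byte tag before range checks such as access_ok(). A small, hypothetical userspace equivalent of that sign extension (the tag value below is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Analogue of sign_extend64(addr, 55): bit 55 becomes the sign bit and
 * is replicated into bits 63:56, clearing a userspace pointer's tag. */
static inline uint64_t untag(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

int main(void)
{
	uint64_t tagged = 0x2a00007fdeadbeefULL;	/* tag 0x2a in the top byte */

	printf("%#llx -> %#llx\n",
	       (unsigned long long)tagged,
	       (unsigned long long)untag(tagged));	/* tag stripped */
	return 0;
}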
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e162..b0988bb1bf64 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
/*
* Error-checking SWP macros implemented using ldxr{b}/stxr{b}
*/
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS 4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
__asm__ __volatile__( \
+ " mov %w3, %w7\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- "0: ldxr"B" %w2, [%3]\n" \
- "1: stxr"B" %w0, %w1, [%3]\n" \
+ "0: ldxr"B" %w2, [%4]\n" \
+ "1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
- " mov %w0, %w4\n" \
+ " sub %w3, %w3, #1\n" \
+ " cbnz %w3, 0b\n" \
+ " mov %w0, %w5\n" \
" b 3f\n" \
"2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "4: mov %w0, %w5\n" \
+ "4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- : "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
: "memory")
-#define __user_swp_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "b")
/*
* Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
}
while (1) {
- unsigned long temp;
+ unsigned long temp, temp2;
if (type == TYPE_SWPB)
- __user_swpb_asm(*data, address, res, temp);
+ __user_swpb_asm(*data, address, res, temp, temp2);
else
- __user_swp_asm(*data, address, res, temp);
+ __user_swp_asm(*data, address, res, temp, temp2);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
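Editor's note: the SWP{B} emulation above now bounds the exclusive-load/store retry at __SWP_LL_SC_LOOPS (4) attempts and reports -EAGAIN, so the outer loop can notice pending signals instead of spinning inside the asm forever. A loose userspace sketch of that bounded-retry-then-EAGAIN shape, with C11 atomics standing in for ldxr/stxr (illustrative only):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define SWP_LOOPS 4	/* mirrors __SWP_LL_SC_LOOPS */

static int bounded_swap(_Atomic unsigned int *ptr, unsigned int *data)
{
	unsigned int expected;
	int i;

	for (i = 0; i < SWP_LOOPS; i++) {
		expected = atomic_load_explicit(ptr, memory_order_relaxed);
		if (atomic_compare_exchange_weak_explicit(ptr, &expected, *data,
							  memory_order_relaxed,
							  memory_order_relaxed)) {
			*data = expected;	/* SWP returns the old value */
			return 0;
		}
	}
	return -EAGAIN;		/* give up; let the caller decide to retry */
}

int main(void)
{
	_Atomic unsigned int mem = 1;
	unsigned int val = 2;

	while (bounded_swap(&mem, &val) == -EAGAIN)
		;	/* caller-level retry; the kernel checks for signals here */
	printf("old=%u new=%u\n", val, (unsigned int)mem);	/* old=1 new=2 */
	return 0;
}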
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
{
/* Clear SCTLR_EL1.UCT */
config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ return 0;
}
#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
#define pr_fmt(fmt) "CPU features: " fmt
#include <linux/bsearch.h>
+#include <linux/cpumask.h>
#include <linux/sort.h>
+#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
if (caps->enable && cpus_have_cap(caps->capability))
- on_each_cpu(caps->enable, NULL, true);
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+ * uses an IPI, giving us a PSTATE that disappears when
+ * we return.
+ */
+ stop_machine(caps->enable, NULL, cpu_online_mask);
}
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
- msr mdcr_el2, x0 // all PMU counters from EL1
4:
+ csel x0, xzr, x0, lt // all PMU counters from EL1
+ msr mdcr_el2, x0 // (if they exist)
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 27b2f1387df4..01753cd7d3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
+#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
regs->pc, lr, regs->pstate);
printk("sp : %016llx\n", sp);
- for (i = top_reg; i >= 0; i--) {
+
+ i = top_reg;
+
+ while (i >= 0) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
- if (i % 2 == 0)
- printk("\n");
+ i--;
+
+ if (i % 2 == 0) {
+ pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+ i--;
+ }
+
+ pr_cont("\n");
}
printk("\n");
}
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
}
/* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
{
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
if (task_thread_info(next)->addr_limit == KERNEL_DS)
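Editor's note: the __show_regs() rework above prints two registers per line, using pr_cont() for the continuation since each bare printk() now starts its own line. A plain userspace rendering of the same loop structure (register values are placeholders):

#include <stdio.h>

int main(void)
{
	unsigned long long regs[5] = { 0x0, 0x1111, 0x2222, 0x3333, 0x4444 };
	int i = 4;		/* top_reg */

	while (i >= 0) {
		printf("x%-2d: %016llx ", i, regs[i]);
		i--;

		if (i % 2 == 0) {
			/* second register of the pair, emitted as a continuation */
			printf("x%-2d: %016llx ", i, regs[i]);
			i--;
		}

		printf("\n");
	}
	return 0;
}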
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b8799e7c79de..1bec41b5fda3 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
#ifdef CONFIG_KASAN
mov x0, sp
- bl kasan_unpoison_remaining_stack
+ bl kasan_unpoison_task_stack_below
#endif
ldp x19, x20, [x29, #16]
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
bootcpu_valid = true;
+ early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <asm/alternative.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
+#include <asm/exec.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
set_my_cpu_offset(per_cpu_offset(cpu));
/*
+ * PSTATE was not saved over suspend/resume, re-enable any detected
+ * features that might not have been set correctly.
+ */
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+ uao_thread_switch(current);
+
+ /*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled
* through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
{
config_sctlr_el1(SCTLR_EL1_UCI, 0);
+ return 0;
}
#define __user_cache_maint(insn, address, res) \
- asm volatile ( \
- "1: " insn ", %1\n" \
- " mov %w0, #0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %w2\n" \
- " b 2b\n" \
- " .popsection\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ if (untagged_addr(address) >= user_addr_max()) \
+ res = -EFAULT; \
+ else \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT) )
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 0b90497d4424..4fd67ea03bb0 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -79,11 +79,6 @@ ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
-9998:
- sub x0, end, dst
-9999:
- strb wzr, [dst], #1 // zero remaining buffer space
- cmp dst, end
- b.lo 9999b
+9998: sub x0, end, dst // bytes not copied
ret
.previous
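Editor's note: with the fixup above, __arch_copy_from_user() simply reports how many bytes were left uncopied, and the copy_from_user() change earlier in this series zeroes exactly that tail with memset(to + (n - res), 0, res). A hypothetical userspace model of that contract, with the fault point simulated by a parameter:

#include <stdio.h>
#include <string.h>

/* Stand-in for __arch_copy_from_user(): copy what fits before the
 * simulated fault and return the number of bytes NOT copied. */
static unsigned long raw_copy(void *to, const void *from,
			      unsigned long n, unsigned long faulting_at)
{
	unsigned long can_copy = n < faulting_at ? n : faulting_at;

	memcpy(to, from, can_copy);
	return n - can_copy;
}

static unsigned long copy_from(void *to, const void *from, unsigned long n,
			       unsigned long faulting_at)
{
	unsigned long res = raw_copy(to, from, n, faulting_at);

	if (res)	/* zero only the uncopied tail, as the patch does */
		memset((char *)to + (n - res), 0, res);
	return res;
}

int main(void)
{
	char src[8] = "ABCDEFG";
	char dst[8];
	unsigned long res = copy_from(dst, src, 8, 3);	/* "fault" after 3 bytes */

	printf("res=%lu dst=%.3s plus %lu zero bytes\n", res, dst, res);
	return 0;
}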
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/preempt.h>
+#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+ return 0;
}
#endif /* CONFIG_ARM64_PAN */
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
* We need to enable the feature at runtime (instead of adding it to
* PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
*/
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
{
asm(SET_PSTATE_UAO(1));
+ return 0;
}
#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 21c489bdeb4e..212c4d1e2f26 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
- pr_cont(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
- pr_cont(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(MODULES_VADDR, MODULES_END));
- pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(VMALLOC_START, VMALLOC_END));
- pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_text, _etext));
- pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__start_rodata, __init_begin));
- pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__init_begin, __init_end));
- pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_sdata, _edata));
- pr_cont(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__bss_start, __bss_stop));
- pr_cont(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
+ pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
MLK(FIXADDR_START, FIXADDR_TOP));
- pr_cont(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- pr_cont(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
+ pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
- pr_cont(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
+ pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
(unsigned long)virt_to_page(high_memory)));
#endif
- pr_cont(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(__phys_to_virt(memblock_start_of_DRAM()),
(unsigned long)high_memory));
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 778a985c8a70..4b32168cf91a 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -147,7 +147,7 @@ static int __init early_cpu_to_node(int cpu)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
- return node_distance(from, to);
+ return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
@@ -223,8 +223,11 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
void *nd;
int tnid;
- pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
- nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ if (start_pfn < end_pfn)
+ pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
+ start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
+ else
+ pr_info("Initmem setup node %d [<memory-less node>]\n", nid);
nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
nd = __va(nd_pa);
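Editor's note: pcpu_cpu_distance() above was passing CPU numbers straight to node_distance(), which expects node ids; the fix converts with early_cpu_to_node() first. A tiny illustrative model with a made-up two-node topology (not the kernel's data structures):

#include <stdio.h>

static const int cpu_to_node_map[4] = { 0, 0, 1, 1 };	/* assumed topology */
static const int node_dist[2][2] = { { 10, 20 }, { 20, 10 } };

static int node_distance(int a, int b)	{ return node_dist[a][b]; }
static int early_cpu_to_node(int cpu)	{ return cpu_to_node_map[cpu]; }

static int pcpu_cpu_distance(int from, int to)
{
	/* buggy version indexed node_dist[] with CPU numbers:
	 *	return node_distance(from, to);
	 */
	return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}

int main(void)
{
	printf("%d\n", pcpu_cpu_distance(1, 3));	/* CPUs 1,3 -> nodes 0,1 -> 20 */
	return 0;
}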
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 0a2a70096d8b..0eff88aa6d6a 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -163,18 +163,29 @@ static inline int bad_user_access_length(void)
: "a" (__ptr(ptr))); \
})
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ memcpy((void __force *)to, from, n);
+ SSYNC();
+ return 0;
+}
+
+static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (likely(access_ok(VERIFY_READ, from, n))) {
- memcpy(to, (const void __force *)from, n);
- return 0;
- }
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ return __copy_from_user(to, from, n);
memset(to, 0, n);
return n;
}
@@ -182,12 +193,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
- memcpy((void __force *)to, from, n);
- else
- return n;
- SSYNC();
- return 0;
+ if (likely(access_ok(VERIFY_WRITE, to, n)))
+ return __copy_to_user(to, from, n);
+ return n;
}
/*
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe671b1a6..8d79286ee4e8 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &tmp,
- to_copy, 0);
+ to_copy, FOLL_FORCE);
if (copied)
break;
@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &data,
- to_copy, 1);
+ to_copy,
+ FOLL_FORCE | FOLL_WRITE);
break;
case BFIN_MEM_ACCESS_DMA:
if (safe_dma_memcpy(paddr, &data, to_copy))
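Editor's note: the ptrace and get_user_pages() call sites in this series replace bare 0/1 write and force arguments with the FOLL_WRITE / FOLL_FORCE bits of a single gup_flags word. A generic, non-kernel sketch of why a flags word reads better at call sites than positional booleans (constant names here are invented for the example):

#include <stdio.h>

#define ACC_WRITE	0x1
#define ACC_FORCE	0x2

static void access_old(int write, int force)
{
	printf("old: write=%d force=%d\n", write, force);
}

static void access_new(unsigned int flags)
{
	printf("new: write=%d force=%d\n",
	       !!(flags & ACC_WRITE), !!(flags & ACC_FORCE));
}

int main(void)
{
	access_old(1, 1);			/* which bool is which? */
	access_new(ACC_WRITE | ACC_FORCE);	/* self-documenting at the call site */
	return 0;
}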
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index b5698c876fcc..0068fd411a84 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
if (oper.do_cipher){
err = get_user_pages((unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
@@ -3151,7 +3149,7 @@ static void print_dma_descriptors(struct cryptocop_int_operation *iop)
printk("print_dma_descriptors start\n");
printk("iop:\n");
- printk("\tsid: 0x%lld\n", iop->sid);
+ printk("\tsid: 0x%llx\n", iop->sid);
printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229cf870..f0df654ac6fc 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
/* The trampoline page is globally mapped, no page table to traverse.*/
tmp = *(unsigned long*)addr;
} else {
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
int opsize = 0;
/* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
- copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+ copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
if (copied != sizeof(opcode))
return 0;
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660cf8..3cef06875f5c 100644
--- a/arch/h8300/include/asm/thread_info.h
+++ b/arch/h8300/include/asm/thread_info.h
@@ -31,7 +31,6 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit;
- struct restart_block restart_block;
};
/*
@@ -44,9 +43,6 @@ struct thread_info {
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .restart_block = { \
- .fn = do_no_restart_syscall, \
- }, \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index ad1f81f574e5..7138303cbbf2 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
unsigned int er0;
/* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
+ current->restart_block.fn = do_no_restart_syscall;
/* restore passed registers */
#define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
diff --git a/arch/ia64/hp/sim/boot/Makefile b/arch/ia64/hp/sim/boot/Makefile
index 2e805e0cc560..df6e9968c845 100644
--- a/arch/ia64/hp/sim/boot/Makefile
+++ b/arch/ia64/hp/sim/boot/Makefile
@@ -33,5 +33,5 @@ $(obj)/vmlinux.bin: vmlinux FORCE
LDFLAGS_bootloader = -static -T
$(obj)/bootloader: $(src)/bootloader.lds $(obj)/bootloader.o $(obj)/boot_head.o $(obj)/fw-emu.o \
- lib/lib.a arch/ia64/lib/built-in.o arch/ia64/lib/lib.a FORCE
+ lib/lib.a arch/ia64/lib/lib.a FORCE
$(call if_changed,ld)
diff --git a/arch/ia64/include/asm/export.h b/arch/ia64/include/asm/export.h
new file mode 100644
index 000000000000..ad18c6583252
--- /dev/null
+++ b/arch/ia64/include/asm/export.h
@@ -0,0 +1,3 @@
+/* EXPORT_DATA_SYMBOL != EXPORT_SYMBOL here */
+#define KSYM_FUNC(name) @fptr(name)
+#include <asm-generic/export.h>
diff --git a/arch/ia64/include/asm/libata-portmap.h b/arch/ia64/include/asm/libata-portmap.h
index 0e00c9a9f410..7a1f8310596b 100644
--- a/arch/ia64/include/asm/libata-portmap.h
+++ b/arch/ia64/include/asm/libata-portmap.h
@@ -1,12 +1,8 @@
#ifndef __ASM_IA64_LIBATA_PORTMAP_H
#define __ASM_IA64_LIBATA_PORTMAP_H
-#define ATA_PRIMARY_CMD 0x1F0
-#define ATA_PRIMARY_CTL 0x3F6
#define ATA_PRIMARY_IRQ(dev) isa_irq_to_vector(14)
-#define ATA_SECONDARY_CMD 0x170
-#define ATA_SECONDARY_CTL 0x376
#define ATA_SECONDARY_IRQ(dev) isa_irq_to_vector(15)
#endif
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index cfaa7b25084c..6f27a663177c 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -48,6 +48,7 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
+#include <asm/export.h>
#include "minstate.h"
@@ -1345,12 +1346,14 @@ GLOBAL_ENTRY(unw_init_running)
mov rp=loc0
br.ret.sptk.many rp
END(unw_init_running)
+EXPORT_SYMBOL(unw_init_running)
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
GLOBAL_ENTRY(_mcount)
br ftrace_stub
END(_mcount)
+EXPORT_SYMBOL(_mcount)
.here:
br.ret.sptk.many b0
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f845793d12..5ed0ea92c5bf 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
u64 virt_addr=simple_strtoull(buf, NULL, 16);
int ret;
- ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+ ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
index 6b3d6c1f99b6..2c369bf77c4b 100644
--- a/arch/ia64/kernel/esi_stub.S
+++ b/arch/ia64/kernel/esi_stub.S
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <asm/asmmacro.h>
+#include <asm/export.h>
/*
* Inputs:
@@ -94,3 +95,4 @@ GLOBAL_ENTRY(esi_call_phys)
mov gp=loc2
br.ret.sptk.many rp
END(esi_call_phys)
+EXPORT_SYMBOL_GPL(esi_call_phys)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index bb748c596443..c9b5e942f671 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -32,6 +32,7 @@
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
#define SAL_PSR_BITS_TO_SET \
@@ -168,6 +169,7 @@ RestRR: \
__PAGE_ALIGNED_DATA
.global empty_zero_page
+EXPORT_DATA_SYMBOL_GPL(empty_zero_page)
empty_zero_page:
.skip PAGE_SIZE
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 096731049538..d111248af719 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -1,101 +1,11 @@
/*
* Architecture-specific kernel symbols
- *
- * Don't put any exports here unless it's defined in an assembler file.
- * All other exports should be put directly after the definition.
*/
-#include <linux/module.h>
-
-#include <linux/string.h>
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(strlen);
-
-#include <asm/pgtable.h>
-EXPORT_SYMBOL_GPL(empty_zero_page);
-
-#include <asm/checksum.h>
-EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
-EXPORT_SYMBOL(csum_ipv6_magic);
-
-#include <asm/page.h>
-EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(copy_page);
-
#ifdef CONFIG_VIRTUAL_MEM_MAP
+#include <linux/compiler.h>
+#include <linux/export.h>
#include <linux/bootmem.h>
EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
#endif
-
-#include <asm/processor.h>
-EXPORT_SYMBOL(ia64_cpu_info);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(local_per_cpu_offset);
-#endif
-
-#include <asm/uaccess.h>
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__do_clear_user);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* from arch/ia64/lib */
-extern void __divsi3(void);
-extern void __udivsi3(void);
-extern void __modsi3(void);
-extern void __umodsi3(void);
-extern void __divdi3(void);
-extern void __udivdi3(void);
-extern void __moddi3(void);
-extern void __umoddi3(void);
-
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__umoddi3);
-
-#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
-extern void xor_ia64_2(void);
-extern void xor_ia64_3(void);
-extern void xor_ia64_4(void);
-extern void xor_ia64_5(void);
-
-EXPORT_SYMBOL(xor_ia64_2);
-EXPORT_SYMBOL(xor_ia64_3);
-EXPORT_SYMBOL(xor_ia64_4);
-EXPORT_SYMBOL(xor_ia64_5);
-#endif
-
-#include <asm/pal.h>
-EXPORT_SYMBOL(ia64_pal_call_phys_stacked);
-EXPORT_SYMBOL(ia64_pal_call_phys_static);
-EXPORT_SYMBOL(ia64_pal_call_stacked);
-EXPORT_SYMBOL(ia64_pal_call_static);
-EXPORT_SYMBOL(ia64_load_scratch_fpregs);
-EXPORT_SYMBOL(ia64_save_scratch_fpregs);
-
-#include <asm/unwind.h>
-EXPORT_SYMBOL(unw_init_running);
-
-#if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE)
-extern void esi_call_phys (void);
-EXPORT_SYMBOL_GPL(esi_call_phys);
-#endif
-extern char ia64_ivt[];
-EXPORT_SYMBOL(ia64_ivt);
-
-#include <asm/ftrace.h>
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#include <asm/cacheflush.h>
-EXPORT_SYMBOL_GPL(flush_icache_range);
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index b1c3cfc93e71..44a103a5de2b 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -57,6 +57,7 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
+#include <asm/export.h>
#if 0
# define PSR_DEFAULT_BITS psr.ac
@@ -85,6 +86,7 @@
.align 32768 // align on 32KB boundary
.global ia64_ivt
+ EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 0b533441c3c9..94fb2e395498 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -14,6 +14,7 @@
#include <asm/asmmacro.h>
#include <asm/processor.h>
+#include <asm/export.h>
.data
pal_entry_point:
@@ -87,6 +88,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_static)
+EXPORT_SYMBOL(ia64_pal_call_static)
/*
* Make a PAL call using the stacked registers calling convention.
@@ -122,6 +124,7 @@ GLOBAL_ENTRY(ia64_pal_call_stacked)
srlz.d // serialize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_stacked)
+EXPORT_SYMBOL(ia64_pal_call_stacked)
/*
* Make a physical mode PAL call using the static registers calling convention.
@@ -193,6 +196,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_static)
+EXPORT_SYMBOL(ia64_pal_call_phys_static)
/*
* Make a PAL call using the stacked registers in physical mode.
@@ -250,6 +254,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
srlz.d // seralize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_stacked)
+EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
/*
* Save scratch fp scratch regs which aren't saved in pt_regs already
@@ -275,6 +280,7 @@ GLOBAL_ENTRY(ia64_save_scratch_fpregs)
stf.spill [r2] = f15,32
br.ret.sptk.many rp
END(ia64_save_scratch_fpregs)
+EXPORT_SYMBOL(ia64_save_scratch_fpregs)
/*
* Load scratch fp scratch regs (fp10-fp15)
@@ -296,3 +302,4 @@ GLOBAL_ENTRY(ia64_load_scratch_fpregs)
ldf.fill f15 = [r2],32
br.ret.sptk.many rp
END(ia64_load_scratch_fpregs)
+EXPORT_SYMBOL(ia64_load_scratch_fpregs)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6f54d511cc50..31aa8c0f68e1 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
return 0;
}
}
- copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+ copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
if (copied != sizeof(ret))
return -EIO;
*val = ret;
@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
*ia64_rse_skip_regs(krbs, regnum) = val;
}
}
- } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+ } else if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
return 0;
@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
if (ret < 0)
return ret;
- if (access_process_vm(child, addr, &val, sizeof(val), 1)
+ if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
}
@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
/* now copy word for word from user rbs to kernel rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
- if (access_process_vm(child, addr, &val, sizeof(val), 0)
+ if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE)
!= sizeof(val))
return -EIO;
@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
/* read word at location addr */
- if (access_process_vm(child, addr, &data, sizeof(data), 0)
+ if (access_process_vm(child, addr, &data, sizeof(data),
+ FOLL_FORCE)
!= sizeof(data))
return -EIO;
/* ensure return value is not mistaken for error code */
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index afddb3e80a29..7ec7acc844c2 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -71,7 +71,11 @@ EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(local_per_cpu_offset);
+#endif
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 98771e2a78af..1f3d3877618f 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -2,17 +2,15 @@
# Makefile for ia64-specific library routines..
#
-obj-y := io.o
-
-lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
+lib-y := io.o __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \
checksum.o clear_page.o csum_partial_copy.o \
clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \
flush.o ip_fast_csum.o do_csum.o \
memset.o strlen.o xor.o
-obj-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
-obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
+lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
+lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
AFLAGS___divdi3.o =
diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S
index 2d814e7ed191..3cf5b76e587f 100644
--- a/arch/ia64/lib/clear_page.S
+++ b/arch/ia64/lib/clear_page.S
@@ -11,6 +11,7 @@
#include <asm/asmmacro.h>
#include <asm/page.h>
+#include <asm/export.h>
#ifdef CONFIG_ITANIUM
# define L3_LINE_SIZE 64 // Itanium L3 line size
@@ -74,3 +75,4 @@ GLOBAL_ENTRY(clear_page)
mov ar.lc = saved_lc // restore lc
br.ret.sptk.many rp
END(clear_page)
+EXPORT_SYMBOL(clear_page)
diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S
index eecd8577b209..7b40731ee5d8 100644
--- a/arch/ia64/lib/clear_user.S
+++ b/arch/ia64/lib/clear_user.S
@@ -12,6 +12,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
//
// arguments
@@ -207,3 +208,4 @@ GLOBAL_ENTRY(__do_clear_user)
mov ar.lc=saved_lc
br.ret.sptk.many rp
END(__do_clear_user)
+EXPORT_SYMBOL(__do_clear_user)
diff --git a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S
index 127d1d050d78..cbdb9e323ffb 100644
--- a/arch/ia64/lib/copy_page.S
+++ b/arch/ia64/lib/copy_page.S
@@ -16,6 +16,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
+#include <asm/export.h>
#define PIPE_DEPTH 3
#define EPI p[PIPE_DEPTH-1]
@@ -96,3 +97,4 @@ GLOBAL_ENTRY(copy_page)
mov ar.lc=saved_lc
br.ret.sptk.many rp
END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/ia64/lib/copy_page_mck.S b/arch/ia64/lib/copy_page_mck.S
index 3c45d60a81b4..c13f69036876 100644
--- a/arch/ia64/lib/copy_page_mck.S
+++ b/arch/ia64/lib/copy_page_mck.S
@@ -61,6 +61,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
+#include <asm/export.h>
#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
@@ -183,3 +184,4 @@ GLOBAL_ENTRY(copy_page)
mov pr = saved_pr, -1
br.ret.sptk.many rp
END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S
index c952bdc6a093..66facd52e8d0 100644
--- a/arch/ia64/lib/copy_user.S
+++ b/arch/ia64/lib/copy_user.S
@@ -30,6 +30,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
//
// Tuneable parameters
@@ -608,3 +609,4 @@ GLOBAL_ENTRY(__copy_user)
mov ar.pfs=saved_pfs
br.ret.sptk.many rp
END(__copy_user)
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 1d8c88860063..9a5a2f9fad13 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -8,6 +8,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
/*
@@ -60,6 +61,7 @@ GLOBAL_ENTRY(flush_icache_range)
mov ar.lc=r3 // restore ar.lc
br.ret.sptk.many rp
END(flush_icache_range)
+EXPORT_SYMBOL_GPL(flush_icache_range)
/*
* clflush_cache_range(start,size)
diff --git a/arch/ia64/lib/idiv32.S b/arch/ia64/lib/idiv32.S
index c91b5b0129ff..715aed79a9ce 100644
--- a/arch/ia64/lib/idiv32.S
+++ b/arch/ia64/lib/idiv32.S
@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
#ifdef MODULO
# define OP mod
@@ -81,3 +82,4 @@ GLOBAL_ENTRY(NAME)
getf.sig r8 = f6 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
+EXPORT_SYMBOL(NAME)
diff --git a/arch/ia64/lib/idiv64.S b/arch/ia64/lib/idiv64.S
index 627573c4ceb1..25840f697753 100644
--- a/arch/ia64/lib/idiv64.S
+++ b/arch/ia64/lib/idiv64.S
@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
#ifdef MODULO
# define OP mod
@@ -78,3 +79,4 @@ GLOBAL_ENTRY(NAME)
getf.sig r8 = f11 // transfer result to result register
br.ret.sptk.many rp
END(NAME)
+EXPORT_SYMBOL(NAME)
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 620d9dc5220f..648e0d4a4839 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -13,6 +13,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
/*
* Since we know that most likely this function is called with buf aligned
@@ -92,6 +93,7 @@ GLOBAL_ENTRY(ip_fast_csum)
mov b0=r34
br.ret.sptk.many b0
END(ip_fast_csum)
+EXPORT_SYMBOL(ip_fast_csum)
GLOBAL_ENTRY(csum_ipv6_magic)
ld4 r20=[in0],4
@@ -142,3 +144,4 @@ GLOBAL_ENTRY(csum_ipv6_magic)
andcm r8=r9,r8
br.ret.sptk.many b0
END(csum_ipv6_magic)
+EXPORT_SYMBOL(csum_ipv6_magic)
diff --git a/arch/ia64/lib/memcpy.S b/arch/ia64/lib/memcpy.S
index 448908d80b69..ba172fd6acf4 100644
--- a/arch/ia64/lib/memcpy.S
+++ b/arch/ia64/lib/memcpy.S
@@ -14,6 +14,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
GLOBAL_ENTRY(memcpy)
@@ -299,3 +300,4 @@ GLOBAL_ENTRY(memcpy)
COPY(56, 0)
END(memcpy)
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index ab0f87639729..b264b6a7967b 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -15,6 +15,7 @@
*/
#include <asm/asmmacro.h>
#include <asm/page.h>
+#include <asm/export.h>
#define EK(y...) EX(y)
@@ -78,6 +79,7 @@ GLOBAL_ENTRY(memcpy)
br.cond.sptk .common_code
;;
END(memcpy)
+EXPORT_SYMBOL(memcpy)
GLOBAL_ENTRY(__copy_user)
.prologue
// check dest alignment
@@ -664,3 +666,4 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \
/* end of McKinley specific optimization */
END(__copy_user)
+EXPORT_SYMBOL(__copy_user)
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S
index f26c16aefb1c..87b974704075 100644
--- a/arch/ia64/lib/memset.S
+++ b/arch/ia64/lib/memset.S
@@ -18,6 +18,7 @@
to get peak speed when value = 0. */
#include <asm/asmmacro.h>
+#include <asm/export.h>
#undef ret
#define dest in0
@@ -360,3 +361,4 @@ GLOBAL_ENTRY(memset)
br.ret.sptk.many rp
}
END(memset)
+EXPORT_SYMBOL(memset)
diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S
index e0cdac0a85b8..1a6e17c657b4 100644
--- a/arch/ia64/lib/strlen.S
+++ b/arch/ia64/lib/strlen.S
@@ -17,6 +17,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
//
//
@@ -190,3 +191,4 @@ GLOBAL_ENTRY(strlen)
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp // end of successful recovery code
END(strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
index c71eded4285e..9d257684e733 100644
--- a/arch/ia64/lib/strlen_user.S
+++ b/arch/ia64/lib/strlen_user.S
@@ -16,6 +16,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
//
// int strlen_user(char *)
@@ -196,3 +197,4 @@ GLOBAL_ENTRY(__strlen_user)
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.many rp
END(__strlen_user)
+EXPORT_SYMBOL(__strlen_user)
diff --git a/arch/ia64/lib/strncpy_from_user.S b/arch/ia64/lib/strncpy_from_user.S
index a504381f31eb..ca9ccf280e2e 100644
--- a/arch/ia64/lib/strncpy_from_user.S
+++ b/arch/ia64/lib/strncpy_from_user.S
@@ -17,6 +17,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
GLOBAL_ENTRY(__strncpy_from_user)
alloc r2=ar.pfs,3,0,0,0
@@ -42,3 +43,4 @@ GLOBAL_ENTRY(__strncpy_from_user)
[.Lexit:]
br.ret.sptk.many rp
END(__strncpy_from_user)
+EXPORT_SYMBOL(__strncpy_from_user)
diff --git a/arch/ia64/lib/strnlen_user.S b/arch/ia64/lib/strnlen_user.S
index d09066b1e49d..80a5dfd1d402 100644
--- a/arch/ia64/lib/strnlen_user.S
+++ b/arch/ia64/lib/strnlen_user.S
@@ -13,6 +13,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
GLOBAL_ENTRY(__strnlen_user)
.prologue
@@ -43,3 +44,4 @@ GLOBAL_ENTRY(__strnlen_user)
mov ar.lc=r16 // restore ar.lc
br.ret.sptk.many rp
END(__strnlen_user)
+EXPORT_SYMBOL(__strnlen_user)
diff --git a/arch/ia64/lib/xor.S b/arch/ia64/lib/xor.S
index 54e3f7eab8e9..c83f1c410691 100644
--- a/arch/ia64/lib/xor.S
+++ b/arch/ia64/lib/xor.S
@@ -14,6 +14,7 @@
*/
#include <asm/asmmacro.h>
+#include <asm/export.h>
GLOBAL_ENTRY(xor_ia64_2)
.prologue
@@ -51,6 +52,7 @@ GLOBAL_ENTRY(xor_ia64_2)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_2)
+EXPORT_SYMBOL(xor_ia64_2)
GLOBAL_ENTRY(xor_ia64_3)
.prologue
@@ -91,6 +93,7 @@ GLOBAL_ENTRY(xor_ia64_3)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_3)
+EXPORT_SYMBOL(xor_ia64_3)
GLOBAL_ENTRY(xor_ia64_4)
.prologue
@@ -134,6 +137,7 @@ GLOBAL_ENTRY(xor_ia64_4)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_4)
+EXPORT_SYMBOL(xor_ia64_4)
GLOBAL_ENTRY(xor_ia64_5)
.prologue
@@ -182,3 +186,4 @@ GLOBAL_ENTRY(xor_ia64_5)
mov pr = r29, -1
br.ret.sptk.few rp
END(xor_ia64_5)
+EXPORT_SYMBOL(xor_ia64_5)
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 51f5e9aa4901..c145605a981f 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
int i;
for (i = 0; i < p->nr_trap; i++)
- access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+ access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+ FOLL_FORCE | FOLL_WRITE);
p->nr_trap = 0;
}
@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
unsigned long next_insn, code;
unsigned long addr = next_pc & ~3;
- if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+ if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+ FOLL_FORCE)
!= sizeof(next_insn)) {
return -1; /* error */
}
@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
if (register_debug_trap(child, next_pc, next_insn, &code)) {
return -1; /* error */
}
- if (access_process_vm(child, addr, &code, sizeof(code), 1)
+ if (access_process_vm(child, addr, &code, sizeof(code),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(code)) {
return -1; /* error */
}
@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
addr = (regs->bpc - 2) & ~3;
regs->bpc -= 2;
if (unregister_debug_trap(current, addr, &code)) {
- access_process_vm(current, addr, &code, sizeof(code), 1);
+ access_process_vm(current, addr, &code, sizeof(code),
+ FOLL_FORCE | FOLL_WRITE);
invalidate_cache();
}
}
@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
/* Compute next pc. */
pc = get_stack_long(child, PT_BPC);
- if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+ if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+ FOLL_FORCE)
!= sizeof(insn))
return;
diff --git a/arch/m68k/include/asm/export.h b/arch/m68k/include/asm/export.h
new file mode 100644
index 000000000000..0af20f48bd07
--- /dev/null
+++ b/arch/m68k/include/asm/export.h
@@ -0,0 +1,3 @@
+#define KSYM_ALIGN 2
+#define KCRC_ALIGN 2
+#include <asm-generic/export.h>
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 8a1c4d3f91c8..74c898ced8cc 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -13,7 +13,7 @@ extra-$(CONFIG_SUN3X) := head.o
extra-$(CONFIG_SUN3) := sun3-head.o
extra-y += vmlinux.lds
-obj-y := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o
+obj-y := entry.o irq.o module.o process.o ptrace.o
obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
deleted file mode 100644
index 774c1bd59c36..000000000000
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ /dev/null
@@ -1,32 +0,0 @@
-#include <linux/module.h>
-
-asmlinkage long long __ashldi3 (long long, int);
-asmlinkage long long __ashrdi3 (long long, int);
-asmlinkage long long __lshrdi3 (long long, int);
-asmlinkage long long __muldi3 (long long, long long);
-
-/* The following are special because they're not called
- explicitly (the C compiler generates them). Fortunately,
- their interface isn't gonna change any time soon now, so
- it's OK to leave it out of version control. */
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-
-#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
-/*
- * Simpler 68k and ColdFire parts also need a few other gcc functions.
- */
-extern long long __divsi3(long long, long long);
-extern long long __modsi3(long long, long long);
-extern long long __mulsi3(long long, long long);
-extern long long __udivsi3(long long, long long);
-extern long long __umodsi3(long long, long long);
-
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umodsi3);
-#endif
diff --git a/arch/m68k/lib/ashldi3.c b/arch/m68k/lib/ashldi3.c
index 37234c2df47f..8dffd36ec4f2 100644
--- a/arch/m68k/lib/ashldi3.c
+++ b/arch/m68k/lib/ashldi3.c
@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
@@ -55,3 +58,4 @@ __ashldi3 (DItype u, word_type b)
return w.ll;
}
+EXPORT_SYMBOL(__ashldi3);
diff --git a/arch/m68k/lib/ashrdi3.c b/arch/m68k/lib/ashrdi3.c
index 1d59345f36c6..e6565a3ee2c3 100644
--- a/arch/m68k/lib/ashrdi3.c
+++ b/arch/m68k/lib/ashrdi3.c
@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
@@ -56,3 +59,4 @@ __ashrdi3 (DItype u, word_type b)
return w.ll;
}
+EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/m68k/lib/divsi3.S b/arch/m68k/lib/divsi3.S
index 2c0ec85ac661..3a2143f51631 100644
--- a/arch/m68k/lib/divsi3.S
+++ b/arch/m68k/lib/divsi3.S
@@ -33,6 +33,8 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
+#include <asm/export.h>
+
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
@@ -118,3 +120,4 @@ L2: movel d1, sp@-
L3: movel sp@+, d2
rts
+ EXPORT_SYMBOL(__divsi3)
diff --git a/arch/m68k/lib/lshrdi3.c b/arch/m68k/lib/lshrdi3.c
index 49e1ec8f2cc2..039779737c7d 100644
--- a/arch/m68k/lib/lshrdi3.c
+++ b/arch/m68k/lib/lshrdi3.c
@@ -13,6 +13,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
@@ -55,3 +58,4 @@ __lshrdi3 (DItype u, word_type b)
return w.ll;
}
+EXPORT_SYMBOL(__lshrdi3);
diff --git a/arch/m68k/lib/modsi3.S b/arch/m68k/lib/modsi3.S
index 1d9e0efdf31d..1c967649a4e0 100644
--- a/arch/m68k/lib/modsi3.S
+++ b/arch/m68k/lib/modsi3.S
@@ -33,6 +33,8 @@ General Public License for more details. */
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
+#include <asm/export.h>
+
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
@@ -106,3 +108,4 @@ SYM (__modsi3):
movel d1, d0
rts
+ EXPORT_SYMBOL(__modsi3)
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 9006d15b8721..6459af5b2af0 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -14,6 +14,9 @@ but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. */
+#include <linux/compiler.h>
+#include <linux/export.h>
+
#ifdef CONFIG_CPU_HAS_NO_MULDIV64
#define SI_TYPE_SIZE 32
@@ -90,3 +93,4 @@ __muldi3 (DItype u, DItype v)
return w.ll;
}
+EXPORT_SYMBOL(__muldi3);
diff --git a/arch/m68k/lib/mulsi3.S b/arch/m68k/lib/mulsi3.S
index c39ad4e738e9..855675e69a8a 100644
--- a/arch/m68k/lib/mulsi3.S
+++ b/arch/m68k/lib/mulsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-
+#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
@@ -102,4 +102,4 @@ SYM (__mulsi3):
addl d1, d0
rts
-
+ EXPORT_SYMBOL(__mulsi3)
diff --git a/arch/m68k/lib/udivsi3.S b/arch/m68k/lib/udivsi3.S
index 35a5446572a5..78440ae513bf 100644
--- a/arch/m68k/lib/udivsi3.S
+++ b/arch/m68k/lib/udivsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-
+#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
@@ -154,4 +154,4 @@ L2: subql IMM (1),d4
unlk a6 | and return
rts
#endif /* __mcf5200__ || __mcoldfire__ */
-
+ EXPORT_SYMBOL(__udivsi3)
diff --git a/arch/m68k/lib/umodsi3.S b/arch/m68k/lib/umodsi3.S
index 099da514a8fd..b6fd11f58948 100644
--- a/arch/m68k/lib/umodsi3.S
+++ b/arch/m68k/lib/umodsi3.S
@@ -32,7 +32,7 @@ General Public License for more details. */
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
-
+#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
@@ -105,4 +105,4 @@ SYM (__umodsi3):
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
-
+ EXPORT_SYMBOL(__umodsi3)
diff --git a/arch/metag/include/asm/atomic.h b/arch/metag/include/asm/atomic.h
index 470e365f04ea..8ff0a70865f6 100644
--- a/arch/metag/include/asm/atomic.h
+++ b/arch/metag/include/asm/atomic.h
@@ -39,11 +39,10 @@
#define atomic_dec(v) atomic_sub(1, (v))
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
#endif
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
-
#include <asm-generic/atomic64.h>
#endif /* __ASM_METAG_ATOMIC_H */
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index c5cd63a4b6d5..f5f1bdb292de 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -11,6 +11,7 @@ platforms += cavium-octeon
platforms += cobalt
platforms += dec
platforms += emma
+platforms += generic
platforms += jazz
platforms += jz4740
platforms += lantiq
@@ -18,7 +19,6 @@ platforms += lasat
platforms += loongson32
platforms += loongson64
platforms += mti-malta
-platforms += mti-sead3
platforms += netlogic
platforms += paravirt
platforms += pic32
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1a322c807f22..b3c5bde43d34 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_ARCH_HARDENED_USERCOPY
menu "Machine selection"
@@ -72,6 +73,57 @@ choice
prompt "System type"
default SGI_IP22
+config MIPS_GENERIC
+ bool "Generic board-agnostic MIPS kernel"
+ select BOOT_RAW
+ select BUILTIN_DTB
+ select CEVT_R4K
+ select CLKSRC_MIPS_GIC
+ select COMMON_CLK
+ select CPU_MIPSR2_IRQ_VI
+ select CPU_MIPSR2_IRQ_EI
+ select CSRC_R4K
+ select DMA_PERDEV_COHERENT
+ select HW_HAS_PCI
+ select IRQ_MIPS_CPU
+ select LIBFDT
+ select MIPS_CPU_SCACHE
+ select MIPS_GIC
+ select MIPS_L1_CACHE_SHIFT_7
+ select NO_EXCEPT_FILL
+ select PCI_DRIVERS_GENERIC
+ select PINCTRL
+ select SMP_UP if SMP
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R6
+ select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_MICROMIPS
+ select SYS_SUPPORTS_MIPS_CPS
+ select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
+ select SYS_SUPPORTS_SMARTMIPS
+ select USB_EHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+ select USB_EHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+ select USB_OHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+ select USB_UHCI_BIG_ENDIAN_DESC if BIG_ENDIAN
+ select USB_UHCI_BIG_ENDIAN_MMIO if BIG_ENDIAN
+ select USE_OF
+ help
+ Select this to build a kernel which aims to support multiple boards,
+ generally using a flattened device tree passed from the bootloader
+ using the boot protocol defined in the UHI (Unified Hosting
+ Interface) specification.
+
config MIPS_ALCHEMY
bool "Alchemy processor based machines"
select ARCH_PHYS_ADDR_T_64BIT
@@ -478,6 +530,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_ZBOOT
select SYS_SUPPORTS_RELOCATABLE
select USE_OF
+ select LIBFDT
select ZONE_DMA32 if 64BIT
select BUILTIN_DTB
select LIBFDT
@@ -493,42 +546,6 @@ config MACH_PIC32
Microchip PIC32 is a family of general-purpose 32 bit MIPS core
microcontrollers.
-config MIPS_SEAD3
- bool "MIPS SEAD3 board"
- select BOOT_ELF32
- select BOOT_RAW
- select BUILTIN_DTB
- select CEVT_R4K
- select CSRC_R4K
- select CLKSRC_MIPS_GIC
- select COMMON_CLK
- select CPU_MIPSR2_IRQ_VI
- select CPU_MIPSR2_IRQ_EI
- select DMA_NONCOHERENT
- select IRQ_MIPS_CPU
- select MIPS_GIC
- select LIBFDT
- select MIPS_MSC
- select SYS_HAS_CPU_MIPS32_R1
- select SYS_HAS_CPU_MIPS32_R2
- select SYS_HAS_CPU_MIPS32_R6
- select SYS_HAS_CPU_MIPS64_R1
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_32BIT_KERNEL
- select SYS_SUPPORTS_64BIT_KERNEL
- select SYS_SUPPORTS_BIG_ENDIAN
- select SYS_SUPPORTS_LITTLE_ENDIAN
- select SYS_SUPPORTS_SMARTMIPS
- select SYS_SUPPORTS_MICROMIPS
- select SYS_SUPPORTS_MIPS16
- select SYS_SUPPORTS_RELOCATABLE
- select USB_EHCI_BIG_ENDIAN_DESC
- select USB_EHCI_BIG_ENDIAN_MMIO
- select USE_OF
- help
- This enables support for the MIPS Technologies SEAD3 evaluation
- board.
-
config NEC_MARKEINS
bool "NEC EMMA2RH Mark-eins board"
select SOC_EMMA2RH
@@ -988,6 +1005,7 @@ source "arch/mips/ath79/Kconfig"
source "arch/mips/bcm47xx/Kconfig"
source "arch/mips/bcm63xx/Kconfig"
source "arch/mips/bmips/Kconfig"
+source "arch/mips/generic/Kconfig"
source "arch/mips/jazz/Kconfig"
source "arch/mips/jz4740/Kconfig"
source "arch/mips/lantiq/Kconfig"
@@ -1098,6 +1116,10 @@ config DMA_MAYBE_COHERENT
select DMA_NONCOHERENT
bool
+config DMA_PERDEV_COHERENT
+ bool
+ select DMA_MAYBE_COHERENT
+
config DMA_COHERENT
bool
@@ -1401,6 +1423,16 @@ config CPU_LOONGSON1B
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
release 2 instruction set.
+config CPU_LOONGSON1C
+ bool "Loongson 1C"
+ depends on SYS_HAS_CPU_LOONGSON1C
+ select CPU_LOONGSON1
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select LEDS_GPIO_REGISTER
+ help
+ The Loongson 1C is a 32-bit SoC, which implements the MIPS32
+ release 2 instruction set.
+
config CPU_MIPS32_R1
bool "MIPS32 Release 1"
depends on SYS_HAS_CPU_MIPS32_R1
@@ -1850,6 +1882,9 @@ config SYS_HAS_CPU_LOONGSON2F
config SYS_HAS_CPU_LOONGSON1B
bool
+config SYS_HAS_CPU_LOONGSON1C
+ bool
+
config SYS_HAS_CPU_MIPS32_R1
bool
@@ -2906,7 +2941,7 @@ endchoice
choice
prompt "Kernel command line type" if !CMDLINE_OVERRIDE
default MIPS_CMDLINE_FROM_DTB if USE_OF && !ATH79 && !MACH_INGENIC && \
- !MIPS_MALTA && !MIPS_SEAD3 && \
+ !MIPS_MALTA && \
!CAVIUM_OCTEON_SOC
default MIPS_CMDLINE_FROM_BOOTLOADER
@@ -2960,7 +2995,6 @@ config PCI
bool "Support for PCI controller"
depends on HW_HAS_PCI
select PCI_DOMAINS
- select NO_GENERIC_PCI_IOPORT_MAP
help
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
@@ -2981,6 +3015,17 @@ config HT_PCI
config PCI_DOMAINS
bool
+config PCI_DOMAINS_GENERIC
+ bool
+
+config PCI_DRIVERS_GENERIC
+ select PCI_DOMAINS_GENERIC if PCI_DOMAINS
+ bool
+
+config PCI_DRIVERS_LEGACY
+ def_bool !PCI_DRIVERS_GENERIC
+ select NO_GENERIC_PCI_IOPORT_MAP
+
source "drivers/pci/Kconfig"
#
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 598ab2930fce..fbf40d3c8123 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -262,7 +262,14 @@ KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
- VMLINUX_ENTRY_ADDRESS=$(entry-y)
+ VMLINUX_ENTRY_ADDRESS=$(entry-y) \
+ PLATFORM=$(platform-y)
+ifdef CONFIG_32BIT
+bootvars-y += ADDR_BITS=32
+endif
+ifdef CONFIG_64BIT
+bootvars-y += ADDR_BITS=64
+endif
LDFLAGS += -m $(ld-emul)
@@ -302,6 +309,11 @@ boot-y += uImage.gz
boot-y += uImage.lzma
boot-y += uImage.lzo
endif
+boot-y += vmlinux.itb
+boot-y += vmlinux.gz.itb
+boot-y += vmlinux.bz2.itb
+boot-y += vmlinux.lzma.itb
+boot-y += vmlinux.lzo.itb
# compressed boot image targets (arch/mips/boot/compressed/)
bootz-y := vmlinuz
@@ -425,4 +437,67 @@ define archhelp
echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
echo
echo ' These will be default as appropriate for a configured platform.'
+ echo
+ echo ' If you are targeting a system supported by generic kernels you may'
+ echo ' configure the kernel for a given architecture target like so:'
+ echo
+ echo ' {micro32,32,64}{r1,r2,r6}{el,}_defconfig <BOARDS="list of boards">'
+ echo
+ echo ' Otherwise, the following default configurations are available:'
endef
+
+generic_config_dir = $(srctree)/arch/$(ARCH)/configs/generic
+generic_defconfigs :=
+
+#
+# If the user generates a generic kernel configuration without specifying a
+# list of boards to include the config fragments for, default to including all
+# available board config fragments.
+#
+ifeq ($(BOARDS),)
+BOARDS = $(patsubst board-%.config,%,$(notdir $(wildcard $(generic_config_dir)/board-*.config)))
+endif
+
+#
+# Generic kernel configurations which merge generic_defconfig with the
+# appropriate config fragments from arch/mips/configs/generic/, resulting in
+# the ability to easily configure the kernel for a given architecture,
+# endianness & set of boards without duplicating the needed configuration in
+# hundreds of defconfig files.
+#
+define gen_generic_defconfigs
+$(foreach bits,$(1),$(foreach rev,$(2),$(foreach endian,$(3),
+target := $(bits)$(rev)$(filter el,$(endian))_defconfig
+generic_defconfigs += $$(target)
+$$(target): $(generic_config_dir)/$(bits)$(rev).config
+$$(target): $(generic_config_dir)/$(endian).config
+)))
+endef
+
+$(eval $(call gen_generic_defconfigs,32 64,r1 r2 r6,eb el))
+$(eval $(call gen_generic_defconfigs,micro32,r2,eb el))
+
+.PHONY: $(generic_defconfigs)
+$(generic_defconfigs):
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
+ -m -O $(objtree) $(srctree)/arch/$(ARCH)/configs/generic_defconfig $^ \
+ $(foreach board,$(BOARDS),$(generic_config_dir)/board-$(board).config)
+ $(Q)$(MAKE) olddefconfig
+
+#
+# Prevent generic merge_config rules attempting to merge single fragments
+#
+$(generic_config_dir)/%.config: ;
+
+#
+# Legacy defconfig compatibility - these targets used to be real defconfigs but
+# now that the boards have been converted to use the generic kernel they are
+# wrappers around the generic rules above.
+#
+.PHONY: sead3_defconfig
+sead3_defconfig:
+ $(Q)$(MAKE) 32r2el_defconfig BOARDS=sead-3
+
+.PHONY: sead3micro_defconfig
+sead3micro_defconfig:
+ $(Q)$(MAKE) micro32r2el_defconfig BOARDS=sead-3
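With the targets above in place, configuring a generic kernel is a matter of picking an architecture/endianness target and, optionally, a board list; a minimal sketch of the invocation (the cross-compiler prefix is illustrative and not part of this patch):

    make ARCH=mips CROSS_COMPILE=mips-linux-gnu- 32r2el_defconfig BOARDS="sead-3"
    make ARCH=mips CROSS_COMPILE=mips-linux-gnu- sead3_defconfig    # legacy wrapper, effectively the line above

Omitting BOARDS= merges every board-*.config fragment found under arch/mips/configs/generic/, per the default set in the rules above.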
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 2902138b3e0f..7faaa6d593a7 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -48,17 +48,17 @@ void __init plat_mem_setup(void)
clear_c0_config(1 << 19); /* Clear Config[OD] */
hw_coherentio = 0;
- coherentio = 1;
+ coherentio = IO_COHERENCE_ENABLED;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
- coherentio = 0;
+ coherentio = IO_COHERENCE_DISABLED;
break;
case ALCHEMY_CPU_AU1200:
/* Au1200 AB USB does not support coherent memory */
if (0 == (read_c0_prid() & PRID_REV_MASK))
- coherentio = 0;
+ coherentio = IO_COHERENCE_DISABLED;
break;
}
diff --git a/arch/mips/bcm47xx/serial.c b/arch/mips/bcm47xx/serial.c
index df761d38f7fc..e3c9872a4aa5 100644
--- a/arch/mips/bcm47xx/serial.c
+++ b/arch/mips/bcm47xx/serial.c
@@ -1,4 +1,7 @@
/*
+ * 8250 UART probe driver for the BCM47XX platforms
+ * Author: Aurelien Jarno
+ *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -6,7 +9,6 @@
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
@@ -88,9 +90,4 @@ static int __init uart8250_init(void)
}
return -EINVAL;
}
-
-module_init(uart8250_init);
-
-MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("8250 UART probe driver for the BCM47XX platforms");
+device_initcall(uart8250_init);
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 637565284732..b49fc9cb9cad 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -326,6 +326,9 @@ EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
+ if (!clk)
+ return;
+
mutex_lock(&clocks_mutex);
clk_disable_unlocked(clk);
mutex_unlock(&clocks_mutex);
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
index 264328d528c7..2d60f25403de 100644
--- a/arch/mips/bmips/Kconfig
+++ b/arch/mips/bmips/Kconfig
@@ -21,10 +21,6 @@ config DT_BCM93384WVG_VIPER
bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
select BUILTIN_DTB
-config DT_BCM96358NB4SER
- bool "BCM96358NB4SER"
- select BUILTIN_DTB
-
config DT_BCM96368MVWG
bool "BCM96368MVWG"
select BUILTIN_DTB
@@ -65,6 +61,22 @@ config DT_BCM97435SVMB
bool "BCM97435SVMB"
select BUILTIN_DTB
+config DT_COMTREND_VR3032U
+ bool "Comtrend VR-3032u"
+ select BUILTIN_DTB
+
+config DT_NETGEAR_CVG834G
+ bool "NETGEAR CVG834G"
+ select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX4_SERCOMM
+ bool "SFR Neufbox 4 (Sercomm)"
+ select BUILTIN_DTB
+
+config DT_SFR_NEUFBOX6_SERCOMM
+ bool "SFR Neufbox 6 (Sercomm)"
+ select BUILTIN_DTB
+
endchoice
endif
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 6776042679dd..3b6f687f177c 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <linux/libfdt.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/bmips.h>
@@ -98,7 +99,7 @@ static void bcm6328_quirks(void)
static void bcm6358_quirks(void)
{
/*
- * BCM6358 needs special handling for its shared TLB, so
+ * BCM3368/BCM6358 need special handling for their shared TLB, so
* disable SMP for now
*/
bmips_smp_enabled = 0;
@@ -110,10 +111,12 @@ static void bcm6368_quirks(void)
}
static const struct bmips_quirk bmips_quirk_list[] = {
+ { "brcm,bcm3368", &bcm6358_quirks },
{ "brcm,bcm3384-viper", &bcm3384_viper_quirks },
{ "brcm,bcm33843-viper", &bcm3384_viper_quirks },
{ "brcm,bcm6328", &bcm6328_quirks },
{ "brcm,bcm6358", &bcm6358_quirks },
+ { "brcm,bcm6362", &bcm6368_quirks },
{ "brcm,bcm6368", &bcm6368_quirks },
{ "brcm,bcm63168", &bcm6368_quirks },
{ "brcm,bcm63268", &bcm6368_quirks },
@@ -150,6 +153,8 @@ void __init plat_time_init(void)
mips_hpt_frequency = freq;
}
+extern const char __appended_dtb;
+
void __init plat_mem_setup(void)
{
void *dtb;
@@ -159,6 +164,11 @@ void __init plat_mem_setup(void)
ioport_resource.start = 0;
ioport_resource.end = ~0;
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ if (!fdt_check_header(&__appended_dtb))
+ dtb = (void *)&__appended_dtb;
+ else
+#endif
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index acb1988f354e..2728a9a9c7c5 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -100,3 +100,69 @@ $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
$(obj)/uImage: $(obj)/uImage.$(suffix-y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
+
+#
+# Flattened Image Tree (.itb) images
+#
+
+targets += vmlinux.itb
+targets += vmlinux.gz.itb
+targets += vmlinux.bz2.itb
+targets += vmlinux.lzma.itb
+targets += vmlinux.lzo.itb
+
+ifeq ($(ADDR_BITS),32)
+ itb_addr_cells = 1
+endif
+ifeq ($(ADDR_BITS),64)
+ itb_addr_cells = 2
+endif
+
+quiet_cmd_cpp_its_S = ITS $@
+ cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
+ -DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
+ -DVMLINUX_BINARY="\"$(3)\"" \
+ -DVMLINUX_COMPRESSION="\"$(2)\"" \
+ -DVMLINUX_LOAD_ADDRESS=$(VMLINUX_LOAD_ADDRESS) \
+ -DVMLINUX_ENTRY_ADDRESS=$(VMLINUX_ENTRY_ADDRESS) \
+ -DADDR_BITS=$(ADDR_BITS) \
+ -DADDR_CELLS=$(itb_addr_cells)
+
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+ $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
+
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+ $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
+
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+ $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
+
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+ $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
+
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+ $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
+
+quiet_cmd_itb-image = ITB $@
+ cmd_itb-image = \
+ env PATH="$(objtree)/scripts/dtc:$(PATH)" \
+ $(CONFIG_SHELL) $(MKIMAGE) \
+ -D "-I dts -O dtb -p 500 \
+ --include $(objtree)/arch/mips \
+ --warning no-unit_address_vs_reg" \
+ -f $(2) $@
+
+$(obj)/vmlinux.itb: $(obj)/vmlinux.its $(obj)/vmlinux.bin FORCE
+ $(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.gz.itb: $(obj)/vmlinux.gz.its $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.bz2.itb: $(obj)/vmlinux.bz2.its $(obj)/vmlinux.bin.bz2 FORCE
+ $(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.lzma.itb: $(obj)/vmlinux.lzma.its $(obj)/vmlinux.bin.lzma FORCE
+ $(call if_changed,itb-image,$<)
+
+$(obj)/vmlinux.lzo.itb: $(obj)/vmlinux.lzo.its $(obj)/vmlinux.bin.lzo FORCE
+ $(call if_changed,itb-image,$<)
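Once configured as above, the new FIT image targets listed in boot-y can be requested directly; assuming the usual MIPS boot-target plumbing forwards them (sketch, not part of this patch):

    make ARCH=mips CROSS_COMPILE=mips-linux-gnu- vmlinux.gz.itb

This runs the cpp_its_S step against the platform's vmlinux.its.S and then mkimage (via the itb-image rule above) to produce arch/mips/boot/vmlinux.gz.itb.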
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index fda9d387cc08..d61bc2aebf69 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -1,6 +1,5 @@
dtb-$(CONFIG_DT_BCM93384WVG) += bcm93384wvg.dtb
dtb-$(CONFIG_DT_BCM93384WVG_VIPER) += bcm93384wvg_viper.dtb
-dtb-$(CONFIG_DT_BCM96358NB4SER) += bcm96358nb4ser.dtb
dtb-$(CONFIG_DT_BCM96368MVWG) += bcm96368mvwg.dtb
dtb-$(CONFIG_DT_BCM9EJTAGPRB) += bcm9ejtagprb.dtb
dtb-$(CONFIG_DT_BCM97125CBMB) += bcm97125cbmb.dtb
@@ -11,20 +10,29 @@ dtb-$(CONFIG_DT_BCM97362SVMB) += bcm97362svmb.dtb
dtb-$(CONFIG_DT_BCM97420C) += bcm97420c.dtb
dtb-$(CONFIG_DT_BCM97425SVMB) += bcm97425svmb.dtb
dtb-$(CONFIG_DT_BCM97435SVMB) += bcm97435svmb.dtb
+dtb-$(CONFIG_DT_COMTREND_VR3032U) += bcm63268-comtrend-vr-3032u.dtb
+dtb-$(CONFIG_DT_NETGEAR_CVG834G) += bcm3368-netgear-cvg834g.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX4_SERCOMM) += bcm6358-neufbox4-sercomm.dtb
+dtb-$(CONFIG_DT_SFR_NEUFBOX6_SERCOMM) += bcm6362-neufbox6-sercomm.dtb
-dtb-$(CONFIG_DT_NONE) += \
- bcm93384wvg.dtb \
- bcm93384wvg_viper.dtb \
- bcm96358nb4ser.dtb \
- bcm96368mvwg.dtb \
- bcm9ejtagprb.dtb \
- bcm97125cbmb.dtb \
- bcm97346dbsmb.dtb \
- bcm97358svmb.dtb \
- bcm97360svmb.dtb \
- bcm97362svmb.dtb \
- bcm97420c.dtb \
- bcm97425svmb.dtb
+dtb-$(CONFIG_DT_NONE) += \
+ bcm3368-netgear-cvg834g.dtb \
+ bcm6358-neufbox4-sercomm.dtb \
+ bcm6362-neufbox6-sercomm.dtb \
+ bcm63268-comtrend-vr-3032u.dtb \
+ bcm93384wvg.dtb \
+ bcm93384wvg_viper.dtb \
+ bcm96358nb4ser.dtb \
+ bcm96368mvwg.dtb \
+ bcm9ejtagprb.dtb \
+ bcm97125cbmb.dtb \
+ bcm97346dbsmb.dtb \
+ bcm97358svmb.dtb \
+ bcm97360svmb.dtb \
+ bcm97362svmb.dtb \
+ bcm97420c.dtb \
+ bcm97425svmb.dtb \
+ bcm97435svmb.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
new file mode 100644
index 000000000000..2f2e80fdcde8
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/include/ "bcm3368.dtsi"
+
+/ {
+ compatible = "netgear,cvg834g", "brcm,bcm3368";
+ model = "NETGEAR CVG834G";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
new file mode 100644
index 000000000000..bee855cb8073
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
@@ -0,0 +1,101 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm3368";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <150000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_cntl: syscon@fff8c000 {
+ compatible = "syscon";
+ reg = <0xfff8c000 0xc>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@fff8c008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x8>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@fff8c00c {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0xfff8c00c 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
+
+ uart0: serial@fff8c100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfff8c100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@fff8c120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfff8c120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
new file mode 100644
index 000000000000..430d35ca33d5
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
@@ -0,0 +1,108 @@
+/dts-v1/;
+
+/include/ "bcm63268.dtsi"
+
+/ {
+ compatible = "comtrend,vr-3032u", "brcm,bcm63268";
+ model = "Comtrend VR-3032u";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x04000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&leds0 {
+ status = "ok";
+ brcm,serial-leds;
+ brcm,serial-dat-low;
+ brcm,serial-shift-inv;
+
+ led@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0>;
+ /* GPHY0 Speed 0 */
+ };
+ led@1 {
+ reg = <1>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <1>;
+ /* GPHY0 Speed 1 */
+ };
+ led@2 {
+ reg = <2>;
+ active-low;
+ label = "vr-3032u:red:inet";
+ };
+ led@3 {
+ reg = <3>;
+ active-low;
+ label = "vr-3032u:green:dsl";
+ };
+ led@4 {
+ reg = <4>;
+ active-low;
+ label = "vr-3032u:green:usb";
+ };
+ led@7 {
+ reg = <7>;
+ active-low;
+ label = "vr-3032u:green:wps";
+ };
+ led@8 {
+ reg = <8>;
+ active-low;
+ label = "vr-3032u:green:inet";
+ };
+ led@9 {
+ reg = <9>;
+ brcm,hardware-controlled;
+ /* EPHY0 Activity */
+ };
+ led@10 {
+ reg = <10>;
+ brcm,hardware-controlled;
+ /* EPHY1 Activity */
+ };
+ led@11 {
+ reg = <11>;
+ brcm,hardware-controlled;
+ /* EPHY2 Activity */
+ };
+ led@12 {
+ reg = <12>;
+ brcm,hardware-controlled;
+ /* GPHY0 Activity */
+ };
+ led@13 {
+ reg = <13>;
+ brcm,hardware-controlled;
+ /* EPHY0 Speed */
+ };
+ led@14 {
+ reg = <14>;
+ brcm,hardware-controlled;
+ /* EPHY1 Speed */
+ };
+ led@15 {
+ reg = <15>;
+ brcm,hardware-controlled;
+ /* EPHY2 Speed */
+ };
+ led@20 {
+ reg = <20>;
+ active-low;
+ label = "vr-3032u:green:power";
+ default-state = "on";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
new file mode 100644
index 000000000000..7e6bf2cc0287
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -0,0 +1,134 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm63268";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_cntl: syscon@10000000 {
+ compatible = "syscon";
+ reg = <0x10000000 0x14>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x8>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x20>,
+ <0x10000040 0x20>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ uart0: serial@10000180 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000180 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@100001a0 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x100001a0 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <34>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ leds0: led-controller@10001900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10001900 0x24>;
+
+ status = "disabled";
+ };
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm63268-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm63268-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <9>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
index f412117972e6..702eae2a22a0 100644
--- a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
+++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
@@ -12,6 +12,7 @@
};
chosen {
+ bootargs = "console=ttyS0,115200";
stdout-path = &uart0;
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
new file mode 100644
index 000000000000..480f2a5bf1da
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/include/ "bcm6362.dtsi"
+
+/ {
+ compatible = "sfr,nb6-ser", "brcm,bcm6362";
+ model = "SFR NeufBox 6 (Sercomm)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x08000000>;
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ stdout-path = &uart0;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
new file mode 100644
index 000000000000..c507da594f2f
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
@@ -0,0 +1,134 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6362";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <200000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_cntl: syscon@10000000 {
+ compatible = "syscon";
+ reg = <0x10000000 0x14>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@10000008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x8>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ uart0: serial@10000100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <4>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ leds0: led-controller@10001900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10001900 0x24>;
+
+ status = "disabled";
+ };
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm6362-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm6362-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <9>;
+
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 550e1d9e3ee0..bbd00f65ce39 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -26,7 +26,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -40,6 +40,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -49,7 +55,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@441400 {
+ periph_intc: interrupt-controller@441400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x441400 0x30>, <0x441600 0x30>;
@@ -60,7 +66,7 @@
interrupts = <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@401800 {
+ sun_l2_intc: interrupt-controller@401800 {
compatible = "brcm,l2-intc";
reg = <0x401800 0x30>;
interrupt-controller;
@@ -81,7 +87,7 @@
"avd_0", "jtag_0";
};
- upg_irq0_intc: upg_irq0_intc@406780 {
+ upg_irq0_intc: interrupt-controller@406780 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
@@ -183,6 +189,26 @@
status = "disabled";
};
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 18>;
+ };
+
ehci0: usb@488300 {
compatible = "brcm,bcm7125-ehci", "generic-ehci";
reg = <0x488300 0x100>;
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index ec959061d52e..4bbcc95f1c15 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -26,7 +26,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -40,6 +40,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -49,7 +55,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@411400 {
+ periph_intc: interrupt-controller@411400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x411400 0x30>, <0x411600 0x30>;
@@ -60,7 +66,7 @@
interrupts = <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -81,7 +87,7 @@
"jtag_0", "svd_0";
};
- upg_irq0_intc: upg_irq0_intc@406780 {
+ upg_irq0_intc: interrupt-controller@406780 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
@@ -96,7 +102,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x408b80 0x8>;
@@ -210,6 +216,59 @@
status = "disabled";
};
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <53>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 16>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <27 32 2>;
+ };
+
enet0: ethernet@430000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -313,6 +372,26 @@
status = "disabled";
};
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
sata: sata@181000 {
compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
reg-names = "ahci", "top-ctrl";
@@ -352,5 +431,13 @@
#phy-cells = <0>;
};
};
+
+ sdhci0: sdhci@413500 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x413500 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <85>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index ca57fb5eb122..3e42535c8d29 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -20,7 +20,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -34,6 +34,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -43,7 +49,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@411400 {
+ periph_intc: interrupt-controller@411400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x411400 0x30>;
@@ -54,7 +60,7 @@
interrupts = <2>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -75,7 +81,7 @@
"avd_0", "jtag_0";
};
- upg_irq0_intc: upg_irq0_intc@406600 {
+ upg_irq0_intc: interrupt-controller@406600 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406600 0x8>;
@@ -90,7 +96,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x408b80 0x8>;
@@ -194,6 +200,59 @@
status = "disabled";
};
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406700 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406700 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408240 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408240 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
enet0: ethernet@430000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -239,5 +298,25 @@
interrupts = <66>;
status = "disabled";
};
+
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index 1c0c3d438c7a..112a5571c596 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -20,7 +20,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -34,6 +34,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -43,7 +49,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@411400 {
+ periph_intc: interrupt-controller@411400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x411400 0x30>;
@@ -54,7 +60,7 @@
interrupts = <2>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -75,7 +81,7 @@
"avd_0", "jtag_0";
};
- upg_irq0_intc: upg_irq0_intc@406600 {
+ upg_irq0_intc: interrupt-controller@406600 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406600 0x8>;
@@ -90,7 +96,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x408b80 0x8>;
@@ -194,6 +200,51 @@
status = "disabled";
};
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
enet0: ethernet@430000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -240,6 +291,26 @@
status = "disabled";
};
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
sata: sata@181000 {
compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
reg-names = "ahci", "top-ctrl";
@@ -279,5 +350,13 @@
#phy-cells = <0>;
};
};
+
+ sdhci0: sdhci@410000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x410000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <82>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index 6b4713add4b8..34abfb0b07e7 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -26,7 +26,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -40,6 +40,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -49,7 +55,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@411400 {
+ periph_intc: interrupt-controller@411400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x411400 0x30>, <0x411600 0x30>;
@@ -60,7 +66,7 @@
interrupts = <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -81,7 +87,7 @@
"avd_0", "jtag_0";
};
- upg_irq0_intc: upg_irq0_intc@406600 {
+ upg_irq0_intc: interrupt-controller@406600 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406600 0x8>;
@@ -96,7 +102,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@408b80 {
+ upg_aon_irq0_intc: interrupt-controller@408b80 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x408b80 0x8>;
@@ -190,6 +196,51 @@
status = "disabled";
};
+ pwma: pwm@406400 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406400 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406500 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406500 0xa0>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 29 4>;
+ };
+
+ upg_gio_aon: gpio@408c00 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x408c00 0x60>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <21 32 2>;
+ };
+
enet0: ethernet@430000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -236,6 +287,26 @@
status = "disabled";
};
+ hif_l2_intc: interrupt-controller@411000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x411000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <30>;
+ };
+
+ nand: nand@412800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x412800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
sata: sata@181000 {
compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
reg-names = "ahci", "top-ctrl";
@@ -275,5 +346,13 @@
#phy-cells = <0>;
};
};
+
+ sdhci0: sdhci@410000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x410000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <82>;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index 0586bf662571..b143723c674e 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -26,7 +26,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -40,6 +40,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -49,7 +55,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@441400 {
+ periph_intc: interrupt-controller@441400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x441400 0x30>, <0x441600 0x30>;
@@ -60,7 +66,7 @@
interrupts = <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@401800 {
+ sun_l2_intc: interrupt-controller@401800 {
compatible = "brcm,l2-intc";
reg = <0x401800 0x30>;
interrupt-controller;
@@ -82,7 +88,7 @@
"jtag_0";
};
- upg_irq0_intc: upg_irq0_intc@406780 {
+ upg_irq0_intc: interrupt-controller@406780 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
@@ -191,6 +197,34 @@
status = "disabled";
};
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406880 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406880 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 27>;
+ };
+
enet0: ethernet@468000 {
phy-mode = "internal";
phy-handle = <&phy1>;
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index c1c15edaf829..2488d2f61f60 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -26,7 +26,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -40,6 +40,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -49,7 +55,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@41a400 {
+ periph_intc: interrupt-controller@41a400 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x41a400 0x30>, <0x41a600 0x30>;
@@ -60,7 +66,7 @@
interrupts = <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -83,7 +89,7 @@
"vice_0";
};
- upg_irq0_intc: upg_irq0_intc@406780 {
+ upg_irq0_intc: interrupt-controller@406780 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
@@ -98,7 +104,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+ upg_aon_irq0_intc: interrupt-controller@409480 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x409480 0x8>;
@@ -209,6 +215,59 @@
status = "disabled";
};
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <49>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 21>;
+ };
+
+ upg_gio_aon: gpio@4094c0 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x4094c0 0x40>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <18 4>;
+ };
+
enet0: ethernet@b80000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -312,6 +371,26 @@
status = "disabled";
};
+ hif_l2_intc: interrupt-controller@41a000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41a000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <24>;
+ };
+
+ nand: nand@41b800 {
+ compatible = "brcm,brcmnand-v5.0", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand";
+ reg = <0x41b800 0x400>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>;
+ status = "disabled";
+ };
+
sata: sata@181000 {
compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
reg-names = "ahci", "top-ctrl";
@@ -351,5 +430,25 @@
#phy-cells = <0>;
};
};
+
+ sdhci0: sdhci@419000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x419000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <43>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ sdhci1: sdhci@419200 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x419200 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <44>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index a874d3a0e2ee..19fa259b968b 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -38,7 +38,7 @@
uart0 = &uart0;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -52,6 +52,12 @@
#clock-cells = <0>;
clock-frequency = <81000000>;
};
+
+ upg_clk: upg_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
};
rdb {
@@ -61,7 +67,7 @@
compatible = "simple-bus";
ranges = <0 0x10000000 0x01000000>;
- periph_intc: periph_intc@41b500 {
+ periph_intc: interrupt-controller@41b500 {
compatible = "brcm,bcm7038-l1-intc";
reg = <0x41b500 0x40>, <0x41b600 0x40>,
<0x41b700 0x40>, <0x41b800 0x40>;
@@ -73,7 +79,7 @@
interrupts = <2>, <3>, <2>, <3>;
};
- sun_l2_intc: sun_l2_intc@403000 {
+ sun_l2_intc: interrupt-controller@403000 {
compatible = "brcm,l2-intc";
reg = <0x403000 0x30>;
interrupt-controller;
@@ -98,7 +104,7 @@
"scpu";
};
- upg_irq0_intc: upg_irq0_intc@406780 {
+ upg_irq0_intc: interrupt-controller@406780 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
@@ -113,7 +119,7 @@
interrupt-names = "upg_main", "upg_bsc";
};
- upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+ upg_aon_irq0_intc: interrupt-controller@409480 {
compatible = "brcm,bcm7120-l2-intc";
reg = <0x409480 0x8>;
@@ -224,6 +230,59 @@
status = "disabled";
};
+ pwma: pwm@406580 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406580 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ pwmb: pwm@406800 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0x406800 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_clk>;
+ status = "disabled";
+ };
+
+ aon_pm_l2_intc: interrupt-controller@408440 {
+ compatible = "brcm,l2-intc";
+ reg = <0x408440 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <54>;
+ brcm,irq-can-wake;
+ };
+
+ upg_gio: gpio@406700 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x406700 0x80>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_irq0_intc>;
+ interrupts = <6>;
+ brcm,gpio-bank-widths = <32 32 32 21>;
+ };
+
+ upg_gio_aon: gpio@4094c0 {
+ compatible = "brcm,brcmstb-gpio";
+ reg = <0x4094c0 0x40>;
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ interrupt-controller;
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ interrupts = <6>;
+ interrupts-extended = <&upg_aon_irq0_intc 6>,
+ <&aon_pm_l2_intc 5>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <18 4>;
+ };
+
enet0: ethernet@b80000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -327,6 +386,26 @@
status = "disabled";
};
+ hif_l2_intc: interrupt-controller@41b000 {
+ compatible = "brcm,l2-intc";
+ reg = <0x41b000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <24>;
+ };
+
+ nand: nand@41c800 {
+ compatible = "brcm,brcmnand-v6.2", "brcm,brcmnand";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "nand", "flash-dma";
+ reg = <0x41c800 0x600>, <0x41d000 0x100>;
+ interrupt-parent = <&hif_l2_intc>;
+ interrupts = <24>, <4>;
+ status = "disabled";
+ };
+
sata: sata@181000 {
compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
reg-names = "ahci", "top-ctrl";
@@ -366,5 +445,25 @@
#phy-cells = <0>;
};
};
+
+ sdhci0: sdhci@41a000 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x41a000 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <47>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
+
+ sdhci1: sdhci@41a200 {
+ compatible = "brcm,bcm7425-sdhci";
+ reg = <0x41a200 0x100>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <48>;
+ sd-uhs-sdr50;
+ mmc-hs200-1_8v;
+ status = "disabled";
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
index f2449d147c6d..5c24eacd72dd 100644
--- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -45,6 +45,10 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
/* FIXME: USB is wonky; disable it for now */
&ehci0 {
status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
index d3d28816a027..e67eaf30de3d 100644
--- a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "bcm7346.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
@@ -49,6 +50,14 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -85,6 +94,10 @@
status = "okay";
};
+&nand {
+ status = "okay";
+};
+
&sata {
status = "okay";
};
@@ -92,3 +105,7 @@
&sata_phy {
status = "okay";
};
+
+&sdhci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
index 02ce6b429dc4..ee4607fae47a 100644
--- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "bcm7358.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
/ {
compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
@@ -45,6 +46,14 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -56,3 +65,7 @@
&ohci0 {
status = "okay";
};
+
+&nand {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index 73124be9548a..bed821b03013 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -45,6 +45,10 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -64,3 +68,7 @@
&sata_phy {
status = "okay";
};
+
+&sdhci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
index 3cfcaebe7f79..68fd823868e0 100644
--- a/arch/mips/boot/dts/brcm/bcm97362svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "bcm7362.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
/ {
compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
@@ -41,6 +42,10 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -53,6 +58,10 @@
status = "okay";
};
+&nand {
+ status = "okay";
+};
+
&sata {
status = "okay";
};
@@ -60,3 +69,7 @@
&sata_phy {
status = "okay";
};
+
+&sdhci0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
index 600d57abee05..e66271af055e 100644
--- a/arch/mips/boot/dts/brcm/bcm97420c.dts
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -51,6 +51,14 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
/* FIXME: MAC driver comes up but cannot attach to PHY */
&enet0 {
status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 119c714805cb..f95ba1bf3e58 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "bcm7425.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
@@ -51,6 +52,14 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -86,3 +95,15 @@
&ohci3 {
status = "okay";
};
+
+&nand {
+ status = "okay";
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index 43e3ba27f07b..fb37b7111bf4 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -1,6 +1,7 @@
/dts-v1/;
/include/ "bcm7435.dtsi"
+/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97435svmb", "brcm,bcm7435";
@@ -51,6 +52,14 @@
status = "okay";
};
+&pwma {
+ status = "okay";
+};
+
+&pwmb {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -87,6 +96,10 @@
status = "okay";
};
+&nand {
+ status = "okay";
+};
+
&sata {
status = "okay";
};
@@ -94,3 +107,11 @@
&sata_phy {
status = "okay";
};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
new file mode 100644
index 000000000000..3c24f97de922
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch24.dtsi
@@ -0,0 +1,25 @@
+&nand {
+ nandcs@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>;
+ nand-on-flash-bbt;
+
+ nand-ecc-strength = <24>;
+ nand-ecc-step-size = <1024>;
+ brcm,nand-oob-sector-size = <27>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash1.rootfs@0 {
+ reg = <0x0 0x10000000>;
+ };
+
+ flash1.kernel@10000000 {
+ reg = <0x10000000 0x400000>;
+ };
+ };
+ };
+};
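
Note: this include selects 24-bit BCH ECC over 1 KiB steps for NAND chip-select 1 and describes a fixed rootfs/kernel layout. As an illustration of the binding only (the brcmnand driver has its own parsing path), the generic OF helpers read such properties like this:

	#include <linux/of.h>

	static void example_read_ecc(struct device_node *cs_np,
				     u32 *strength, u32 *step_size)
	{
		/* nand-ecc-strength = <24>: correctable bit errors per step */
		if (of_property_read_u32(cs_np, "nand-ecc-strength", strength))
			*strength = 0;	/* absent: let the driver pick a default */

		/* nand-ecc-step-size = <1024>: bytes covered by each ECC word */
		if (of_property_read_u32(cs_np, "nand-ecc-step-size", step_size))
			*step_size = 0;
	}
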
diff --git a/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
new file mode 100644
index 000000000000..cb531816ef4c
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm97xxx-nand-cs1-bch4.dtsi
@@ -0,0 +1,25 @@
+&nand {
+ nandcs@1 {
+ compatible = "brcm,nandcs";
+ reg = <1>;
+ nand-on-flash-bbt;
+
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ brcm,nand-oob-sector-size = <16>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ flash1.rootfs@0 {
+ reg = <0x0 0x10000000>;
+ };
+
+ flash1.kernel@10000000 {
+ reg = <0x10000000 0x400000>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
index b134798a0fd7..cfa29156eb69 100644
--- a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -8,55 +8,16 @@
* published by the Free Software Foundation.
*/
-/include/ "octeon_3xxx.dtsi"
+/include/ "dlink_dsr-500n-1000n.dtsi"
#include <dt-bindings/gpio/gpio.h>
/ {
model = "dlink,dsr-1000n";
soc@0 {
- smi0: mdio@1180000001800 {
- phy8: ethernet-phy@8 {
- reg = <8>;
- compatible = "ethernet-phy-ieee802.3-c22";
- };
- };
-
- pip: pip@11800a0000000 {
- interface@0 {
- ethernet@0 {
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- ethernet@1 {
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- ethernet@2 {
- phy-handle = <&phy8>;
- };
- };
- };
-
- twsi0: i2c@1180000001000 {
- rtc@68 {
- compatible = "dallas,ds1337";
- reg = <0x68>;
- };
- };
-
uart0: serial@1180000000800 {
clock-frequency = <500000000>;
};
-
- usbn: usbn@1180068000000 {
- refclk-frequency = <12000000>;
- refclk-type = "crystal";
- };
};
leds {
@@ -87,8 +48,4 @@
gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
};
};
-
- aliases {
- pip = &pip;
- };
};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
new file mode 100644
index 000000000000..246b598201f8
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n-1000n.dtsi
@@ -0,0 +1,58 @@
+/*
+ * Device tree source for D-Link DSR-500N/1000N (common parts).
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@1 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@2 {
+ phy-handle = <&phy8>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ };
+
+ usbn: usbn@1180068000000 {
+ refclk-frequency = <12000000>;
+ refclk-type = "crystal";
+ };
+ };
+
+ aliases {
+ pip = &pip;
+ };
+};
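
Note: the shared dtsi keeps the two switch-facing ports as fixed 1000/full links and leaves only port 2 behind a real PHY. For reference, the standard phylib pattern for consuming a fixed-link sub-node like the ones above (a generic sketch, not the Octeon ethernet driver itself):

	#include <linux/of_mdio.h>

	static int example_attach_port(struct device_node *port_np)
	{
		/* ethernet@0 / ethernet@1: register the emulated 1000/full PHY */
		if (of_phy_is_fixed_link(port_np))
			return of_phy_register_fixed_link(port_np);

		/* ethernet@2: a real PHY is referenced via phy-handle = <&phy8> */
		return 0;
	}
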
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
new file mode 100644
index 000000000000..78886e172c48
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-500n.dts
@@ -0,0 +1,40 @@
+/*
+ * Device tree source for D-Link DSR-500N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "dlink_dsr-500n-1000n.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "dlink,dsr-500n";
+ compatible = "dlink,dsr-500n", "cavium,octeon-3860";
+
+ soc@0 {
+ uart0: serial@1180000000800 {
+ clock-frequency = <300000000>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ usb {
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/mti/Makefile b/arch/mips/boot/dts/mti/Makefile
index 144d776cc9f2..fcabd69b7030 100644
--- a/arch/mips/boot/dts/mti/Makefile
+++ b/arch/mips/boot/dts/mti/Makefile
@@ -1,5 +1,5 @@
dtb-$(CONFIG_MIPS_MALTA) += malta.dtb
-dtb-$(CONFIG_MIPS_SEAD3) += sead3.dtb
+dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index b18c46637d21..f604a272d91d 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -1,5 +1,8 @@
/dts-v1/;
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
/memreserve/ 0x00000000 0x00001000; /* YAMON exception vectors */
/memreserve/ 0x00001000 0x000ef000; /* YAMON */
/memreserve/ 0x000f0000 0x00010000; /* PIIX4 ISA memory */
@@ -8,4 +11,100 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "mti,malta";
+
+ cpu_intc: interrupt-controller {
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gic: interrupt-controller@1bdc0000 {
+ compatible = "mti,gic";
+ reg = <0x1bdc0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ /*
+ * Declare the interrupt-parent even though the mti,gic
+ * binding doesn't require it, such that the kernel can
+ * figure out that cpu_intc is the root interrupt
+ * controller & should be probed first.
+ */
+ interrupt-parent = <&cpu_intc>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ };
+ };
+
+ i8259: interrupt-controller@20 {
+ compatible = "intel,i8259";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ flash@1e000000 {
+ compatible = "intel,dt28f160", "cfi-flash";
+ reg = <0x1e000000 0x400000>;
+ bank-width = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ yamon@0 {
+ label = "YAMON";
+ reg = <0x0 0x100000>;
+ read-only;
+ };
+
+ user-fs@100000 {
+ label = "User FS";
+ reg = <0x100000 0x2e0000>;
+ };
+
+ board-config@3e0000 {
+ label = "Board Config";
+ reg = <0x3e0000 0x20000>;
+ read-only;
+ };
+ };
+ };
+
+ fpga_regs: system-controller@1f000000 {
+ compatible = "mti,malta-fpga", "syscon", "simple-mfd";
+ reg = <0x1f000000 0x1000>;
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&fpga_regs>;
+ offset = <0x500>;
+ mask = <0x4d>;
+ };
+ };
+
+ isa {
+ compatible = "isa";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <1 0 0 0x1000>;
+
+ rtc@70 {
+ compatible = "motorola,mc146818";
+ reg = <1 0x70 0x8>;
+
+ interrupt-parent = <&i8259>;
+ interrupts = <8>;
+ };
+ };
};
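
Note: with the syscon-reboot node above, a restart becomes a single register write through the FPGA syscon: the generic handler (drivers/power/reset/syscon-reboot.c) writes the mask value to the given offset. Roughly, as a sketch of the effect rather than the driver source:

	#include <linux/regmap.h>

	static int example_malta_restart(struct regmap *fpga_regs)
	{
		/* offset = <0x500>, mask = <0x4d> from the node above */
		return regmap_write(fpga_regs, 0x500, 0x4d);
	}
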
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index e4b317d414f1..b112879a5d9d 100644
--- a/arch/mips/boot/dts/mti/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -4,10 +4,23 @@
/memreserve/ 0x00001000 0x000ef000; // ROM data
/memreserve/ 0x000f0000 0x004cc000; // reserved
+#include <dt-bindings/interrupt-controller/mips-gic.h>
+
/ {
#address-cells = <1>;
#size-cells = <1>;
compatible = "mti,sead-3";
+ model = "MIPS SEAD-3";
+ interrupt-parent = <&gic>;
+
+ chosen {
+ stdout-path = "uart1:115200";
+ };
+
+ aliases {
+ uart0 = &uart0;
+ uart1 = &uart1;
+ };
cpus {
cpu@0 {
@@ -19,4 +32,229 @@
device_type = "memory";
reg = <0x0 0x08000000>;
};
+
+ cpu_intc: interrupt-controller {
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gic: interrupt-controller@1b1c0000 {
+ compatible = "mti,gic";
+ reg = <0x1b1c0000 0x20000>;
+
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ /*
+ * Declare the interrupt-parent even though the mti,gic
+ * binding doesn't require it, such that the kernel can
+ * figure out that cpu_intc is the root interrupt
+ * controller & should be probed first.
+ */
+ interrupt-parent = <&cpu_intc>;
+
+ timer {
+ compatible = "mti,gic-timer";
+ interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
+ };
+ };
+
+ ehci@1b200000 {
+ compatible = "generic-ehci";
+ reg = <0x1b200000 0x1000>;
+
+ interrupts = <0>; /* GIC 0 or CPU 6 */
+
+ has-transaction-translator;
+ };
+
+ flash@1c000000 {
+ compatible = "intel,28f128j3", "cfi-flash";
+ reg = <0x1c000000 0x2000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ bank-width = <4>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ user-fs@0 {
+ label = "User FS";
+ reg = <0x0 0x1fc0000>;
+ };
+
+ board-config@3e0000 {
+ label = "Board Config";
+ reg = <0x1fc0000 0x40000>;
+ };
+ };
+ };
+
+ fpga_regs: system-controller@1f000000 {
+ compatible = "mti,sead3-fpga", "syscon", "simple-mfd";
+ reg = <0x1f000000 0x200>;
+
+ reboot {
+ compatible = "syscon-reboot";
+ regmap = <&fpga_regs>;
+ offset = <0x50>;
+ mask = <0x4d>;
+ };
+
+ poweroff {
+ compatible = "restart-poweroff";
+ };
+ };
+
+ system-controller@1f000200 {
+ compatible = "mti,sead3-cpld", "syscon", "simple-mfd";
+ reg = <0x1f000200 0x300>;
+
+ led@10.0 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x1>;
+ label = "pled0";
+ };
+ led@10.1 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x2>;
+ label = "pled1";
+ };
+ led@10.2 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x4>;
+ label = "pled2";
+ };
+ led@10.3 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x8>;
+ label = "pled3";
+ };
+ led@10.4 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x10>;
+ label = "pled4";
+ };
+ led@10.5 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x20>;
+ label = "pled5";
+ };
+ led@10.6 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x40>;
+ label = "pled6";
+ };
+ led@10.7 {
+ compatible = "register-bit-led";
+ offset = <0x10>;
+ mask = <0x80>;
+ label = "pled7";
+ };
+
+ led@18.0 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x1>;
+ label = "fled0";
+ };
+ led@18.1 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x2>;
+ label = "fled1";
+ };
+ led@18.2 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x4>;
+ label = "fled2";
+ };
+ led@18.3 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x8>;
+ label = "fled3";
+ };
+ led@18.4 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x10>;
+ label = "fled4";
+ };
+ led@18.5 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x20>;
+ label = "fled5";
+ };
+ led@18.6 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x40>;
+ label = "fled6";
+ };
+ led@18.7 {
+ compatible = "register-bit-led";
+ offset = <0x18>;
+ mask = <0x80>;
+ label = "fled7";
+ };
+
+ lcd@200 {
+ compatible = "mti,sead3-lcd";
+ offset = <0x200>;
+ };
+ };
+
+ /* UART connected to FTDI & miniUSB socket */
+ uart0: uart@1f000900 {
+ compatible = "ns16550a";
+ reg = <0x1f000900 0x20>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ clock-frequency = <14745600>;
+
+ interrupts = <3>; /* GIC 3 or CPU 4 */
+
+ no-loopback-test;
+ };
+
+ /* UART connected to RS232 socket */
+ uart1: uart@1f000800 {
+ compatible = "ns16550a";
+ reg = <0x1f000800 0x20>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+
+ clock-frequency = <14745600>;
+
+ interrupts = <2>; /* GIC 2 or CPU 4 */
+
+ no-loopback-test;
+ };
+
+ eth@1f010000 {
+ compatible = "smsc,lan9115";
+ reg = <0x1f010000 0x10000>;
+ reg-io-width = <4>;
+
+ interrupts = <0>; /* GIC 0 or CPU 6 */
+
+ phy-mode = "mii";
+ smsc,irq-push-pull;
+ smsc,save-mac-address;
+ };
};
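
Note: each pled/fled node above is one bit of a CPLD register exposed through the simple-mfd/syscon parent; the syscon LED driver (CONFIG_LEDS_SYSCON, enabled in the SEAD-3 board config added later in this patch) simply flips that bit through the regmap. In outline:

	#include <linux/types.h>
	#include <linux/regmap.h>

	static void example_set_led(struct regmap *cpld, unsigned int offset,
				    unsigned int mask, bool on)
	{
		/* e.g. offset = <0x10>, mask = <0x1> drives "pled0" */
		regmap_update_bits(cpld, offset, mask, on ? mask : 0);
	}
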
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index ff49fc04500c..ab8362e04461 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -36,8 +36,6 @@
#include <asm/octeon/cvmx-config.h>
-#include <asm/octeon/cvmx-mdio.h>
-
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-helper-board.h>
@@ -46,17 +44,6 @@
#include <asm/octeon/cvmx-asxx-defs.h>
/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
- NULL;
-
-/**
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
@@ -222,12 +209,6 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
cvmx_helper_link_info_t result;
- int phy_addr;
- int is_broadcom_phy = 0;
-
- /* Give the user a chance to override the processing of this function */
- if (cvmx_override_board_link_get)
- return cvmx_override_board_link_get(ipd_port);
/* Unless we fix it later, all links are defaulted to down */
result.u64 = 0;
@@ -263,8 +244,7 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
result.s.full_duplex = 1;
result.s.speed = 1000;
return result;
- } else /* The other port uses a broadcom PHY */
- is_broadcom_phy = 1;
+ }
break;
case CVMX_BOARD_TYPE_BBGW_REF:
/* Port 1 on these boards is always Gigabit */
@@ -282,108 +262,7 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
break;
}
- phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
- if (phy_addr != -1) {
- if (is_broadcom_phy) {
- /*
- * Below we are going to read SMI/MDIO
- * register 0x19 which works on Broadcom
- * parts
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- 0x19);
- switch ((phy_status >> 8) & 0x7) {
- case 0:
- result.u64 = 0;
- break;
- case 1:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 10;
- break;
- case 2:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10;
- break;
- case 3:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 100;
- break;
- case 4:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 5:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 6:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 1000;
- break;
- case 7:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- break;
- }
- } else {
- /*
- * This code assumes we are using a Marvell
- * Gigabit PHY. All the speed information can
- * be read from register 17 in one
- * go. Somebody using a different PHY will
- * need to handle it above in the board
- * specific area.
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
-
- /*
- * If the resolve bit 11 isn't set, see if
- * autoneg is turned off (bit 12, reg 0). The
- * resolve bit doesn't get set properly when
- * autoneg is off, so force it.
- */
- if ((phy_status & (1 << 11)) == 0) {
- int auto_status =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0);
- if ((auto_status & (1 << 12)) == 0)
- phy_status |= 1 << 11;
- }
-
- /*
- * Only return a link if the PHY has finished
- * auto negotiation and set the resolved bit
- * (bit 11)
- */
- if (phy_status & (1 << 11)) {
- result.s.link_up = 1;
- result.s.full_duplex = ((phy_status >> 13) & 1);
- switch ((phy_status >> 14) & 3) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.u64 = 0;
- break;
- }
- }
- }
- } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
|| OCTEON_IS_MODEL(OCTEON_CN58XX)
|| OCTEON_IS_MODEL(OCTEON_CN50XX)) {
/*
@@ -433,176 +312,6 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
}
/**
- * This function as a board specific method of changing the PHY
- * speed, duplex, and auto-negotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @enable_autoneg:
- * Non zero if you want to enable auto-negotiation.
- * @link_info: Link speed to program. If the speed is zero and auto-negotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info)
-{
-
- /* Set the flow control settings based on link_flags */
- if ((link_flags & set_phy_link_flags_flow_control_mask) !=
- set_phy_link_flags_flow_control_dont_touch) {
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.asymmetric_pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- reg_autoneg_adver.s.pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- }
-
- /* If speed isn't set and autoneg is on advertise all supported modes */
- if ((link_flags & set_phy_link_flags_autoneg)
- && (link_info.s.speed == 0)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 =
- reg_status.s.capable_100base_t4;
- reg_autoneg_adver.s.advert_10base_tx_full =
- reg_status.s.capable_10_full;
- reg_autoneg_adver.s.advert_10base_tx_half =
- reg_status.s.capable_10_half;
- reg_autoneg_adver.s.advert_100base_tx_full =
- reg_status.s.capable_100base_x_full;
- reg_autoneg_adver.s.advert_100base_tx_half =
- reg_status.s.capable_100base_x_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status) {
- reg_extended_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full =
- reg_extended_status.s.capable_1000base_t_full;
- reg_control_1000.s.advert_1000base_t_half =
- reg_extended_status.s.capable_1000base_t_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- }
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else if ((link_flags & set_phy_link_flags_autoneg)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 = 0;
- reg_autoneg_adver.s.advert_10base_tx_full = 0;
- reg_autoneg_adver.s.advert_10base_tx_half = 0;
- reg_autoneg_adver.s.advert_100base_tx_full = 0;
- reg_autoneg_adver.s.advert_100base_tx_half = 0;
- if (reg_status.s.capable_extended_status) {
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full = 0;
- reg_control_1000.s.advert_1000base_t_half = 0;
- }
- switch (link_info.s.speed) {
- case 10:
- reg_autoneg_adver.s.advert_10base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_10base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 100:
- reg_autoneg_adver.s.advert_100base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_100base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 1000:
- reg_control_1000.s.advert_1000base_t_full =
- link_info.s.full_duplex;
- reg_control_1000.s.advert_1000base_t_half =
- !link_info.s.full_duplex;
- break;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status)
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else {
- cvmx_mdio_phy_reg_control_t reg_control;
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 0;
- reg_control.s.restart_autoneg = 1;
- reg_control.s.duplex = link_info.s.full_duplex;
- if (link_info.s.speed == 1000) {
- reg_control.s.speed_msb = 1;
- reg_control.s.speed_lsb = 0;
- } else if (link_info.s.speed == 100) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 1;
- } else if (link_info.s.speed == 10) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 0;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- }
- return 0;
-}
-
-/**
* This function is called by cvmx_helper_interface_probe() after it
* determines the number of ports Octeon can support on a specific
* interface. This function is the per board location to override
@@ -676,48 +385,6 @@ int __cvmx_helper_board_hardware_enable(int interface)
0xc);
}
} else if (cvmx_sysinfo_get()->board_type ==
- CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
- /*
- * Broadcom PHYs require differnet ASX
- * clocks. Unfortunately many boards don't define a
- * new board Id and simply mangle the
- * CN3010_EVB_HS5
- */
- if (interface == 0) {
- /*
- * Some boards use a hacked up bootloader that
- * identifies them as CN3010_EVB_HS5
- * evaluation boards. This leads to all kinds
- * of configuration problems. Detect one
- * case, and print warning, while trying to do
- * the right thing.
- */
- int phy_addr = cvmx_helper_board_get_mii_address(0);
- if (phy_addr != -1) {
- int phy_identifier =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0x2);
- /* Is it a Broadcom PHY? */
- if (phy_identifier == 0x0143) {
- cvmx_dprintf("\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf
- ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
- cvmx_dprintf
- ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
- cvmx_dprintf
- ("ERROR: All boards require a unique board type to identify them.\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf("\n");
- cvmx_wait(1000000000);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
- (0, interface), 5);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
- (0, interface), 5);
- }
- }
- }
- } else if (cvmx_sysinfo_get()->board_type ==
CVMX_BOARD_TYPE_UBNT_E100) {
cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0);
cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10);
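
Note: throughout this helper, including the code removed above, a single int packs both the SMI/MDIO bus and the PHY address, which is why every access splits it as phy_addr >> 8 and phy_addr & 0xff. A small decode sketch of that convention, inferred from the calls above:

	static inline int example_mdio_bus(int phy_addr)
	{
		return phy_addr >> 8;	/* upper bits: SMI/MDIO bus number */
	}

	static inline int example_mdio_dev(int phy_addr)
	{
		return phy_addr & 0xff;	/* lower byte: PHY address on that bus */
	}
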
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index f59c88ee9b31..671ab1db2727 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -33,8 +33,6 @@
#include <asm/octeon/cvmx-config.h>
-
-#include <asm/octeon/cvmx-mdio.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
@@ -243,8 +241,7 @@ int __cvmx_helper_rgmii_enable(int interface)
/* enable the ports now */
for (port = 0; port < num_ports; port++) {
union cvmx_gmxx_prtx_cfg gmx_cfg;
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
- (interface, port));
+
gmx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
gmx_cfg.s.en = 1;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 6f9609e63a65..54375340afe8 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -34,7 +34,6 @@
#include <asm/octeon/cvmx-config.h>
-#include <asm/octeon/cvmx-mdio.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index a56ee590de1f..d347fe13b666 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -234,8 +234,6 @@ int __cvmx_helper_xaui_enable(int interface)
cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
-
/* (8) Enable packet reception */
xauiMiscCtl.s.gmxeno = 0;
cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index ff26d0217b87..6456af642471 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -841,7 +841,6 @@ int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
int retry_cnt;
int retry_loop_cnt;
int i;
- cvmx_helper_link_info_t link_info;
/* Save values for restore at end */
uint64_t prtx_cfg =
@@ -1002,15 +1001,6 @@ fix_ipd_exit:
(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
frame_max);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
- /* Set link to down so autonegotiation will set it up again */
- link_info.u64 = 0;
- cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
-
- /*
- * Bring the link back up as autonegotiation is not done in
- * user applications.
- */
- cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
CVMX_SYNC;
if (num_segs)
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 5537f95b28c9..9a2db1c013d9 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -65,7 +65,8 @@ EXPORT_SYMBOL(octeon_should_swizzle_table);
extern void pci_console_init(const char *arg);
#endif
-static unsigned long long MAX_MEMORY = 512ull << 20;
+static unsigned long long max_memory = ULLONG_MAX;
+static unsigned long long reserve_low_mem;
DEFINE_SEMAPHORE(octeon_bootbus_sem);
EXPORT_SYMBOL(octeon_bootbus_sem);
@@ -75,7 +76,6 @@ struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
-static unsigned long long RESERVE_LOW_MEM = 0ull;
#ifdef CONFIG_KEXEC
#ifdef CONFIG_SMP
/*
@@ -125,18 +125,18 @@ static void kexec_bootmem_init(uint64_t mem_size, uint32_t low_reserved_bytes)
bootmem_desc->major_version = CVMX_BOOTMEM_DESC_MAJ_VER;
bootmem_desc->minor_version = CVMX_BOOTMEM_DESC_MIN_VER;
- addr = (OCTEON_DDR0_BASE + RESERVE_LOW_MEM + low_reserved_bytes);
+ addr = (OCTEON_DDR0_BASE + reserve_low_mem + low_reserved_bytes);
bootmem_desc->head_addr = 0;
if (mem_size <= OCTEON_DDR0_SIZE) {
__cvmx_bootmem_phy_free(addr,
- mem_size - RESERVE_LOW_MEM -
+ mem_size - reserve_low_mem -
low_reserved_bytes, 0);
return;
}
__cvmx_bootmem_phy_free(addr,
- OCTEON_DDR0_SIZE - RESERVE_LOW_MEM -
+ OCTEON_DDR0_SIZE - reserve_low_mem -
low_reserved_bytes, 0);
mem_size -= OCTEON_DDR0_SIZE;
@@ -857,15 +857,15 @@ void __init prom_init(void)
/* Default to 64MB in the simulator to speed things up */
if (octeon_is_simulation())
- MAX_MEMORY = 64ull << 20;
+ max_memory = 64ull << 20;
arg = strstr(arcs_cmdline, "mem=");
if (arg) {
- MAX_MEMORY = memparse(arg + 4, &p);
- if (MAX_MEMORY == 0)
- MAX_MEMORY = 32ull << 30;
+ max_memory = memparse(arg + 4, &p);
+ if (max_memory == 0)
+ max_memory = 32ull << 30;
if (*p == '@')
- RESERVE_LOW_MEM = memparse(p + 1, &p);
+ reserve_low_mem = memparse(p + 1, &p);
}
arcs_cmdline[0] = 0;
@@ -875,11 +875,11 @@ void __init prom_init(void)
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if ((strncmp(arg, "MEM=", 4) == 0) ||
(strncmp(arg, "mem=", 4) == 0)) {
- MAX_MEMORY = memparse(arg + 4, &p);
- if (MAX_MEMORY == 0)
- MAX_MEMORY = 32ull << 30;
+ max_memory = memparse(arg + 4, &p);
+ if (max_memory == 0)
+ max_memory = 32ull << 30;
if (*p == '@')
- RESERVE_LOW_MEM = memparse(p + 1, &p);
+ reserve_low_mem = memparse(p + 1, &p);
#ifdef CONFIG_KEXEC
} else if (strncmp(arg, "crashkernel=", 12) == 0) {
crashk_size = memparse(arg+12, &p);
@@ -971,13 +971,13 @@ void __init plat_mem_setup(void)
* to consistently work.
*/
mem_alloc_size = 4 << 20;
- if (mem_alloc_size > MAX_MEMORY)
- mem_alloc_size = MAX_MEMORY;
+ if (mem_alloc_size > max_memory)
+ mem_alloc_size = max_memory;
/* Crashkernel ignores bootmem list. It relies on mem=X@Y option */
#ifdef CONFIG_CRASH_DUMP
- add_memory_region(RESERVE_LOW_MEM, MAX_MEMORY, BOOT_MEM_RAM);
- total += MAX_MEMORY;
+ add_memory_region(reserve_low_mem, max_memory, BOOT_MEM_RAM);
+ total += max_memory;
#else
#ifdef CONFIG_KEXEC
if (crashk_size > 0) {
@@ -992,7 +992,7 @@ void __init plat_mem_setup(void)
*/
cvmx_bootmem_lock();
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
- && (total < MAX_MEMORY)) {
+ && (total < max_memory)) {
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
__pa_symbol(&_end), -1,
0x100000,
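
Note: besides renaming MAX_MEMORY/RESERVE_LOW_MEM to lower case, the mem= handling above relies on memparse() consuming the size (with its K/M/G suffix) and leaving the cursor on the next character, so an optional "@" introduces the reserved low-memory offset. A standalone sketch of the same parsing (example function, not kernel code):

	#include <linux/kernel.h>

	static void example_parse_mem(const char *arg,	/* points just past "mem=" */
				      unsigned long long *max_memory,
				      unsigned long long *reserve_low_mem)
	{
		char *p;

		*max_memory = memparse(arg, &p);	/* "512M@1M" -> 0x20000000 */
		if (*max_memory == 0)
			*max_memory = 32ull << 30;	/* same fallback as above */

		if (*p == '@')				/* optional offset part */
			*reserve_low_mem = memparse(p + 1, &p);	/* -> 0x100000 */
	}
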
diff --git a/arch/mips/configs/generic/32r1.config b/arch/mips/configs/generic/32r1.config
new file mode 100644
index 000000000000..a11cd8715519
--- /dev/null
+++ b/arch/mips/configs/generic/32r1.config
@@ -0,0 +1,2 @@
+CONFIG_CPU_MIPS32_R1=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r2.config b/arch/mips/configs/generic/32r2.config
new file mode 100644
index 000000000000..9570672d4f9f
--- /dev/null
+++ b/arch/mips/configs/generic/32r2.config
@@ -0,0 +1,3 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/32r6.config b/arch/mips/configs/generic/32r6.config
new file mode 100644
index 000000000000..ca606e71f4d0
--- /dev/null
+++ b/arch/mips/configs/generic/32r6.config
@@ -0,0 +1,2 @@
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic/64r1.config b/arch/mips/configs/generic/64r1.config
new file mode 100644
index 000000000000..7c1ea7e7bae3
--- /dev/null
+++ b/arch/mips/configs/generic/64r1.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS64_R1=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r2.config b/arch/mips/configs/generic/64r2.config
new file mode 100644
index 000000000000..b4d31ae8bfec
--- /dev/null
+++ b/arch/mips/configs/generic/64r2.config
@@ -0,0 +1,5 @@
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/64r6.config b/arch/mips/configs/generic/64r6.config
new file mode 100644
index 000000000000..7cac0339c4d5
--- /dev/null
+++ b/arch/mips/configs/generic/64r6.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS64_R6=y
+CONFIG_64BIT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
diff --git a/arch/mips/configs/generic/board-sead-3.config b/arch/mips/configs/generic/board-sead-3.config
new file mode 100644
index 000000000000..3b5e1ac579eb
--- /dev/null
+++ b/arch/mips/configs/generic/board-sead-3.config
@@ -0,0 +1,32 @@
+CONFIG_LEGACY_BOARD_SEAD3=y
+
+CONFIG_AUXDISPLAY=y
+CONFIG_IMG_ASCII_LCD=y
+
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_SYSCON=y
+
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+CONFIG_SMSC_PHY=y
+
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
diff --git a/arch/mips/configs/generic/eb.config b/arch/mips/configs/generic/eb.config
new file mode 100644
index 000000000000..c5cdc99a6530
--- /dev/null
+++ b/arch/mips/configs/generic/eb.config
@@ -0,0 +1 @@
+CONFIG_CPU_BIG_ENDIAN=y
diff --git a/arch/mips/configs/generic/el.config b/arch/mips/configs/generic/el.config
new file mode 100644
index 000000000000..ee43fdb3b8f4
--- /dev/null
+++ b/arch/mips/configs/generic/el.config
@@ -0,0 +1 @@
+CONFIG_CPU_LITTLE_ENDIAN=y
diff --git a/arch/mips/configs/generic/micro32r2.config b/arch/mips/configs/generic/micro32r2.config
new file mode 100644
index 000000000000..b701fe7aaa68
--- /dev/null
+++ b/arch/mips/configs/generic/micro32r2.config
@@ -0,0 +1,4 @@
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MICROMIPS=y
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_HIGHMEM=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
new file mode 100644
index 000000000000..c95d94c7838b
--- /dev/null
+++ b/arch/mips/configs/generic_defconfig
@@ -0,0 +1,96 @@
+CONFIG_MIPS_GENERIC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_CPS=y
+CONFIG_CPU_HAS_MSA=y
+CONFIG_HIGHMEM=y
+CONFIG_NR_CPUS=2
+CONFIG_MIPS_O32_FP64_SUPPORT=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_USERFAULTFD=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_TRIM_UNUSED_KSYMS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_SCSI=y
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_SYSCON=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_FANOTIFY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon"
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
new file mode 100644
index 000000000000..2304d4165773
--- /dev/null
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -0,0 +1,126 @@
+CONFIG_MACH_LOONGSON32=y
+CONFIG_LOONGSON1_LS1C=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_NAMESPACES=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_LOONGSON1=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_SCSI=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=m
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_LEGACY_PTY_COUNT=8
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_LOONGSON1=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 5afb4840aec7..58d43f3c348d 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -230,7 +230,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
@@ -318,6 +318,8 @@ CONFIG_LIBERTAS=m
# CONFIG_SERIO_I8042 is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 98f13879bb8f..c8f7e2835840 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -235,7 +235,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
@@ -331,6 +331,8 @@ CONFIG_LIBERTAS=m
# CONFIG_SERIO_I8042 is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 3b5d5913f548..d2f54e55356c 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -234,7 +234,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
@@ -331,6 +331,8 @@ CONFIG_LIBERTAS=m
# CONFIG_SERIO_I8042 is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 65f140e1e872..cbf37dd0c490 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -132,6 +132,8 @@ CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 799c4338fd5e..35f6ba260df8 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -132,6 +132,8 @@ CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index ac0eb4daf101..900f14543eeb 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -134,6 +134,8 @@ CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 31846000530f..8e2738b5e180 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -137,6 +137,8 @@ CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index a79107da0675..6dc4e309a691 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -131,6 +131,8 @@ CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 732215732751..3d0d9cb9673f 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -231,7 +231,7 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
@@ -326,6 +326,8 @@ CONFIG_LIBERTAS=m
# CONFIG_SERIO_I8042 is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_CIRRUS=y
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index 8b7429127a1d..7d32fbbca962 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -29,7 +29,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -264,7 +263,6 @@ CONFIG_DMADEVICES=y
CONFIG_IMG_MDC_DMA=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
-# CONFIG_ANDROID_TIMED_OUTPUT is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_MEMORY=y
CONFIG_IIO=y
diff --git a/arch/mips/configs/sead3_defconfig b/arch/mips/configs/sead3_defconfig
deleted file mode 100644
index dae9354b6256..000000000000
--- a/arch/mips/configs/sead3_defconfig
+++ /dev/null
@@ -1,121 +0,0 @@
-CONFIG_MIPS_SEAD3=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_HZ_100=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_CONSOLE_TRANSLATIONS is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_CHARDEV=y
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_SPI=y
-CONFIG_SENSORS_ADT7475=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_M41T80=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_ARC4=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/sead3micro_defconfig b/arch/mips/configs/sead3micro_defconfig
deleted file mode 100644
index cd91a775c74e..000000000000
--- a/arch/mips/configs/sead3micro_defconfig
+++ /dev/null
@@ -1,122 +0,0 @@
-CONFIG_MIPS_SEAD3=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MICROMIPS=y
-CONFIG_HZ_100=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
-CONFIG_MODULES=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_SCSI=y
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_CONSOLE_TRANSLATIONS is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-# CONFIG_I2C_COMPAT is not set
-CONFIG_I2C_CHARDEV=y
-# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_SPI=y
-CONFIG_SENSORS_ADT7475=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_M41T80=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_XFS_FS=y
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_15=y
-CONFIG_NLS_UTF8=y
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_ARC4=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
new file mode 100644
index 000000000000..a606b3f9196c
--- /dev/null
+++ b/arch/mips/generic/Kconfig
@@ -0,0 +1,19 @@
+if MIPS_GENERIC
+
+config LEGACY_BOARDS
+ bool
+ help
+ Select this from your board if the board must use a legacy, non-UHI,
+ boot protocol. This will cause the kernel to scan through the list of
+ supported machines calling their detect functions in turn if the
+ kernel is booted without being provided with an FDT via the UHI
+ boot protocol.
+
+config LEGACY_BOARD_SEAD3
+ bool "Support MIPS SEAD-3 boards"
+ select LEGACY_BOARDS
+ help
+ Enable this to include support for booting on MIPS SEAD-3 FPGA-based
+ development boards, which boot using a legacy boot protocol.
+
+endif
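
Note: the help text above describes the fallback used when no FDT arrives via the UHI boot protocol: walk the built-in machine descriptions and ask each one whether it recognises the hardware. Sketched in isolation with illustrative types only (the real structure lives in asm/machine.h and is filled in by board-sead3.c below):

	#include <stdbool.h>
	#include <stddef.h>

	struct example_machine {
		const char *name;
		bool (*detect)(void);	/* e.g. the SEAD-3 MIPS_REVISION probe below */
		const void *fdt;	/* built-in FDT used when detect() matches */
	};

	static const void *example_pick_fdt(const struct example_machine *machines,
					    size_t count)
	{
		size_t i;

		/* no UHI-provided FDT: try each legacy board's detect hook in turn */
		for (i = 0; i < count; i++)
			if (machines[i].detect && machines[i].detect())
				return machines[i].fdt;

		return NULL;
	}
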
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile
new file mode 100644
index 000000000000..7c66494151db
--- /dev/null
+++ b/arch/mips/generic/Makefile
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@imgtec.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+
+obj-y += init.o
+obj-y += irq.o
+obj-y += proc.o
+
+obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
new file mode 100644
index 000000000000..9a30d69e2281
--- /dev/null
+++ b/arch/mips/generic/Platform
@@ -0,0 +1,14 @@
+#
+# Copyright (C) 2016 Imagination Technologies
+# Author: Paul Burton <paul.burton@imgtec.com>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+
+platform-$(CONFIG_MIPS_GENERIC) += generic/
+cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic
+load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000
+all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
diff --git a/arch/mips/generic/board-sead3.c b/arch/mips/generic/board-sead3.c
new file mode 100644
index 000000000000..f4ae0584a33b
--- /dev/null
+++ b/arch/mips/generic/board-sead3.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#define pr_fmt(fmt) "sead3: " fmt
+
+#include <linux/errno.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+
+#include <asm/fw/fw.h>
+#include <asm/io.h>
+#include <asm/machine.h>
+
+#define SEAD_CONFIG CKSEG1ADDR(0x1b100110)
+#define SEAD_CONFIG_GIC_PRESENT BIT(1)
+
+#define MIPS_REVISION CKSEG1ADDR(0x1fc00010)
+#define MIPS_REVISION_MACHINE (0xf << 4)
+#define MIPS_REVISION_MACHINE_SEAD3 (0x4 << 4)
+
+static __init bool sead3_detect(void)
+{
+ uint32_t rev;
+
+ rev = __raw_readl((void *)MIPS_REVISION);
+ return (rev & MIPS_REVISION_MACHINE) == MIPS_REVISION_MACHINE_SEAD3;
+}
+
+static __init int append_cmdline(void *fdt)
+{
+ int err, chosen_off;
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_path_offset(fdt, "/chosen@0");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "bootargs", fw_getcmdline());
+ if (err) {
+ pr_err("Unable to set bootargs property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static __init int append_memory(void *fdt)
+{
+ unsigned long phys_memsize, memsize;
+ __be32 mem_array[2];
+ int err, mem_off;
+ char *var;
+
+ /* find memory size from the bootloader environment */
+ var = fw_getenv("memsize");
+ if (var) {
+ err = kstrtoul(var, 0, &phys_memsize);
+ if (err) {
+ pr_err("Failed to read memsize env variable '%s'\n",
+ var);
+ return -EINVAL;
+ }
+ } else {
+ pr_warn("The bootloader didn't provide memsize: defaulting to 32MB\n");
+ phys_memsize = 32 << 20;
+ }
+
+ /* default to using all available RAM */
+ memsize = phys_memsize;
+
+ /* allow the user to override the usable memory */
+ var = strstr(arcs_cmdline, "memsize=");
+ if (var)
+ memsize = memparse(var + strlen("memsize="), NULL);
+
+ /* if the user says there's more RAM than we thought, believe them */
+ phys_memsize = max_t(unsigned long, phys_memsize, memsize);
+
+ /* find or add a memory node */
+ mem_off = fdt_path_offset(fdt, "/memory");
+ if (mem_off == -FDT_ERR_NOTFOUND)
+ mem_off = fdt_add_subnode(fdt, 0, "memory");
+ if (mem_off < 0) {
+ pr_err("Unable to find or add memory DT node: %d\n", mem_off);
+ return mem_off;
+ }
+
+ err = fdt_setprop_string(fdt, mem_off, "device_type", "memory");
+ if (err) {
+ pr_err("Unable to set memory node device_type: %d\n", err);
+ return err;
+ }
+
+ mem_array[0] = 0;
+ mem_array[1] = cpu_to_be32(phys_memsize);
+ err = fdt_setprop(fdt, mem_off, "reg", mem_array, sizeof(mem_array));
+ if (err) {
+ pr_err("Unable to set memory regs property: %d\n", err);
+ return err;
+ }
+
+ mem_array[0] = 0;
+ mem_array[1] = cpu_to_be32(memsize);
+ err = fdt_setprop(fdt, mem_off, "linux,usable-memory",
+ mem_array, sizeof(mem_array));
+ if (err) {
+ pr_err("Unable to set linux,usable-memory property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static __init int remove_gic(void *fdt)
+{
+ const unsigned int cpu_ehci_int = 2;
+ const unsigned int cpu_uart_int = 4;
+ const unsigned int cpu_eth_int = 6;
+ int gic_off, cpu_off, uart_off, eth_off, ehci_off, err;
+ uint32_t cfg, cpu_phandle;
+
+ /* leave the GIC node intact if a GIC is present */
+ cfg = __raw_readl((uint32_t *)SEAD_CONFIG);
+ if (cfg & SEAD_CONFIG_GIC_PRESENT)
+ return 0;
+
+ gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+ if (gic_off < 0) {
+ pr_err("unable to find DT GIC node: %d\n", gic_off);
+ return gic_off;
+ }
+
+ err = fdt_nop_node(fdt, gic_off);
+ if (err) {
+ pr_err("unable to nop GIC node\n");
+ return err;
+ }
+
+ cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+ "mti,cpu-interrupt-controller");
+ if (cpu_off < 0) {
+ pr_err("unable to find CPU intc node: %d\n", cpu_off);
+ return cpu_off;
+ }
+
+ cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+ if (!cpu_phandle) {
+ pr_err("unable to get CPU intc phandle\n");
+ return -EINVAL;
+ }
+
+ err = fdt_setprop_u32(fdt, 0, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_err("unable to set root interrupt-parent: %d\n", err);
+ return err;
+ }
+
+ uart_off = fdt_node_offset_by_compatible(fdt, -1, "ns16550a");
+ while (uart_off >= 0) {
+ err = fdt_setprop_u32(fdt, uart_off, "interrupts",
+ cpu_uart_int);
+ if (err) {
+ pr_err("unable to set UART interrupts property: %d\n",
+ err);
+ return err;
+ }
+
+ uart_off = fdt_node_offset_by_compatible(fdt, uart_off,
+ "ns16550a");
+ }
+ if (uart_off != -FDT_ERR_NOTFOUND) {
+ pr_err("error searching for UART DT node: %d\n", uart_off);
+ return uart_off;
+ }
+
+ eth_off = fdt_node_offset_by_compatible(fdt, -1, "smsc,lan9115");
+ if (eth_off < 0) {
+ pr_err("unable to find ethernet DT node: %d\n", eth_off);
+ return eth_off;
+ }
+
+ err = fdt_setprop_u32(fdt, eth_off, "interrupts", cpu_eth_int);
+ if (err) {
+ pr_err("unable to set ethernet interrupts property: %d\n", err);
+ return err;
+ }
+
+ ehci_off = fdt_node_offset_by_compatible(fdt, -1, "generic-ehci");
+ if (ehci_off < 0) {
+ pr_err("unable to find EHCI DT node: %d\n", ehci_off);
+ return ehci_off;
+ }
+
+ err = fdt_setprop_u32(fdt, ehci_off, "interrupts", cpu_ehci_int);
+ if (err) {
+ pr_err("unable to set EHCI interrupts property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static __init int serial_config(void *fdt)
+{
+ const char *yamontty, *mode_var;
+ char mode_var_name[9], path[18], parity;
+ unsigned int uart, baud, stop_bits;
+ bool hw_flow;
+ int chosen_off, err;
+
+ yamontty = fw_getenv("yamontty");
+ if (!yamontty || !strcmp(yamontty, "tty0")) {
+ uart = 0;
+ } else if (!strcmp(yamontty, "tty1")) {
+ uart = 1;
+ } else {
+ pr_warn("yamontty environment variable '%s' invalid\n",
+ yamontty);
+ uart = 0;
+ }
+
+ baud = stop_bits = 0;
+ parity = 0;
+ hw_flow = false;
+
+ snprintf(mode_var_name, sizeof(mode_var_name), "modetty%u", uart);
+ mode_var = fw_getenv(mode_var_name);
+ if (mode_var) {
+ while (mode_var[0] >= '0' && mode_var[0] <= '9') {
+ baud *= 10;
+ baud += mode_var[0] - '0';
+ mode_var++;
+ }
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ parity = mode_var[0];
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (mode_var[0])
+ stop_bits = mode_var[0] - '0';
+ if (mode_var[0] == ',')
+ mode_var++;
+ if (!strcmp(mode_var, "hw"))
+ hw_flow = true;
+ }
+
+ if (!baud)
+ baud = 38400;
+
+ if (parity != 'e' && parity != 'n' && parity != 'o')
+ parity = 'n';
+
+ if (stop_bits != 7 && stop_bits != 8)
+ stop_bits = 8;
+
+ WARN_ON(snprintf(path, sizeof(path), "uart%u:%u%c%u%s",
+ uart, baud, parity, stop_bits,
+ hw_flow ? "r" : "") >= sizeof(path));
+
+ /* find or add chosen node */
+ chosen_off = fdt_path_offset(fdt, "/chosen");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_path_offset(fdt, "/chosen@0");
+ if (chosen_off == -FDT_ERR_NOTFOUND)
+ chosen_off = fdt_add_subnode(fdt, 0, "chosen");
+ if (chosen_off < 0) {
+ pr_err("Unable to find or add DT chosen node: %d\n",
+ chosen_off);
+ return chosen_off;
+ }
+
+ err = fdt_setprop_string(fdt, chosen_off, "stdout-path", path);
+ if (err) {
+ pr_err("Unable to set stdout-path property: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static __init const void *sead3_fixup_fdt(const void *fdt,
+ const void *match_data)
+{
+ static unsigned char fdt_buf[16 << 10] __initdata;
+ int err;
+
+ if (fdt_check_header(fdt))
+ panic("Corrupt DT");
+
+ /* if this isn't SEAD3, something went wrong */
+ BUG_ON(fdt_node_check_compatible(fdt, 0, "mti,sead-3"));
+
+ fw_init_cmdline();
+
+ err = fdt_open_into(fdt, fdt_buf, sizeof(fdt_buf));
+ if (err)
+ panic("Unable to open FDT: %d", err);
+
+ err = append_cmdline(fdt_buf);
+ if (err)
+ panic("Unable to patch FDT: %d", err);
+
+ err = append_memory(fdt_buf);
+ if (err)
+ panic("Unable to patch FDT: %d", err);
+
+ err = remove_gic(fdt_buf);
+ if (err)
+ panic("Unable to patch FDT: %d", err);
+
+ err = serial_config(fdt_buf);
+ if (err)
+ panic("Unable to patch FDT: %d", err);
+
+ err = fdt_pack(fdt_buf);
+ if (err)
+ panic("Unable to pack FDT: %d\n", err);
+
+ return fdt_buf;
+}
+
+static __init unsigned int sead3_measure_hpt_freq(void)
+{
+ void __iomem *status_reg = (void __iomem *)0xbf000410;
+ unsigned int freq, orig, tick = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ orig = readl(status_reg) & 0x2; /* get original sample */
+ /* wait for transition */
+ while ((readl(status_reg) & 0x2) == orig)
+ ;
+ orig = orig ^ 0x2; /* flip the bit */
+
+ write_c0_count(0);
+
+ /* wait 1 second (the sampling clock transitions every 10ms) */
+ while (tick < 100) {
+ /* wait for transition */
+ while ((readl(status_reg) & 0x2) == orig)
+ ;
+ orig = orig ^ 0x2; /* flip the bit */
+ tick++;
+ }
+
+ freq = read_c0_count();
+
+ local_irq_restore(flags);
+
+ return freq;
+}
+
+extern char __dtb_sead3_begin[];
+
+MIPS_MACHINE(sead3) = {
+ .fdt = __dtb_sead3_begin,
+ .detect = sead3_detect,
+ .fixup_fdt = sead3_fixup_fdt,
+ .measure_hpt_freq = sead3_measure_hpt_freq,
+};
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
new file mode 100644
index 000000000000..0ea73e845440
--- /dev/null
+++ b/arch/mips/generic/init.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/fw/fw.h>
+#include <asm/irq_cpu.h>
+#include <asm/machine.h>
+#include <asm/mips-cpc.h>
+#include <asm/prom.h>
+#include <asm/smp-ops.h>
+#include <asm/time.h>
+
+static __initdata const void *fdt;
+static __initdata const struct mips_machine *mach;
+static __initdata const void *mach_match_data;
+
+void __init prom_init(void)
+{
+ const struct mips_machine *check_mach;
+ const struct of_device_id *match;
+
+ if ((fw_arg0 == -2) && !fdt_check_header((void *)fw_arg1)) {
+ /*
+ * We booted using the UHI boot protocol, so we have been
+ * provided with the appropriate device tree for the board.
+ * Make use of it & search for any machine struct based upon
+ * the root compatible string.
+ */
+ fdt = (void *)fw_arg1;
+
+ for_each_mips_machine(check_mach) {
+ match = mips_machine_is_compatible(check_mach, fdt);
+ if (match) {
+ mach = check_mach;
+ mach_match_data = match->data;
+ break;
+ }
+ }
+ } else if (IS_ENABLED(CONFIG_LEGACY_BOARDS)) {
+ /*
+ * We weren't booted using the UHI boot protocol, but do
+ * support some number of boards with legacy boot protocols.
+ * Attempt to find the right one.
+ */
+ for_each_mips_machine(check_mach) {
+ if (!check_mach->detect)
+ continue;
+
+ if (!check_mach->detect())
+ continue;
+
+ mach = check_mach;
+ }
+
+ /*
+ * If we don't recognise the machine then we can't continue, so
+ * die here.
+ */
+ BUG_ON(!mach);
+
+ /* Retrieve the machine's FDT */
+ fdt = mach->fdt;
+ }
+
+ BUG_ON(!fdt);
+}
+
+void __init *plat_get_fdt(void)
+{
+ return (void *)fdt;
+}
+
+void __init plat_mem_setup(void)
+{
+ if (mach && mach->fixup_fdt)
+ fdt = mach->fixup_fdt(fdt, mach_match_data);
+
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ __dt_setup_arch((void *)fdt);
+}
+
+void __init device_tree_init(void)
+{
+ int err;
+
+ unflatten_and_copy_device_tree();
+ mips_cpc_probe();
+
+ err = register_cps_smp_ops();
+ if (err)
+ err = register_up_smp_ops();
+}
+
+void __init plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+
+ of_clk_init(NULL);
+
+ if (!cpu_has_counter) {
+ mips_hpt_frequency = 0;
+ } else if (mach && mach->measure_hpt_freq) {
+ mips_hpt_frequency = mach->measure_hpt_freq();
+ } else {
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ mips_hpt_frequency = clk_get_rate(clk);
+ clk_put(clk);
+
+ switch (boot_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ /* The counter runs at the CPU clock rate */
+ break;
+ default:
+ /* The counter runs at half the CPU clock rate */
+ mips_hpt_frequency /= 2;
+ break;
+ }
+ }
+
+ clocksource_probe();
+}
+
+void __init arch_init_irq(void)
+{
+ struct device_node *intc_node;
+
+ intc_node = of_find_compatible_node(NULL, NULL,
+ "mti,cpu-interrupt-controller");
+ if (!cpu_has_veic && !intc_node)
+ mips_cpu_irq_init();
+
+ irqchip_init();
+}
+
+static int __init publish_devices(void)
+{
+ if (!of_have_populated_dt())
+ panic("Device-tree not present");
+
+ if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
+ panic("Failed to populate DT");
+
+ return 0;
+}
+arch_initcall(publish_devices);
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c
new file mode 100644
index 000000000000..14064bdd91dd
--- /dev/null
+++ b/arch/mips/generic/irq.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/irqchip/mips-gic.h>
+#include <linux/types.h>
+
+#include <asm/irq.h>
+
+int get_c0_fdc_int(void)
+{
+ int mips_cpu_fdc_irq;
+
+ if (cpu_has_veic)
+ panic("Unimplemented!");
+ else if (gic_present)
+ mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+ else if (cp0_fdc_irq >= 0)
+ mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ else
+ mips_cpu_fdc_irq = -1;
+
+ return mips_cpu_fdc_irq;
+}
+
+int get_c0_perfcount_int(void)
+{
+ int mips_cpu_perf_irq;
+
+ if (cpu_has_veic)
+ panic("Unimplemented!");
+ else if (gic_present)
+ mips_cpu_perf_irq = gic_get_c0_perfcount_int();
+ else if (cp0_perfcount_irq >= 0)
+ mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ mips_cpu_perf_irq = -1;
+
+ return mips_cpu_perf_irq;
+}
+
+unsigned int get_c0_compare_int(void)
+{
+ int mips_cpu_timer_irq;
+
+ if (cpu_has_veic)
+ panic("Unimplemented!");
+ else if (gic_present)
+ mips_cpu_timer_irq = gic_get_c0_compare_int();
+ else
+ mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+
+ return mips_cpu_timer_irq;
+}
diff --git a/arch/mips/generic/proc.c b/arch/mips/generic/proc.c
new file mode 100644
index 000000000000..42b33250a4a2
--- /dev/null
+++ b/arch/mips/generic/proc.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of.h>
+
+#include <asm/bootinfo.h>
+
+const char *get_system_type(void)
+{
+ const char *str;
+ int err;
+
+ err = of_property_read_string(of_root, "model", &str);
+ if (!err)
+ return str;
+
+ err = of_property_read_string_index(of_root, "compatible", 0, &str);
+ if (!err)
+ return str;
+
+ return "Unknown";
+}
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
new file mode 100644
index 000000000000..f67fbf1c8541
--- /dev/null
+++ b/arch/mips/generic/vmlinux.its.S
@@ -0,0 +1,31 @@
+/dts-v1/;
+
+/ {
+ description = KERNEL_NAME;
+ #address-cells = <ADDR_CELLS>;
+
+ images {
+ kernel@0 {
+ description = KERNEL_NAME;
+ data = /incbin/(VMLINUX_BINARY);
+ type = "kernel";
+ arch = "mips";
+ os = "linux";
+ compression = VMLINUX_COMPRESSION;
+ load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
+ entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
+ hash@0 {
+ algo = "sha1";
+ };
+ };
+ };
+
+ configurations {
+ default = "conf@default";
+
+ conf@default {
+ description = "Generic Linux kernel";
+ kernel = "kernel@0";
+ };
+ };
+};
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index c5b04e752e97..4856adc8906e 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -126,8 +126,7 @@
#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p))
#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p))
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
-#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \
- (_CONST64_(cm) << 59) | (a))
+#define PHYS_TO_XKPHYS(cm, a) (XKPHYS | (_ACAST64_(cm) << 59) | (a))
/*
 * The ultimate limit of the 64-bit MIPS architecture: 2 bits for selecting
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index d296633d890e..a5eb1bb199a7 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,6 +10,102 @@
#include <asm/addrspace.h>
+/*
+ * Sync types defined by the MIPS architecture (document MD00087 table 6.5)
+ * These values are used with the sync instruction to perform memory barriers.
+ * Types of ordering guarantees available through the SYNC instruction:
+ * - Completion Barriers
+ * - Ordering Barriers
+ * As compared to the completion barrier, the ordering barrier is a
+ * lighter-weight operation as it does not require the specified instructions
+ * before the SYNC to be already completed. Instead it only requires that those
+ * specified instructions which are subsequent to the SYNC in the instruction
+ * stream are never re-ordered for processing ahead of the specified
+ * instructions which are before the SYNC in the instruction stream.
+ * This potentially reduces how many cycles the barrier instruction must stall
+ * before it completes.
+ * Implementations that do not use any of the non-zero values of stype to define
+ * different barriers, such as ordering barriers, must make those stype values
+ * act the same as stype zero.
+ */
+
+/*
+ * Completion barriers:
+ * - Every synchronizable specified memory instruction (loads or stores or both)
+ * that occurs in the instruction stream before the SYNC instruction must be
+ * already globally performed before any synchronizable specified memory
+ * instructions that occur after the SYNC are allowed to be performed, with
+ * respect to any other processor or coherent I/O module.
+ *
+ * - The barrier does not guarantee the order in which instruction fetches are
+ * performed.
+ *
+ * - A stype value of zero will always be defined such that it performs the most
+ * complete set of synchronization operations that are defined. This means
+ * stype zero always does a completion barrier that affects both loads and
+ * stores preceding the SYNC instruction and both loads and stores that are
+ * subsequent to the SYNC instruction. Non-zero values of stype may be defined
+ * by the architecture or specific implementations to perform synchronization
+ * behaviors that are less complete than that of stype zero. If an
+ * implementation does not use one of these non-zero values to define a
+ * different synchronization behavior, then that non-zero value of stype must
+ * act the same as stype zero completion barrier. This allows software written
+ * for an implementation with a lighter-weight barrier to work on another
+ * implementation which only implements the stype zero completion barrier.
+ *
+ * - A completion barrier is required, potentially in conjunction with SSNOP (in
+ * Release 1 of the Architecture) or EHB (in Release 2 of the Architecture),
+ * to guarantee that memory reference results are visible across operating
+ * mode changes. For example, a completion barrier is required on some
+ * implementations on entry to and exit from Debug Mode to guarantee that
+ * memory effects are handled correctly.
+ */
+
+/*
+ * stype 0 - A completion barrier that affects preceding loads and stores and
+ * subsequent loads and stores.
+ * Older instructions which must reach the load/store ordering point before the
+ * SYNC instruction completes: Loads, Stores
+ * Younger instructions which must reach the load/store ordering point only
+ * after the SYNC instruction completes: Loads, Stores
+ * Older instructions which must be globally performed when the SYNC instruction
+ * completes: Loads, Stores
+ */
+#define STYPE_SYNC 0x0
+
+/*
+ * Ordering barriers:
+ * - Every synchronizable specified memory instruction (loads or stores or both)
+ * that occurs in the instruction stream before the SYNC instruction must
+ * reach a stage in the load/store datapath after which no instruction
+ * re-ordering is possible before any synchronizable specified memory
+ * instruction which occurs after the SYNC instruction in the instruction
+ * stream reaches the same stage in the load/store datapath.
+ *
+ * - If any memory instruction before the SYNC instruction in program order,
+ * generates a memory request to the external memory and any memory
+ * instruction after the SYNC instruction in program order also generates a
+ * memory request to external memory, the memory request belonging to the
+ * older instruction must be globally performed before the time the memory
+ * request belonging to the younger instruction is globally performed.
+ *
+ * - The barrier does not guarantee the order in which instruction fetches are
+ * performed.
+ */
+
+/*
+ * stype 0x10 - An ordering barrier that affects preceding loads and stores and
+ * subsequent loads and stores.
+ * Older instructions which must reach the load/store ordering point before the
+ * SYNC instruction completes: Loads, Stores
+ * Younger instructions which must reach the load/store ordering point only
+ * after the SYNC instruction completes: Loads, Stores
+ * Older instructions which must be globally performed when the SYNC instruction
+ * completes: N/A
+ */
+#define STYPE_SYNC_MB 0x10
+
+
#ifdef CONFIG_CPU_HAS_SYNC
#define __sync() \
__asm__ __volatile__( \
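
[Illustrative sketch, not part of the patch: the STYPE_* values documented above are the immediates encoded into the MIPS SYNC instruction. Assuming a MIPS32r2-or-later assembler, a lighter-weight ordering barrier could be emitted roughly as below; the function name is hypothetical.]

static inline void example_sync_ordering_barrier(void)
{
	/* SYNC with stype 0x10: orders prior vs. subsequent loads/stores */
	__asm__ __volatile__(
		".set	push\n\t"
		".set	mips32r2\n\t"
		"sync	%0\n\t"
		".set	pop"
		: /* no outputs */
		: "i" (STYPE_SYNC_MB)
		: "memory");
}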
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 34ed22ec6c33..4812d1fed0c2 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -28,6 +28,7 @@
* - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
+ * - __flush_icache_user_range(start, end) flushes range of user instructions
*/
/*
@@ -80,6 +81,10 @@ static inline void flush_icache_page(struct vm_area_struct *vma,
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*__flush_icache_user_range)(unsigned long start,
+ unsigned long end);
+extern void (*__local_flush_icache_user_range)(unsigned long start,
+ unsigned long end);
extern void (*__flush_cache_vmap)(void);
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index fbe1881f28fc..bdd6dc18e65c 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -24,7 +24,8 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_LOONGSON3:
#endif
-#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON1B) || \
+ defined(CONFIG_SYS_HAS_CPU_LOONGSON1C)
case CPU_LOONGSON1:
#endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index f672df8b26d0..9a8372484edc 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -240,6 +240,7 @@
#define PRID_REV_VR4130 0x0080
#define PRID_REV_34K_V1_0_2 0x0022
#define PRID_REV_LOONGSON1B 0x0020
+#define PRID_REV_LOONGSON1C 0x0020 /* Same as Loongson-1B */
#define PRID_REV_LOONGSON2E 0x0002
#define PRID_REV_LOONGSON2F 0x0003
#define PRID_REV_LOONGSON3A_R1 0x0005
diff --git a/arch/mips/include/asm/device.h b/arch/mips/include/asm/device.h
index c94fafba9e62..21c2082a0dfb 100644
--- a/arch/mips/include/asm/device.h
+++ b/arch/mips/include/asm/device.h
@@ -11,6 +11,11 @@ struct dma_map_ops;
struct dev_archdata {
/* DMA operations on that device */
struct dma_map_ops *dma_ops;
+
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ /* Non-zero if DMA is coherent with CPU caches */
+ bool dma_coherent;
+#endif
};
struct pdev_archdata {
diff --git a/arch/mips/include/asm/dma-coherence.h b/arch/mips/include/asm/dma-coherence.h
index bc5e85d579e6..72d0eab02afc 100644
--- a/arch/mips/include/asm/dma-coherence.h
+++ b/arch/mips/include/asm/dma-coherence.h
@@ -9,14 +9,22 @@
#ifndef __ASM_DMA_COHERENCE_H
#define __ASM_DMA_COHERENCE_H
-#ifdef CONFIG_DMA_MAYBE_COHERENT
-extern int coherentio;
+enum coherent_io_user_state {
+ IO_COHERENCE_DEFAULT,
+ IO_COHERENCE_ENABLED,
+ IO_COHERENCE_DISABLED,
+};
+
+#if defined(CONFIG_DMA_PERDEV_COHERENT)
+/* Don't provide (hw_)coherentio to avoid misuse */
+#elif defined(CONFIG_DMA_MAYBE_COHERENT)
+extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
#else
#ifdef CONFIG_DMA_COHERENT
-#define coherentio 1
+#define coherentio IO_COHERENCE_ENABLED
#else
-#define coherentio 0
+#define coherentio IO_COHERENCE_DISABLED
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 12fa79e2f1b4..7aa71b9b0258 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -32,4 +32,14 @@ static inline void dma_mark_clean(void *addr, size_t size) {}
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
+#define arch_setup_dma_ops arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, const struct iommu_ops *iommu,
+ bool coherent)
+{
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ dev->archdata.dma_coherent = coherent;
+#endif
+}
+
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/mips/include/asm/i8259.h b/arch/mips/include/asm/i8259.h
index a7fbcd6ed13c..32229c77906a 100644
--- a/arch/mips/include/asm/i8259.h
+++ b/arch/mips/include/asm/i8259.h
@@ -37,12 +37,22 @@
extern raw_spinlock_t i8259A_lock;
-extern int i8259A_irq_pending(unsigned int irq);
extern void make_8259A_irq(unsigned int irq);
extern void init_i8259_irqs(void);
extern int i8259_of_init(struct device_node *node, struct device_node *parent);
+/**
+ * i8259_set_poll() - Override the i8259 polling function
+ * @poll: pointer to platform-specific polling function
+ *
+ * Call this to override the generic i8259 polling function, which directly
+ * accesses i8259 registers, with a platform specific one which may be faster
+ * in cases where hardware provides a more optimal means of polling for an
+ * interrupt.
+ */
+extern void i8259_set_poll(int (*poll)(void));
+
/*
* Do the traditional i8259 interrupt polling thing. This is for the few
* cases where no better interrupt acknowledge method is available and we
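
[Illustrative sketch, not part of the patch: how a platform might use the new i8259_set_poll() hook. The register name and helpers are hypothetical; assumes the usual <linux/io.h> and <asm/i8259.h> includes.]

/* hypothetical platform register that latches the acknowledged i8259 vector */
static void __iomem *example_int_ack_reg;

static int example_i8259_poll(void)
{
	/* cheaper than the generic poll, which accesses the i8259 directly */
	return readl(example_int_ack_reg) & 0xff;
}

static void __init example_init_pic(void)
{
	init_i8259_irqs();
	i8259_set_poll(example_i8259_poll);
}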
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 0f8a354fd468..61addb1677e9 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -49,7 +49,19 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
static inline int plat_device_is_coherent(struct device *dev)
{
- return coherentio;
+#ifdef CONFIG_DMA_PERDEV_COHERENT
+ return dev->archdata.dma_coherent;
+#else
+ switch (coherentio) {
+ default:
+ case IO_COHERENCE_DEFAULT:
+ return hw_coherentio;
+ case IO_COHERENCE_ENABLED:
+ return 1;
+ case IO_COHERENCE_DISABLED:
+ return 0;
+ }
+#endif
}
#ifndef plat_post_dma_flush
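
[Illustrative sketch, not part of the patch: with CONFIG_DMA_PERDEV_COHERENT the answer becomes a per-device flag filled in by arch_setup_dma_ops() (see the dma-mapping.h hunk above) rather than the global coherentio setting. The helper below is hypothetical.]

static bool example_dev_needs_cache_maintenance(struct device *dev)
{
	/* non-coherent devices still need explicit cache ops around DMA */
	return !plat_device_is_coherent(dev);
}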
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index e2561d99a3fe..9ec2f6a5200b 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -115,11 +115,7 @@ static inline unsigned long fd_getfdaddr1(void)
static inline unsigned long fd_dma_mem_alloc(unsigned long size)
{
- unsigned long mem;
-
- mem = __get_dma_pages(GFP_KERNEL, get_order(size));
-
- return mem;
+ return __get_dma_pages(GFP_KERNEL, get_order(size));
}
static inline void fd_dma_mem_free(unsigned long addr, unsigned long size)
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index afc96ecb9004..952b0fdfda0e 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -12,6 +12,8 @@
#include <linux/const.h>
+#include <asm/mipsregs.h>
+
/*
* This gives the physical RAM offset.
*/
@@ -52,11 +54,7 @@
#ifdef CONFIG_64BIT
#ifndef CAC_BASE
-#ifdef CONFIG_DMA_NONCOHERENT
-#define CAC_BASE _AC(0x9800000000000000, UL)
-#else
-#define CAC_BASE _AC(0xa800000000000000, UL)
-#endif
+#define CAC_BASE PHYS_TO_XKPHYS(read_c0_config() & CONF_CM_CMASK, 0)
#endif
#ifndef IO_BASE
diff --git a/arch/mips/include/asm/mach-ip27/spaces.h b/arch/mips/include/asm/mach-ip27/spaces.h
index b18802a0b17e..4775a1136a5b 100644
--- a/arch/mips/include/asm/mach-ip27/spaces.h
+++ b/arch/mips/include/asm/mach-ip27/spaces.h
@@ -19,6 +19,7 @@
#define IO_BASE 0x9200000000000000
#define MSPEC_BASE 0x9400000000000000
#define UNCAC_BASE 0x9600000000000000
+#define CAC_BASE 0xa800000000000000
#define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
#define TO_HSPEC(x) (HSPEC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/include/asm/mach-loongson32/irq.h b/arch/mips/include/asm/mach-loongson32/irq.h
index c1c744197de4..8c01b304b7ec 100644
--- a/arch/mips/include/asm/mach-loongson32/irq.h
+++ b/arch/mips/include/asm/mach-loongson32/irq.h
@@ -36,9 +36,14 @@
#define LS1X_IRQ(n, x) (LS1X_IRQ_BASE + (n << 5) + (x))
#define LS1X_UART0_IRQ LS1X_IRQ(0, 2)
+#if defined(CONFIG_LOONGSON1_LS1B)
#define LS1X_UART1_IRQ LS1X_IRQ(0, 3)
#define LS1X_UART2_IRQ LS1X_IRQ(0, 4)
#define LS1X_UART3_IRQ LS1X_IRQ(0, 5)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART1_IRQ LS1X_IRQ(0, 4)
+#define LS1X_UART2_IRQ LS1X_IRQ(0, 5)
+#endif
#define LS1X_CAN0_IRQ LS1X_IRQ(0, 6)
#define LS1X_CAN1_IRQ LS1X_IRQ(0, 7)
#define LS1X_SPI0_IRQ LS1X_IRQ(0, 8)
@@ -47,6 +52,9 @@
#define LS1X_DMA0_IRQ LS1X_IRQ(0, 13)
#define LS1X_DMA1_IRQ LS1X_IRQ(0, 14)
#define LS1X_DMA2_IRQ LS1X_IRQ(0, 15)
+#if defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_NAND_IRQ LS1X_IRQ(0, 16)
+#endif
#define LS1X_PWM0_IRQ LS1X_IRQ(0, 17)
#define LS1X_PWM1_IRQ LS1X_IRQ(0, 18)
#define LS1X_PWM2_IRQ LS1X_IRQ(0, 19)
@@ -54,18 +62,49 @@
#define LS1X_RTC_INT0_IRQ LS1X_IRQ(0, 21)
#define LS1X_RTC_INT1_IRQ LS1X_IRQ(0, 22)
#define LS1X_RTC_INT2_IRQ LS1X_IRQ(0, 23)
+#if defined(CONFIG_LOONGSON1_LS1B)
#define LS1X_TOY_INT0_IRQ LS1X_IRQ(0, 24)
#define LS1X_TOY_INT1_IRQ LS1X_IRQ(0, 25)
#define LS1X_TOY_INT2_IRQ LS1X_IRQ(0, 26)
#define LS1X_RTC_TICK_IRQ LS1X_IRQ(0, 27)
#define LS1X_TOY_TICK_IRQ LS1X_IRQ(0, 28)
+#define LS1X_UART4_IRQ LS1X_IRQ(0, 29)
+#define LS1X_UART5_IRQ LS1X_IRQ(0, 30)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_UART3_IRQ LS1X_IRQ(0, 29)
+#define LS1X_ADC_IRQ LS1X_IRQ(0, 30)
+#define LS1X_SDIO_IRQ LS1X_IRQ(0, 31)
+#endif
#define LS1X_EHCI_IRQ LS1X_IRQ(1, 0)
#define LS1X_OHCI_IRQ LS1X_IRQ(1, 1)
+#if defined(CONFIG_LOONGSON1_LS1B)
#define LS1X_GMAC0_IRQ LS1X_IRQ(1, 2)
#define LS1X_GMAC1_IRQ LS1X_IRQ(1, 3)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define LS1X_OTG_IRQ LS1X_IRQ(1, 2)
+#define LS1X_GMAC0_IRQ LS1X_IRQ(1, 3)
+#define LS1X_CAM_IRQ LS1X_IRQ(1, 4)
+#define LS1X_UART4_IRQ LS1X_IRQ(1, 5)
+#define LS1X_UART5_IRQ LS1X_IRQ(1, 6)
+#define LS1X_UART6_IRQ LS1X_IRQ(1, 7)
+#define LS1X_UART7_IRQ LS1X_IRQ(1, 8)
+#define LS1X_UART8_IRQ LS1X_IRQ(1, 9)
+#define LS1X_UART9_IRQ LS1X_IRQ(1, 13)
+#define LS1X_UART10_IRQ LS1X_IRQ(1, 14)
+#define LS1X_UART11_IRQ LS1X_IRQ(1, 15)
+#define LS1X_I2C0_IRQ LS1X_IRQ(1, 17)
+#define LS1X_I2C1_IRQ LS1X_IRQ(1, 18)
+#define LS1X_I2C2_IRQ LS1X_IRQ(1, 19)
+#endif
-#define LS1X_IRQS (LS1X_IRQ(4, 31) + 1 - LS1X_IRQ_BASE)
+#if defined(CONFIG_LOONGSON1_LS1B)
+#define INTN 4
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define INTN 5
+#endif
+
+#define LS1X_IRQS (LS1X_IRQ(INTN, 31) + 1 - LS1X_IRQ_BASE)
#define NR_IRQS (MIPS_CPU_IRQS + LS1X_IRQS)
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
index 978f6df8970a..3584c40caf79 100644
--- a/arch/mips/include/asm/mach-loongson32/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -12,7 +12,11 @@
#ifndef __ASM_MACH_LOONGSON32_LOONGSON1_H
#define __ASM_MACH_LOONGSON32_LOONGSON1_H
+#if defined(CONFIG_LOONGSON1_LS1B)
#define DEFAULT_MEMSIZE 256 /* If no memsize provided */
+#elif defined(CONFIG_LOONGSON1_LS1C)
+#define DEFAULT_MEMSIZE 32
+#endif
/* Loongson 1 Register Bases */
#define LS1X_MUX_BASE 0x1fd00420
@@ -20,6 +24,7 @@
#define LS1X_GPIO0_BASE 0x1fd010c0
#define LS1X_GPIO1_BASE 0x1fd010c4
#define LS1X_DMAC_BASE 0x1fd01160
+#define LS1X_CBUS_BASE 0x1fd011c0
#define LS1X_EHCI_BASE 0x1fe00000
#define LS1X_OHCI_BASE 0x1fe08000
#define LS1X_GMAC0_BASE 0x1fe10000
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index 672531aa9bef..7adc31364939 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -30,5 +30,6 @@ void __init ls1x_clk_init(void);
void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev);
#endif /* __ASM_MACH_LOONGSON32_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
index 4d56fc38f0c4..e5e8f118f34b 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-clk.h
@@ -18,6 +18,7 @@
#define LS1X_CLK_PLL_FREQ LS1X_CLK_REG(0x0)
#define LS1X_CLK_PLL_DIV LS1X_CLK_REG(0x4)
+#if defined(CONFIG_LOONGSON1_LS1B)
/* Clock PLL Divisor Register Bits */
#define DIV_DC_EN BIT(31)
#define DIV_DC_RST BIT(30)
@@ -48,4 +49,37 @@
#define BYPASS_DDR_WIDTH 1
#define BYPASS_CPU_WIDTH 1
+#elif defined(CONFIG_LOONGSON1_LS1C)
+/* PLL/SDRAM Frequency configuration register Bits */
+#define PLL_VALID BIT(31)
+#define FRAC_N GENMASK(23, 16)
+#define RST_TIME GENMASK(3, 2)
+#define SDRAM_DIV GENMASK(1, 0)
+
+/* CPU/CAMERA/DC Frequency configuration register Bits */
+#define DIV_DC_EN BIT(31)
+#define DIV_DC GENMASK(30, 24)
+#define DIV_CAM_EN BIT(23)
+#define DIV_CAM GENMASK(22, 16)
+#define DIV_CPU_EN BIT(15)
+#define DIV_CPU GENMASK(14, 8)
+#define DIV_DC_SEL_EN BIT(5)
+#define DIV_DC_SEL BIT(4)
+#define DIV_CAM_SEL_EN BIT(3)
+#define DIV_CAM_SEL BIT(2)
+#define DIV_CPU_SEL_EN BIT(1)
+#define DIV_CPU_SEL BIT(0)
+
+#define DIV_DC_SHIFT 24
+#define DIV_CAM_SHIFT 16
+#define DIV_CPU_SHIFT 8
+#define DIV_DDR_SHIFT 0
+
+#define DIV_DC_WIDTH 7
+#define DIV_CAM_WIDTH 7
+#define DIV_CPU_WIDTH 7
+#define DIV_DDR_WIDTH 2
+
+#endif
+
#endif /* __ASM_MACH_LOONGSON32_REGS_CLK_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-mux.h b/arch/mips/include/asm/mach-loongson32/regs-mux.h
index 7c394f93cb9e..4a0bdeb0eb9b 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-mux.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-mux.h
@@ -18,6 +18,7 @@
#define LS1X_MUX_CTRL0 LS1X_MUX_REG(0x0)
#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4)
+#if defined(CONFIG_LOONGSON1_LS1B)
/* MUX CTRL0 Register Bits */
#define UART0_USE_PWM23 BIT(28)
#define UART0_USE_PWM01 BIT(27)
@@ -64,4 +65,64 @@
#define GMAC1_USE_PWM23 BIT(1)
#define GMAC0_USE_PWM01 BIT(0)
+#elif defined(CONFIG_LOONGSON1_LS1C)
+
+/* SHUT_CTRL Register Bits */
+#define UART_SPLIT GENMASK(31, 30)
+#define OUTPUT_CLK GENMASK(29, 26)
+#define ADC_SHUT BIT(25)
+#define SDIO_SHUT BIT(24)
+#define DMA2_SHUT BIT(23)
+#define DMA1_SHUT BIT(22)
+#define DMA0_SHUT BIT(21)
+#define SPI1_SHUT BIT(20)
+#define SPI0_SHUT BIT(19)
+#define I2C2_SHUT BIT(18)
+#define I2C1_SHUT BIT(17)
+#define I2C0_SHUT BIT(16)
+#define AC97_SHUT BIT(15)
+#define I2S_SHUT BIT(14)
+#define UART3_SHUT BIT(13)
+#define UART2_SHUT BIT(12)
+#define UART1_SHUT BIT(11)
+#define UART0_SHUT BIT(10)
+#define CAN1_SHUT BIT(9)
+#define CAN0_SHUT BIT(8)
+#define ECC_SHUT BIT(7)
+#define GMAC_SHUT BIT(6)
+#define USBHOST_SHUT BIT(5)
+#define USBOTG_SHUT BIT(4)
+#define SDRAM_SHUT BIT(3)
+#define SRAM_SHUT BIT(2)
+#define CAM_SHUT BIT(1)
+#define LCD_SHUT BIT(0)
+
+#define UART_SPLIT_SHIFT 30
+#define OUTPUT_CLK_SHIFT 26
+
+/* MISC_CTRL Register Bits */
+#define USBHOST_RSTN BIT(31)
+#define PHY_INTF_SELI GENMASK(30, 28)
+#define AC97_EN BIT(25)
+#define SDIO_DMA_EN GENMASK(24, 23)
+#define ADC_DMA_EN BIT(22)
+#define SDIO_USE_SPI1 BIT(17)
+#define SDIO_USE_SPI0 BIT(16)
+#define SRAM_CTRL GENMASK(15, 0)
+
+#define PHY_INTF_SELI_SHIFT 28
+#define SDIO_DMA_EN_SHIFT 23
+#define SRAM_CTRL_SHIFT 0
+
+#define LS1X_CBUS_REG(n, x) \
+ ((void __iomem *)KSEG1ADDR(LS1X_CBUS_BASE + (n * 0x04) + (x)))
+
+#define LS1X_CBUS_FIRST(n) LS1X_CBUS_REG(n, 0x00)
+#define LS1X_CBUS_SECOND(n) LS1X_CBUS_REG(n, 0x10)
+#define LS1X_CBUS_THIRD(n) LS1X_CBUS_REG(n, 0x20)
+#define LS1X_CBUS_FOURTHT(n) LS1X_CBUS_REG(n, 0x30)
+#define LS1X_CBUS_FIFTHT(n) LS1X_CBUS_REG(n, 0x40)
+
+#endif
+
#endif /* __ASM_MACH_LOONGSON32_REGS_MUX_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
deleted file mode 100644
index bfbd7035d4c5..000000000000
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 2004 Chris Dearman
- * Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
- */
-#ifndef __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
-#define __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H
-
-
-/*
- * CPU feature overrides for MIPS boards
- */
-#ifdef CONFIG_CPU_MIPS32
-#define cpu_has_tlb 1
-#define cpu_has_4kex 1
-#define cpu_has_4k_cache 1
-/* #define cpu_has_fpu ? */
-/* #define cpu_has_32fpr ? */
-#define cpu_has_counter 1
-/* #define cpu_has_watch ? */
-#define cpu_has_divec 1
-#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
-/* #define cpu_has_prefetch ? */
-#define cpu_has_mcheck 1
-/* #define cpu_has_ejtag ? */
-#ifdef CONFIG_CPU_MICROMIPS
-#define cpu_has_llsc 0
-#else
-#define cpu_has_llsc 1
-#endif
-/* #define cpu_has_vtag_icache ? */
-/* #define cpu_has_dc_aliases ? */
-/* #define cpu_has_ic_fills_f_dc ? */
-#define cpu_has_nofpuex 0
-/* #define cpu_has_64bits ? */
-/* #define cpu_has_64bit_zero_reg ? */
-/* #define cpu_has_inclusive_pcaches ? */
-#define cpu_icache_snoops_remote_store 1
-#endif
-
-#ifdef CONFIG_CPU_MIPS64
-#define cpu_has_tlb 1
-#define cpu_has_4kex 1
-#define cpu_has_4k_cache 1
-/* #define cpu_has_fpu ? */
-/* #define cpu_has_32fpr ? */
-#define cpu_has_counter 1
-/* #define cpu_has_watch ? */
-#define cpu_has_divec 1
-#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
-/* #define cpu_has_prefetch ? */
-#define cpu_has_mcheck 1
-/* #define cpu_has_ejtag ? */
-#define cpu_has_llsc 1
-/* #define cpu_has_vtag_icache ? */
-/* #define cpu_has_dc_aliases ? */
-/* #define cpu_has_ic_fills_f_dc ? */
-#define cpu_has_nofpuex 0
-/* #define cpu_has_64bits ? */
-/* #define cpu_has_64bit_zero_reg ? */
-/* #define cpu_has_inclusive_pcaches ? */
-#define cpu_icache_snoops_remote_store 1
-#endif
-
-#endif /* __ASM_MACH_MIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
deleted file mode 100644
index 5d154cfbcf4c..000000000000
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_MACH_MIPS_IRQ_H
-#define __ASM_MACH_MIPS_IRQ_H
-
-#define NR_IRQS 256
-
-
-#include_next <irq.h>
-
-#endif /* __ASM_MACH_MIPS_IRQ_H */
diff --git a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h b/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
deleted file mode 100644
index 6cccd4d558d7..000000000000
--- a/arch/mips/include/asm/mach-sead3/kernel-entry-init.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Chris Dearman (chris@mips.com)
- * Copyright (C) 2007 Mips Technologies, Inc.
- */
-#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
-#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
-
- .macro kernel_entry_setup
- .endm
-
-/*
- * Do SMP slave processor setup necessary before we can safely execute C code.
- */
- .macro smp_slave_setup
- .endm
-
-#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-sead3/war.h b/arch/mips/include/asm/mach-sead3/war.h
deleted file mode 100644
index d068fc411f47..000000000000
--- a/arch/mips/include/asm/mach-sead3/war.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- */
-#ifndef __ASM_MIPS_MACH_MIPS_WAR_H
-#define __ASM_MIPS_MACH_MIPS_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 1
-#define MIPS_CACHE_SYNC_WAR 1
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
-
-#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */
diff --git a/arch/mips/include/asm/machine.h b/arch/mips/include/asm/machine.h
new file mode 100644
index 000000000000..6b444cd9526f
--- /dev/null
+++ b/arch/mips/include/asm/machine.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_MACHINE_H__
+#define __MIPS_ASM_MACHINE_H__
+
+#include <linux/libfdt.h>
+#include <linux/of.h>
+
+struct mips_machine {
+ const struct of_device_id *matches;
+ const void *fdt;
+ bool (*detect)(void);
+ const void *(*fixup_fdt)(const void *fdt, const void *match_data);
+ unsigned int (*measure_hpt_freq)(void);
+};
+
+extern long __mips_machines_start;
+extern long __mips_machines_end;
+
+#define MIPS_MACHINE(name) \
+ static const struct mips_machine __mips_mach_##name \
+ __used __section(.mips.machines.init)
+
+#define for_each_mips_machine(mach) \
+ for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+ (mach) < (struct mips_machine *)&__mips_machines_end; \
+ (mach)++)
+
+/**
+ * mips_machine_is_compatible() - check if a machine is compatible with an FDT
+ * @mach: the machine struct to check
+ * @fdt: the FDT to check for compatibility with
+ *
+ * Check whether the given machine @mach is compatible with the given flattened
+ * device tree @fdt, based upon the compatibility property of the root node.
+ *
+ * Return: the device id matched if any, else NULL
+ */
+static inline const struct of_device_id *
+mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
+{
+ const struct of_device_id *match;
+
+ if (!mach->matches)
+ return NULL;
+
+ for (match = mach->matches; match->compatible; match++) {
+ if (fdt_node_check_compatible(fdt, 0, match->compatible) == 0)
+ return match;
+ }
+
+ return NULL;
+}
+
+#endif /* __MIPS_ASM_MACHINE_H__ */
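
[Illustrative sketch, not part of the patch: registering a hypothetical DT-booted board. With a UHI boot, prom_init() walks the machine list and mips_machine_is_compatible() matches on the root "compatible" string; the board name and compatible string below are made up.]

static const struct of_device_id example_board_matches[] = {
	{ .compatible = "vendor,example-board" },
	{ /* sentinel */ }
};

MIPS_MACHINE(example_board) = {
	.matches = example_board_matches,
	/* .detect, .fixup_fdt and .measure_hpt_freq are optional here */
};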
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
deleted file mode 100644
index 8932c7de0419..000000000000
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
- * Douglas Leung <douglas@mips.com>
- * Steven J. Hill <sjhill@mips.com>
- */
-#ifndef _MIPS_SEAD3INT_H
-#define _MIPS_SEAD3INT_H
-
-#include <linux/irqchip/mips-gic.h>
-
-/* SEAD-3 GIC address space definitions. */
-#define GIC_BASE_ADDR 0x1b1c0000
-#define GIC_ADDRSPACE_SZ (128 * 1024)
-
-/* CPU interrupt offsets */
-#define CPU_INT_GIC 2
-#define CPU_INT_EHCI 2
-#define CPU_INT_UART0 4
-#define CPU_INT_UART1 4
-#define CPU_INT_NET 6
-
-/* GIC interrupt offsets */
-#define GIC_INT_NET GIC_SHARED_TO_HWIRQ(0)
-#define GIC_INT_UART1 GIC_SHARED_TO_HWIRQ(2)
-#define GIC_INT_UART0 GIC_SHARED_TO_HWIRQ(3)
-#define GIC_INT_EHCI GIC_SHARED_TO_HWIRQ(5)
-
-#endif /* !(_MIPS_SEAD3INT_H) */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 4fafeefe65c2..2e4180797b21 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -359,6 +359,7 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
/* GCR_Cx_COHERENCE register fields */
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0
#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0)
+#define CM3_GCR_Cx_COHERENCE_COHEN_MSK (_ULCAST_(0x1) << 0)
/* GCR_Cx_CONFIG register fields */
#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index cda93aee712c..b4d19c21b62c 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -58,16 +58,6 @@ typedef enum {
#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
-
-/**
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
@@ -86,26 +76,6 @@ extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
extern int cvmx_helper_board_get_mii_address(int ipd_port);
/**
- * This function as a board specific method of changing the PHY
- * speed, duplex, and autonegotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backward compatibility.
- * @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info);
-
-/**
* This function is the board specific method of determining an
* ethernet ports link speed. Most Octeon boards have Marvell PHYs
* and are handled by the fall through case. This function must be
diff --git a/arch/mips/include/asm/octeon/cvmx-mdio.h b/arch/mips/include/asm/octeon/cvmx-mdio.h
deleted file mode 100644
index 9f6a4f32a83c..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-mdio.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
- * clause 22 and clause 45 operations.
- *
- */
-
-#ifndef __CVMX_MIO_H__
-#define __CVMX_MIO_H__
-
-#include <asm/octeon/cvmx-smix-defs.h>
-
-/**
- * PHY register 0 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL 0
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reset:1;
- uint16_t loopback:1;
- uint16_t speed_lsb:1;
- uint16_t autoneg_enable:1;
- uint16_t power_down:1;
- uint16_t isolate:1;
- uint16_t restart_autoneg:1;
- uint16_t duplex:1;
- uint16_t collision_test:1;
- uint16_t speed_msb:1;
- uint16_t unidirectional_enable:1;
- uint16_t reserved_0_4:5;
- } s;
-} cvmx_mdio_phy_reg_control_t;
-
-/**
- * PHY register 1 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS 1
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_100base_t4:1;
- uint16_t capable_100base_x_full:1;
- uint16_t capable_100base_x_half:1;
- uint16_t capable_10_full:1;
- uint16_t capable_10_half:1;
- uint16_t capable_100base_t2_full:1;
- uint16_t capable_100base_t2_half:1;
- uint16_t capable_extended_status:1;
- uint16_t capable_unidirectional:1;
- uint16_t capable_mf_preamble_suppression:1;
- uint16_t autoneg_complete:1;
- uint16_t remote_fault:1;
- uint16_t capable_autoneg:1;
- uint16_t link_status:1;
- uint16_t jabber_detect:1;
- uint16_t capable_extended_registers:1;
-
- } s;
-} cvmx_mdio_phy_reg_status_t;
-
-/**
- * PHY register 2 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID1 2
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_3_18;
- } s;
-} cvmx_mdio_phy_reg_id1_t;
-
-/**
- * PHY register 3 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID2 3
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_19_24:6;
- uint16_t model:6;
- uint16_t revision:4;
- } s;
-} cvmx_mdio_phy_reg_id2_t;
-
-/**
- * PHY register 4 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t reserved_14:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_autoneg_adver_t;
-
-/**
- * PHY register 5 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t ack:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_link_partner_ability_t;
-
-/**
- * PHY register 6 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reserved_5_15:11;
- uint16_t parallel_detection_fault:1;
- uint16_t link_partner_next_page_capable:1;
- uint16_t local_next_page_capable:1;
- uint16_t page_received:1;
- uint16_t link_partner_autoneg_capable:1;
-
- } s;
-} cvmx_mdio_phy_reg_autoneg_expansion_t;
-
-/**
- * PHY register 9 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
-typedef union {
- uint16_t u16;
- struct {
- uint16_t test_mode:3;
- uint16_t manual_master_slave:1;
- uint16_t master:1;
- uint16_t port_type:1;
- uint16_t advert_1000base_t_full:1;
- uint16_t advert_1000base_t_half:1;
- uint16_t reserved_0_7:8;
- } s;
-} cvmx_mdio_phy_reg_control_1000_t;
-
-/**
- * PHY register 10 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS_1000 10
-typedef union {
- uint16_t u16;
- struct {
- uint16_t master_slave_fault:1;
- uint16_t is_master:1;
- uint16_t local_receiver_ok:1;
- uint16_t remote_receiver_ok:1;
- uint16_t remote_capable_1000base_t_full:1;
- uint16_t remote_capable_1000base_t_half:1;
- uint16_t reserved_8_9:2;
- uint16_t idle_error_count:8;
- } s;
-} cvmx_mdio_phy_reg_status_1000_t;
-
-/**
- * PHY register 15 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_1000base_x_full:1;
- uint16_t capable_1000base_x_half:1;
- uint16_t capable_1000base_t_full:1;
- uint16_t capable_1000base_t_half:1;
- uint16_t reserved_0_11:12;
- } s;
-} cvmx_mdio_phy_reg_extended_status_t;
-
-/**
- * PHY register 13 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
-typedef union {
- uint16_t u16;
- struct {
- uint16_t function:2;
- uint16_t reserved_5_13:9;
- uint16_t devad:5;
- } s;
-} cvmx_mdio_phy_reg_mmd_control_t;
-
-/**
- * PHY register 14 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
-typedef union {
- uint16_t u16;
- struct {
- uint16_t address_data:16;
- } s;
-} cvmx_mdio_phy_reg_mmd_address_data_t;
-
-/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
-
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
-#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
-
-/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
-#define CVMX_MMD_DEVICE_CL22_EXT 29
-#define CVMX_MMD_DEVICE_VENDOR_1 30
-#define CVMX_MMD_DEVICE_VENDOR_2 31
-
-/* Helper function to put MDIO interface into clause 45 mode */
-static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 45 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 1;
- smi_clk.s.preamble = 1;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/* Helper function to put MDIO interface into clause 22 mode */
-static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 22 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 0;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/**
- * Perform an MII read. This function is used to read PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && timeout--);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return -1;
-}
-
-/**
- * Perform an MII write. This function is used to write PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII read. This function is used to
- * read PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @device: MDIO Managable Device (MMD) id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-
-static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
- int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && --timeout);
-
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII write. This function is used to
- * write PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
- * @phy_id: The MII phy id
- * @device: MDIO Managable Device (MMD) id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
- int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
-
-#endif
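For reference, the clause-45 helpers removed above follow the standard two-step MDIO sequence: an ADDRESS cycle that latches the register number, then a READ or WRITE against the same MMD. A minimal sketch of a caller, assuming the helpers and MMD constants defined in the header above; registers 2 and 3 are the IEEE 802.3 device-identifier registers and example_read_pma_id() is purely illustrative:

/* Illustrative only; relies on the cvmx-mdio.h helpers shown above. */
static int example_read_pma_id(int bus, int phy)
{
	int id_hi, id_lo;

	/* Clause 45: each read is an ADDRESS cycle followed by a READ cycle,
	 * both handled inside cvmx_mdio_45_read(). */
	id_hi = cvmx_mdio_45_read(bus, phy, CVMX_MMD_DEVICE_PMA_PMD, 2);
	id_lo = cvmx_mdio_45_read(bus, phy, CVMX_MMD_DEVICE_PMA_PMD, 3);
	if (id_hi < 0 || id_lo < 0)
		return -1;

	return (id_hi << 16) | id_lo;
}
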
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 9b63cd41213d..30d1129d8624 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,15 +17,18 @@
*/
#include <linux/ioport.h>
+#include <linux/list.h>
#include <linux/of.h>
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
/*
  * Each PCI channel is a top-level PCI bus seen by the CPU. A machine with
* multiple PCI channels may have multiple PCI host controllers or a
* single controller supporting multiple channels.
*/
struct pci_controller {
- struct pci_controller *next;
+ struct list_head list;
struct pci_bus *bus;
struct device_node *of_node;
@@ -38,10 +41,12 @@ struct pci_controller {
struct resource *busn_resource;
unsigned long busn_offset;
+#ifndef CONFIG_PCI_DOMAINS_GENERIC
unsigned int index;
/* For compatibility with current (as of July 2003) pciutils
and XFree86. Eventually will be removed. */
unsigned int need_domain_info;
+#endif
/* Optional access methods for reading/writing the bus number
of the PCI controller */
@@ -59,12 +64,43 @@ extern void register_pci_controller(struct pci_controller *hose);
*/
extern int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+/* Do platform specific device initialization at pci_enable_device() time */
+extern int pcibios_plat_dev_init(struct pci_dev *dev);
+
+extern char * (*pcibios_plat_setup)(char *str);
+
+#ifdef CONFIG_OF
+/* this function parses memory ranges from a device node */
+extern void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node) {}
+#endif
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+ int need_domain_info)
+{
+ /* nothing to do */
+}
+#elif defined(CONFIG_PCI_DOMAINS)
+static inline void set_pci_need_domain_info(struct pci_controller *hose,
+ int need_domain_info)
+{
+ hose->need_domain_info = need_domain_info;
+}
+#endif /* CONFIG_PCI_DOMAINS */
+
+#endif
/* Can be used to override the logic in pci_scan_bus for skipping
already-configured bus numbers - to be used for buggy BIOSes
or architectures with incomplete PCI setup by the loader */
-
-extern unsigned int pcibios_assign_all_busses(void);
+static inline unsigned int pcibios_assign_all_busses(void)
+{
+ return 1;
+}
extern unsigned long PCIBIOS_MIN_IO;
extern unsigned long PCIBIOS_MIN_MEM;
@@ -100,7 +136,12 @@ struct pci_dev;
*/
#define PCI_DMA_BUS_IS_PHYS (1)
-#ifdef CONFIG_PCI_DOMAINS
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+#elif defined(CONFIG_PCI_DOMAINS)
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
static inline int pci_proc_domain(struct pci_bus *bus)
@@ -121,15 +162,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}
-extern char * (*pcibios_plat_setup)(char *str);
-
-#ifdef CONFIG_OF
-/* this function parses memory ranges from a device node */
-extern void pci_load_of_ranges(struct pci_controller *hose,
- struct device_node *node);
-#else
-static inline void pci_load_of_ranges(struct pci_controller *hose,
- struct device_node *node) {}
-#endif
-
#endif /* _ASM_PCI_H */
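For context on the hunk above: replacing the hand-rolled next pointer with a list_head means host controllers are walked with the standard list iterators instead of a custom chain. A hedged sketch of the new traversal shape; example_controllers and the pr_info() are illustrative, the real list head lives in the MIPS PCI core:

/* Illustrative traversal with the list_head-based struct pci_controller. */
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/printk.h>

static LIST_HEAD(example_controllers);

static void example_walk_controllers(void)
{
	struct pci_controller *hose;

	/* Previously controllers were chained through ->next by hand. */
	list_for_each_entry(hose, &example_controllers, list)
		pr_info("PCI controller mem %pR\n", hose->mem_resource);
}
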
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 93c079a1cfc8..a03e86969f78 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -67,11 +67,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- pte_t *pte;
-
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
-
- return pte;
+ return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
index 625eda53d571..89d58d80b77b 100644
--- a/arch/mips/include/asm/pm-cps.h
+++ b/arch/mips/include/asm/pm-cps.h
@@ -13,10 +13,12 @@
/*
* The CM & CPC can only handle coherence & power control on a per-core basis,
- * thus in an MT system the VPEs within each core are coupled and can only
+ * thus in an MT system the VP(E)s within each core are coupled and can only
* enter or exit states requiring CM or CPC assistance in unison.
*/
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_CPU_MIPSR6)
+# define coupled_coherence cpu_has_vp
+#elif defined(CONFIG_MIPS_MT)
# define coupled_coherence cpu_has_mipsmt
#else
# define coupled_coherence 0
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index f6fc6aac5496..b6578611dddb 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -152,7 +152,7 @@ static inline int is_syscall_success(struct pt_regs *regs)
static inline long regs_return_value(struct pt_regs *regs)
{
- if (is_syscall_success(regs))
+ if (is_syscall_success(regs) || !user_mode(regs))
return regs->regs[2];
else
return -regs->regs[2];
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 8bc6c70a4030..060f23ff1817 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -85,6 +85,20 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_allocate(const struct cpumask *mask);
+
+/*
+ * This function will free up IPIs allocated with mips_smp_ipi_allocate for the
+ * CPUs in mask, which must be a subset of the CPUs that have IPIs configured.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
+
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
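A minimal sketch of how a platform might call the two hooks declared above when a group of CPUs is brought in or out of service; the cluster-mask plumbing and function names are hypothetical:

/* Illustrative callers of the new IPI allocate/free interface. */
#include <linux/cpumask.h>
#include <asm/smp.h>

static int example_cluster_up(const struct cpumask *cluster_cpus)
{
	/* Wire up call-function and reschedule IPIs for just these CPUs. */
	return mips_smp_ipi_allocate(cluster_cpus);
}

static int example_cluster_down(const struct cpumask *cluster_cpus)
{
	/* The mask must be a subset of what was previously allocated. */
	return mips_smp_ipi_free(cluster_cpus);
}
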
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 4daf839cd8a8..89fa5c0b1579 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -859,7 +859,10 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_from, __cu_len, true); \
might_fault(); \
+ \
if (eva_kernel_access()) \
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
__cu_len); \
@@ -880,6 +883,9 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_from, __cu_len, true); \
+ \
if (eva_kernel_access()) \
__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
__cu_len); \
@@ -898,6 +904,9 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_to, __cu_len, false); \
+ \
if (eva_kernel_access()) \
__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
__cu_from,\
@@ -932,6 +941,9 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_from, __cu_len, true); \
+ \
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_to_kernel(__cu_to, \
__cu_from, \
@@ -1124,6 +1136,9 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_to, __cu_len, false); \
+ \
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_from_kernel(__cu_to, \
__cu_from, \
@@ -1162,6 +1177,9 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
+ \
+ check_object_size(__cu_to, __cu_len, false); \
+ \
if (eva_kernel_access()) { \
__cu_len = __invoke_copy_from_kernel(__cu_to, \
__cu_from, \
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 24ad815c7f38..3e940dbe0262 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -383,16 +383,20 @@
#define __NR_copy_file_range (__NR_Linux + 360)
#define __NR_preadv2 (__NR_Linux + 361)
#define __NR_pwritev2 (__NR_Linux + 362)
+#define __NR_pkey_mprotect (__NR_Linux + 363)
+#define __NR_pkey_alloc (__NR_Linux + 364)
+#define __NR_pkey_free (__NR_Linux + 365)
+
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 362
+#define __NR_Linux_syscalls 365
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 362
+#define __NR_O32_Linux_syscalls 365
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -723,16 +727,19 @@
#define __NR_copy_file_range (__NR_Linux + 320)
#define __NR_preadv2 (__NR_Linux + 321)
#define __NR_pwritev2 (__NR_Linux + 322)
+#define __NR_pkey_mprotect (__NR_Linux + 323)
+#define __NR_pkey_alloc (__NR_Linux + 324)
+#define __NR_pkey_free (__NR_Linux + 325)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 322
+#define __NR_Linux_syscalls 325
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 322
+#define __NR_64_Linux_syscalls 325
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1067,15 +1074,18 @@
#define __NR_copy_file_range (__NR_Linux + 324)
#define __NR_preadv2 (__NR_Linux + 325)
#define __NR_pwritev2 (__NR_Linux + 326)
+#define __NR_pkey_mprotect (__NR_Linux + 327)
+#define __NR_pkey_alloc (__NR_Linux + 328)
+#define __NR_pkey_free (__NR_Linux + 329)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 326
+#define __NR_Linux_syscalls 329
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 326
+#define __NR_N32_Linux_syscalls 329
#endif /* _UAPI_ASM_UNISTD_H */
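These additions only assign syscall numbers on each MIPS ABI; the pkey implementation itself is generic. A user-space sketch of exercising the new numbers once the uapi headers are installed, purely illustrative: on hardware or kernels without protection-key support, pkey_alloc is expected to fail (e.g. ENOSYS or EINVAL).

/* Illustrative user-space probe of the newly wired pkey syscalls. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long pkey = syscall(__NR_pkey_alloc, 0, 0);

	if (pkey < 0) {
		perror("pkey_alloc");	/* ENOSYS/EINVAL when unsupported */
		return 1;
	}

	printf("allocated pkey %ld\n", pkey);
	syscall(__NR_pkey_free, pkey);
	return 0;
}
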
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 58ad63d7eb42..9c7f3e136d50 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -1,5 +1,6 @@
/*
* Support for n32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -37,7 +38,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
-#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
@@ -96,12 +96,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#define ELF_CORE_EFLAGS EF_MIPS_ABI2
-MODULE_DESCRIPTION("Binary format loader for compatibility with n32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 49fb881481f7..1ab34322dd97 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -1,5 +1,6 @@
/*
* Support for o32 Linux/MIPS ELF binaries.
+ * Author: Ralf Baechle (ralf@linux-mips.org)
*
* Copyright (C) 1999, 2001 Ralf Baechle
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
@@ -42,7 +43,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <asm/processor.h>
-#include <linux/module.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
#include <linux/math64.h>
@@ -99,12 +99,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
value->tv_usec = rem / NSEC_PER_USEC;
}
-MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
-MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
#undef TASK_SIZE
#define TASK_SIZE TASK_SIZE32
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 46c227fc98f5..12c718181e5e 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/signal.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -866,3 +866,37 @@ unaligned:
force_sig(SIGBUS, current);
return -EFAULT;
}
+
+#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
+
+int __insn_is_compact_branch(union mips_instruction insn)
+{
+ if (!cpu_has_mips_r6)
+ return 0;
+
+ switch (insn.i_format.opcode) {
+ case blezl_op:
+ case bgtzl_op:
+ case blez_op:
+ case bgtz_op:
+ /*
+ * blez[l] and bgtz[l] opcodes with non-zero rt
+ * are MIPS R6 compact branches
+ */
+ if (insn.i_format.rt)
+ return 1;
+ break;
+ case bc6_op:
+ case balc6_op:
+ case pop10_op:
+ case pop30_op:
+ case pop66_op:
+ case pop76_op:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
+
+#endif /* CONFIG_KPROBES || CONFIG_UPROBES */
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 212f46f2014e..f5c8bce70db2 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -32,7 +32,8 @@
#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"
static const union mips_instruction breakpoint_insn = {
.b_format = {
@@ -55,63 +56,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
- switch (insn.i_format.opcode) {
-
- /*
- * This group contains:
- * jr and jalr are in r_format format.
- */
- case spec_op:
- switch (insn.r_format.func) {
- case jr_op:
- case jalr_op:
- break;
- default:
- goto insn_ok;
- }
-
- /*
- * This group contains:
- * bltz_op, bgez_op, bltzl_op, bgezl_op,
- * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
- */
- case bcond_op:
-
- /*
- * These are unconditional and in j_format.
- */
- case jal_op:
- case j_op:
-
- /*
- * These are conditional and in i_format.
- */
- case beq_op:
- case beql_op:
- case bne_op:
- case bnel_op:
- case blez_op:
- case blezl_op:
- case bgtz_op:
- case bgtzl_op:
-
- /*
- * These are the FPA/cp1 branch instructions.
- */
- case cop1_op:
-
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- case lwc2_op: /* This is bbit0 on Octeon */
- case ldc2_op: /* This is bbit032 on Octeon */
- case swc2_op: /* This is bbit1 on Octeon */
- case sdc2_op: /* This is bbit132 on Octeon */
-#endif
- return 1;
- default:
- break;
- }
-insn_ok:
- return 0;
+ return __insn_has_delay_slot(insn);
}
/*
@@ -161,6 +106,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
goto out;
}
+ if (__insn_is_compact_branch(insn)) {
+ pr_notice("Kprobes for compact branches are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
/* insn: must be on special executable page on mips. */
p->ainsn.insn = get_insn_slot();
if (!p->ainsn.insn) {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 0b29646bcee7..50fb62544df7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -26,7 +26,6 @@
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/dnotify.h>
-#include <linux/module.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/compat.h>
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 566b8d2c092c..2a45867d3b4f 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -52,7 +52,7 @@ static phys_addr_t mips_cpc_phys_base(void)
int mips_cpc_probe(void)
{
phys_addr_t addr;
- unsigned cpu;
+ unsigned int cpu;
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cpc_core_lock, cpu));
@@ -70,7 +70,12 @@ int mips_cpc_probe(void)
void mips_cpc_lock_other(unsigned int core)
{
- unsigned curr_core;
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
preempt_disable();
curr_core = current_cpu_data.core;
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
@@ -86,7 +91,13 @@ void mips_cpc_lock_other(unsigned int core)
void mips_cpc_unlock_other(void)
{
- unsigned curr_core = current_cpu_data.core;
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
+ curr_core = current_cpu_data.core;
spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
preempt_enable();
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 0a7e10b5f9e3..22dedd62818a 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -15,7 +15,6 @@
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 79850e376ef6..94627a3a6a0d 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -20,6 +20,7 @@
#undef DEBUG
+#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/mm.h>
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5b31a9405ebc..7cf653e21423 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -8,6 +8,7 @@
* option) any later version.
*/
+#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>
@@ -70,13 +71,8 @@ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
/* A somewhat arbitrary number of labels & relocs for uasm */
-static struct uasm_label labels[32] __initdata;
-static struct uasm_reloc relocs[32] __initdata;
-
-/* CPU dependant sync types */
-static unsigned stype_intervention;
-static unsigned stype_memory;
-static unsigned stype_ordering;
+static struct uasm_label labels[32];
+static struct uasm_reloc relocs[32];
enum mips_reg {
zero, at, v0, v1, a0, a1, a2, a3,
@@ -134,7 +130,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
return -EINVAL;
/* Calculate which coupled CPUs (VPEs) are online */
-#ifdef CONFIG_MIPS_MT
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
if (cpu_online(cpu)) {
cpumask_and(coupled_mask, cpu_online_mask,
&cpu_sibling_map[cpu]);
@@ -198,10 +194,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
return 0;
}
-static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
- struct uasm_reloc **pr,
- const struct cache_desc *cache,
- unsigned op, int lbl)
+static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cache_desc *cache,
+ unsigned op, int lbl)
{
unsigned cache_size = cache->ways << cache->waybit;
unsigned i;
@@ -242,10 +238,10 @@ static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
uasm_i_nop(pp);
}
-static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
- struct uasm_reloc **pr,
- const struct cpuinfo_mips *cpu_info,
- int lbl)
+static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cpuinfo_mips *cpu_info,
+ int lbl)
{
unsigned i, fsb_size = 8;
unsigned num_loads = (fsb_size * 3) / 2;
@@ -272,14 +268,9 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
/* On older ones it's unavailable */
return -1;
- /* CPUs which do not require the workaround */
- case CPU_P5600:
- case CPU_I6400:
- return 0;
-
default:
- WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
- return -1;
+ /* Assume that the CPU does not need this workaround */
+ return 0;
}
/*
@@ -320,8 +311,8 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
i * line_size * line_stride, t0);
}
- /* Completion barrier */
- uasm_i_sync(pp, stype_memory);
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(pp, STYPE_SYNC);
uasm_i_ehb(pp);
/* Check whether the pipeline stalled due to the FSB being full */
@@ -340,9 +331,9 @@ static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
return 0;
}
-static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
- struct uasm_reloc **pr,
- unsigned r_addr, int lbl)
+static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ unsigned r_addr, int lbl)
{
uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
uasm_build_label(pl, *pp, lbl);
@@ -353,7 +344,7 @@ static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
uasm_i_nop(pp);
}
-static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
@@ -411,7 +402,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
if (coupled_coherence) {
/* Increment ready_count */
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
uasm_build_label(&l, p, lbl_incready);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, 1);
@@ -419,8 +410,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_il_beqz(&p, &r, t2, lbl_incready);
uasm_i_addiu(&p, t1, t1, 1);
- /* Ordering barrier */
- uasm_i_sync(&p, stype_ordering);
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, STYPE_SYNC_MB);
/*
* If this is the last VPE to become ready for non-coherence
@@ -441,7 +432,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
uasm_i_lw(&p, t0, 0, r_nc_count);
uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
uasm_i_ehb(&p);
- uasm_i_yield(&p, zero, t1);
+ if (cpu_has_mipsmt)
+ uasm_i_yield(&p, zero, t1);
uasm_il_b(&p, &r, lbl_poll_cont);
uasm_i_nop(&p);
} else {
@@ -449,8 +441,21 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* The core will lose power & this VPE will not continue
* so it can simply halt here.
*/
- uasm_i_addiu(&p, t0, zero, TCHALT_H);
- uasm_i_mtc0(&p, t0, 2, 4);
+ if (cpu_has_mipsmt) {
+ /* Halt the VPE via C0 tchalt register */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ } else if (cpu_has_vp) {
+ /* Halt the VP via the CPC VP_STOP register */
+ unsigned int vpe_id;
+
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+ UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+ uasm_i_sw(&p, t0, 0, t1);
+ } else {
+ BUG();
+ }
uasm_build_label(&l, p, lbl_secondary_hang);
uasm_il_b(&p, &r, lbl_secondary_hang);
uasm_i_nop(&p);
@@ -472,22 +477,24 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
Index_Writeback_Inv_D, lbl_flushdcache);
- /* Completion barrier */
- uasm_i_sync(&p, stype_memory);
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(&p, STYPE_SYNC);
uasm_i_ehb(&p);
- /*
- * Disable all but self interventions. The load from COHCTL is defined
- * by the interAptiv & proAptiv SUMs as ensuring that the operation
- * resulting from the preceding store is complete.
- */
- uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
- uasm_i_sw(&p, t0, 0, r_pcohctl);
- uasm_i_lw(&p, t0, 0, r_pcohctl);
-
- /* Sync to ensure previous interventions are complete */
- uasm_i_sync(&p, stype_intervention);
- uasm_i_ehb(&p);
+ if (mips_cm_revision() < CM_REV_CM3) {
+ /*
+ * Disable all but self interventions. The load from COHCTL is
+ * defined by the interAptiv & proAptiv SUMs as ensuring that the
+ * operation resulting from the preceding store is complete.
+ */
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, STYPE_SYNC);
+ uasm_i_ehb(&p);
+ }
/* Disable coherence */
uasm_i_sw(&p, zero, 0, r_pcohctl);
@@ -531,8 +538,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
goto gen_done;
}
- /* Completion barrier */
- uasm_i_sync(&p, stype_memory);
+ /* Barrier to ensure write to CPC command is complete */
+ uasm_i_sync(&p, STYPE_SYNC);
uasm_i_ehb(&p);
}
@@ -562,26 +569,29 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
* will run this. The first will actually re-enable coherence & the
* rest will just be performing a rather unusual nop.
*/
- uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK);
+ uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+ ? CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK
+ : CM3_GCR_Cx_COHERENCE_COHEN_MSK);
+
uasm_i_sw(&p, t0, 0, r_pcohctl);
uasm_i_lw(&p, t0, 0, r_pcohctl);
- /* Completion barrier */
- uasm_i_sync(&p, stype_memory);
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, STYPE_SYNC);
uasm_i_ehb(&p);
if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
/* Decrement ready_count */
uasm_build_label(&l, p, lbl_decready);
- uasm_i_sync(&p, stype_ordering);
+ uasm_i_sync(&p, STYPE_SYNC_MB);
uasm_i_ll(&p, t1, 0, r_nc_count);
uasm_i_addiu(&p, t2, t1, -1);
uasm_i_sc(&p, t2, 0, r_nc_count);
uasm_il_beqz(&p, &r, t2, lbl_decready);
uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
- /* Ordering barrier */
- uasm_i_sync(&p, stype_ordering);
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, STYPE_SYNC_MB);
}
if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
@@ -602,8 +612,8 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
*/
uasm_build_label(&l, p, lbl_secondary_cont);
- /* Ordering barrier */
- uasm_i_sync(&p, stype_ordering);
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, STYPE_SYNC_MB);
}
/* The core is coherent, time to return to C code */
@@ -628,7 +638,7 @@ out_err:
return NULL;
}
-static int __init cps_gen_core_entries(unsigned cpu)
+static int cps_pm_online_cpu(unsigned int cpu)
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
@@ -670,29 +680,10 @@ static int __init cps_gen_core_entries(unsigned cpu)
static int __init cps_pm_init(void)
{
- unsigned cpu;
- int err;
-
- /* Detect appropriate sync types for the system */
- switch (current_cpu_data.cputype) {
- case CPU_INTERAPTIV:
- case CPU_PROAPTIV:
- case CPU_M5150:
- case CPU_P5600:
- case CPU_I6400:
- stype_intervention = 0x2;
- stype_memory = 0x3;
- stype_ordering = 0x10;
- break;
-
- default:
- pr_warn("Power management is using heavyweight sync 0\n");
- }
-
/* A CM is required for all non-coherent states */
if (!mips_cm_present()) {
pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
- goto out;
+ return 0;
}
/*
@@ -722,12 +713,7 @@ static int __init cps_pm_init(void)
pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
}
- for_each_present_cpu(cpu) {
- err = cps_gen_core_entries(cpu);
- if (err)
- return err;
- }
-out:
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PM_CPS_CPU_ONLINE",
+ cps_pm_online_cpu, NULL);
}
arch_initcall(cps_pm_init);
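The conversion above trades a boot-time loop over all present CPUs for a dynamic CPU-hotplug state, so entry code is (re)generated whenever a CPU comes online. The general shape of that pattern, as a hedged sketch with hypothetical names:

/* Illustrative: the generic cpuhp_setup_state() pattern used above. */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int example_cpu_online(unsigned int cpu)
{
	/* Per-CPU setup that previously ran once for every present CPU. */
	return 0;
}

static int __init example_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				 example_cpu_online, NULL);
}
arch_initcall(example_init);
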
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
new file mode 100644
index 000000000000..dd08e41134b6
--- /dev/null
+++ b/arch/mips/kernel/probes-common.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Marcin Nowakowski <marcin.nowakowski@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PROBES_COMMON_H
+#define __PROBES_COMMON_H
+
+#include <asm/inst.h>
+
+int __insn_is_compact_branch(union mips_instruction insn);
+
+static inline int __insn_has_delay_slot(const union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ case jr_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ case bgez_op:
+ case bgezl_op:
+ case bltzal_op:
+ case bltzall_op:
+ case bgezal_op:
+ case bgezall_op:
+ case bposge32_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+ return 1;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __PROBES_COMMON_H */
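Both probes backends in this patch consume the new header the same way: bail out on compact branches (which cannot be single-stepped out of line) and copy the delay-slot instruction when there is one. A hedged sketch of that decision, with an illustrative wrapper name:

/* Illustrative consumer of probes-common.h; not a real kernel interface. */
#include <linux/errno.h>
#include "probes-common.h"

static int example_classify_probe_insn(union mips_instruction insn)
{
	if (__insn_is_compact_branch(insn))
		return -EINVAL;	/* cannot be single-stepped out of line */

	if (__insn_has_delay_slot(insn))
		return 1;	/* caller must also copy the delay-slot insn */

	return 0;
}
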
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 97dc01b03631..4eff2aed7360 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -135,6 +135,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_mipsmt)
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+ else if (cpu_has_vp)
+ seq_printf(m, "VP\t\t\t: %d\n", cpu_data[n].vpe_id);
+#endif
+
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 0);
+ sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &data,
- sizeof(data), 1) == sizeof(data))
+ sizeof(data),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(data))
break;
ret = -EIO;
break;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index c8e43e0c4066..c29d397eee86 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -597,3 +597,6 @@ EXPORT(sys_call_table)
PTR sys_copy_file_range /* 4360 */
PTR sys_preadv2
PTR sys_pwritev2
+ PTR sys_pkey_mprotect
+ PTR sys_pkey_alloc
+ PTR sys_pkey_free /* 4365 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e6ede125059f..0687f96ee912 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -435,4 +435,7 @@ EXPORT(sys_call_table)
PTR sys_copy_file_range /* 5320 */
PTR sys_preadv2
PTR sys_pwritev2
+ PTR sys_pkey_mprotect
+ PTR sys_pkey_alloc
+ PTR sys_pkey_free /* 5325 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 51d3988933f8..0331ba39a065 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -430,4 +430,7 @@ EXPORT(sysn32_call_table)
PTR sys_copy_file_range
PTR compat_sys_preadv2 /* 6325 */
PTR compat_sys_pwritev2
+ PTR sys_pkey_mprotect
+ PTR sys_pkey_alloc
+ PTR sys_pkey_free
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6efa7136748f..5a47042dd25f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -585,4 +585,7 @@ EXPORT(sys32_call_table)
PTR sys_copy_file_range /* 4360 */
PTR compat_sys_preadv2
PTR compat_sys_pwritev2
+ PTR sys_pkey_mprotect
+ PTR sys_pkey_alloc
+ PTR sys_pkey_free /* 4365 */
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
deleted file mode 100644
index 9b63829cf929..000000000000
--- a/arch/mips/kernel/smp-gic.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (C) 2013 Imagination Technologies
- * Author: Paul Burton <paul.burton@imgtec.com>
- *
- * Based on smp-cmp.c:
- * Copyright (C) 2007 MIPS Technologies, Inc.
- * Author: Chris Dearman (chris@mips.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/irqchip/mips-gic.h>
-#include <linux/printk.h>
-
-#include <asm/mips-cpc.h>
-#include <asm/smp-ops.h>
-
-void gic_send_ipi_single(int cpu, unsigned int action)
-{
- unsigned long flags;
- unsigned int intr;
- unsigned int core = cpu_data[cpu].core;
-
- pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
- smp_processor_id(), __func__, cpu, action, read_c0_status());
-
- local_irq_save(flags);
-
- switch (action) {
- case SMP_CALL_FUNCTION:
- intr = plat_ipi_call_int_xlate(cpu);
- break;
-
- case SMP_RESCHEDULE_YOURSELF:
- intr = plat_ipi_resched_int_xlate(cpu);
- break;
-
- default:
- BUG();
- }
-
- gic_send_ipi(intr);
-
- if (mips_cpc_present() && (core != current_cpu_data.core)) {
- while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
- mips_cm_lock_other(core, 0);
- mips_cpc_lock_other(core);
- write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
- mips_cpc_unlock_other();
- mips_cm_unlock_other();
- }
- }
-
- local_irq_restore(flags);
-}
-
-void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
-{
- unsigned int i;
-
- for_each_cpu(i, mask)
- gic_send_ipi_single(i, action);
-}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 4f9570a57e8d..e077ea3e11fb 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -289,26 +289,3 @@ struct plat_smp_ops vsmp_smp_ops = {
.prepare_cpus = vsmp_prepare_cpus,
};
-#ifdef CONFIG_PROC_FS
-static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
- unsigned long action_unused, void *data)
-{
- struct proc_cpuinfo_notifier_args *pcn = data;
- struct seq_file *m = pcn->m;
- unsigned long n = pcn->n;
-
- if (!cpu_has_mipsmt)
- return NOTIFY_OK;
-
- seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
-
- return NOTIFY_OK;
-}
-
-static int __init proc_cpuinfo_notifier_init(void)
-{
- return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
-}
-
-subsys_initcall(proc_cpuinfo_notifier_init);
-#endif
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index b0baf48951fa..7ebb1918e2ac 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -25,7 +25,7 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
@@ -192,9 +192,11 @@ void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
continue;
while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cm_lock_other(core, 0);
mips_cpc_lock_other(core);
write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
}
}
}
@@ -229,7 +231,7 @@ static struct irqaction irq_call = {
.name = "IPI call"
};
-static __init void smp_ipi_init_one(unsigned int virq,
+static void smp_ipi_init_one(unsigned int virq,
struct irqaction *action)
{
int ret;
@@ -239,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq,
BUG_ON(ret);
}
-static int __init mips_smp_ipi_init(void)
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
{
- unsigned int call_virq, sched_virq;
+ int virq;
struct irq_domain *ipidomain;
struct device_node *node;
@@ -268,16 +272,20 @@ static int __init mips_smp_ipi_init(void)
if (!ipidomain)
return 0;
- call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
- BUG_ON(!call_virq);
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!call_virq)
+ call_virq = virq;
- sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
- BUG_ON(!sched_virq);
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!sched_virq)
+ sched_virq = virq;
if (irq_domain_is_ipi_per_cpu(ipidomain)) {
int cpu;
- for_each_cpu(cpu, cpu_possible_mask) {
+ for_each_cpu(cpu, mask) {
smp_ipi_init_one(call_virq + cpu, &irq_call);
smp_ipi_init_one(sched_virq + cpu, &irq_resched);
}
@@ -286,6 +294,45 @@ static int __init mips_smp_ipi_init(void)
smp_ipi_init_one(sched_virq, &irq_resched);
}
+ return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+ struct irq_domain *ipidomain;
+ struct device_node *node;
+
+ node = of_irq_find_parent(of_root);
+ ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+ /*
+ * Some platforms have half DT setup. So if we found irq node but
+ * didn't find an ipidomain, try to search for one that is not in the
+ * DT.
+ */
+ if (node && !ipidomain)
+ ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+ BUG_ON(!ipidomain);
+
+ if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ remove_irq(call_virq + cpu, &irq_call);
+ remove_irq(sched_virq + cpu, &irq_resched);
+ }
+ }
+ irq_destroy_ipi(call_virq, mask);
+ irq_destroy_ipi(sched_virq, mask);
+ return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+ mips_smp_ipi_allocate(cpu_possible_mask);
+
call_desc = irq_to_desc(call_virq);
sched_desc = irq_to_desc(sched_virq);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3de85be2486a..1f5fdee1dfc3 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
@@ -48,6 +49,7 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
+#include <asm/mips-cm.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
@@ -444,6 +446,8 @@ asmlinkage void do_be(struct pt_regs *regs)
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL);
+ else
+ mips_cm_error_report();
switch (action) {
case MIPS_BE_DISCARD:
@@ -2091,6 +2095,14 @@ static void configure_exception_vector(void)
{
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
+ /* If available, use WG to set top bits of EBASE */
+ if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+ }
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
@@ -2127,8 +2139,17 @@ void per_cpu_trap_init(bool is_boot_cpu)
* We shouldn't trust a secondary core has a sane EBASE register
* so use the one calculated by the boot CPU.
*/
- if (!is_boot_cpu)
+ if (!is_boot_cpu) {
+ /* If available, use WG to set top bits of EBASE */
+ if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+ }
write_c0_ebase(ebase);
+ }
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
@@ -2209,13 +2230,39 @@ void __init trap_init(void)
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
+ phys_addr_t ebase_pa;
+
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
+
+ /*
+ * Try to ensure ebase resides in KSeg0 if possible.
+ *
+ * It shouldn't generally be in XKPhys on MIPS64 to avoid
+ * hitting a poorly defined exception base for Cache Errors.
+ * The allocation is likely to be in the low 512MB of physical,
+ * in which case we should be able to convert to KSeg0.
+ *
+ * EVA is special though as it allows segments to be rearranged
+ * and to become uncached during cache error handling.
+ */
+ ebase_pa = __pa(ebase);
+ if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+ ebase = CKSEG0ADDR(ebase_pa);
} else {
ebase = CAC_BASE;
- if (cpu_has_mips_r2_r6)
- ebase += (read_c0_ebase() & 0x3ffff000);
+ if (cpu_has_mips_r2_r6) {
+ if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+ ebase = (read_c0_ebase_64() & ~0xfff);
+#else
+ ebase = (read_c0_ebase() & ~0xfff);
+#endif
+ } else {
+ ebase += (read_c0_ebase() & 0x3ffff000);
+ }
+ }
}
if (cpu_has_mmips) {
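The ebase handling above leans on the fixed MIPS address-space layout: only the first 512MB of physical memory (below 0x20000000) has a KSeg0 alias, which is why the bootmem allocation is converted with CKSEG0ADDR() only when that bound holds. A small hedged sketch of the same check in isolation:

/* Illustrative restatement of the KSeg0 conversion used above. */
#include <linux/types.h>
#include <asm/addrspace.h>

static unsigned long example_ebase_va(phys_addr_t pa)
{
	/* Only physical addresses below 512MB are reachable through KSeg0. */
	if (pa < 0x20000000)
		return CKSEG0ADDR(pa);

	return 0;	/* caller must fall back to another mapping */
}
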
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 4c7c1558944a..dbb917403131 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -8,71 +8,12 @@
#include <asm/branch.h>
#include <asm/cpu-features.h>
#include <asm/ptrace.h>
-#include <asm/inst.h>
+
+#include "probes-common.h"
static inline int insn_has_delay_slot(const union mips_instruction insn)
{
- switch (insn.i_format.opcode) {
- /*
- * jr and jalr are in r_format format.
- */
- case spec_op:
- switch (insn.r_format.func) {
- case jalr_op:
- case jr_op:
- return 1;
- }
- break;
-
- /*
- * This group contains:
- * bltz_op, bgez_op, bltzl_op, bgezl_op,
- * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
- */
- case bcond_op:
- switch (insn.i_format.rt) {
- case bltz_op:
- case bltzl_op:
- case bgez_op:
- case bgezl_op:
- case bltzal_op:
- case bltzall_op:
- case bgezal_op:
- case bgezall_op:
- case bposge32_op:
- return 1;
- }
- break;
-
- /*
- * These are unconditional and in j_format.
- */
- case jal_op:
- case j_op:
- case beq_op:
- case beql_op:
- case bne_op:
- case bnel_op:
- case blez_op: /* not really i_format */
- case blezl_op:
- case bgtz_op:
- case bgtzl_op:
- return 1;
-
- /*
- * And now the FPA/cp1 branch instructions.
- */
- case cop1_op:
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- case lwc2_op: /* This is bbit0 on Octeon */
- case ldc2_op: /* This is bbit032 on Octeon */
- case swc2_op: /* This is bbit1 on Octeon */
- case sdc2_op: /* This is bbit132 on Octeon */
-#endif
- return 1;
- }
-
- return 0;
+ return __insn_has_delay_slot(insn);
}
/**
@@ -95,6 +36,12 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
return -EINVAL;
inst.word = aup->insn[0];
+
+ if (__insn_is_compact_branch(inst)) {
+ pr_notice("Uprobes for compact branches are not supported\n");
+ return -EINVAL;
+ }
+
aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */
@@ -282,19 +229,14 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
- void *kaddr;
+ unsigned long kaddr, kstart;
/* Initialize the slot */
- kaddr = kmap_atomic(page);
- memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
- kunmap_atomic(kaddr);
-
- /*
- * The MIPS version of flush_icache_range will operate safely on
- * user space addresses and more importantly, it doesn't require a
- * VMA argument.
- */
- flush_icache_range(vaddr, vaddr + len);
+ kaddr = (unsigned long)kmap_atomic(page);
+ kstart = kaddr + (vaddr & ~PAGE_MASK);
+ memcpy((void *)kstart, src, len);
+ flush_icache_range(kstart, kstart + len);
+ kunmap_atomic((void *)kaddr);
}
/**
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index a36b77e1705c..f43629979a0e 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -12,7 +12,6 @@
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index d280894915ed..010cef240688 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
@@ -45,8 +44,8 @@ static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
local_irq_save(flags);
memcpy((void *)opc, (void *)&replace, sizeof(u32));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
+ __local_flush_icache_user_range((unsigned long)opc,
+ (unsigned long)opc + 32);
local_irq_restore(flags);
} else {
kvm_err("%s: Invalid address: %p\n", __func__, opc);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 4db4c0370859..8770f32c9e0b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index ad28dac6b7e9..e88403b3dcdd 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index ce961495b5e1..622037d851a3 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 3a5484f9aa50..3b20441f2beb 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -11,7 +11,6 @@
#include <linux/errno.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 4625495f9230..577ec81b557d 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -6,7 +6,7 @@
* Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/dma-mapping.h>
@@ -55,7 +55,6 @@ static const struct of_device_id vmmc_match[] = {
{ .compatible = "lantiq,vmmc-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, vmmc_match);
static struct platform_driver vmmc_driver = {
.probe = vmmc_probe,
@@ -64,5 +63,4 @@ static struct platform_driver vmmc_driver = {
.of_match_table = vmmc_match,
},
};
-
-module_platform_driver(vmmc_driver);
+builtin_platform_driver(vmmc_driver);
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index 71e518c1e7e7..f0a0f2d431b2 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -1,4 +1,7 @@
/*
+ * Lantiq XRX200 PHY Firmware Loader
+ * Author: John Crispin
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -8,7 +11,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of_platform.h>
@@ -100,7 +102,6 @@ static const struct of_device_id xway_phy_match[] = {
{ .compatible = "lantiq,phy-xrx200" },
{},
};
-MODULE_DEVICE_TABLE(of, xway_phy_match);
static struct platform_driver xway_phy_driver = {
.probe = xway_phy_fw_probe,
@@ -109,9 +110,4 @@ static struct platform_driver xway_phy_driver = {
.of_match_table = xway_phy_match,
},
};
-
-module_platform_driver(xway_phy_driver);
-
-MODULE_AUTHOR("John Crispin <john@phrozen.org>");
-MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_phy_driver);
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index 927dc94a030f..c3e22053d13e 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index 9fdf1a598428..17456024873d 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index e3e77aa52c95..a8114148f82a 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/compiler.h>
unsigned long long notrace __bswapdi2(unsigned long long u)
{
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 530a8afe6fda..106fd978317d 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,4 +1,5 @@
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/compiler.h>
unsigned int notrace __bswapsi2(unsigned int u)
{
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 06857da96993..9d849d8743c9 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 21d27c6819a2..2307a3cb2714 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -8,7 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2007, 2014 Maciej W. Rozycki
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/param.h>
#include <linux/smp.h>
#include <linux/stringify.h>
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index fd35daa45314..8ed3f25a9047 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -7,9 +7,11 @@
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/pci.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/io.h>
+#ifdef CONFIG_PCI_DRIVERS_LEGACY
+
void __iomem *__pci_ioport_map(struct pci_dev *dev,
unsigned long port, unsigned int nr)
{
@@ -40,6 +42,8 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
return (void __iomem *) (ctrl->io_map_base + port);
}
+#endif /* CONFIG_PCI_DRIVERS_LEGACY */
+
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
iounmap(addr);
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
index 8e7e378ce51c..9daa92428e23 100644
--- a/arch/mips/lib/iomap.c
+++ b/arch/mips/lib/iomap.c
@@ -6,7 +6,7 @@
* (C) Copyright 2007 MIPS Technologies, Inc.
* written by Ralf Baechle <ralf@linux-mips.org>
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/io.h>
/*
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index 364547449c65..221167c1be55 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bd599f58234c..08067fa538f2 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/export.h>
#include "libgcc.h"
diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig
index 7704f20529d6..3c0c2f2096cd 100644
--- a/arch/mips/loongson32/Kconfig
+++ b/arch/mips/loongson32/Kconfig
@@ -19,6 +19,21 @@ config LOONGSON1_LS1B
select USE_GENERIC_EARLY_PRINTK_8250
select COMMON_CLK
+config LOONGSON1_LS1C
+ bool "Loongson LS1C board"
+ select CEVT_R4K if !MIPS_EXTERNAL_TIMER
+ select CSRC_R4K if !MIPS_EXTERNAL_TIMER
+ select SYS_HAS_CPU_LOONGSON1C
+ select DMA_NONCOHERENT
+ select BOOT_ELF32
+ select IRQ_MIPS_CPU
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_SUPPORTS_MIPS16
+ select SYS_HAS_EARLY_PRINTK
+ select USE_GENERIC_EARLY_PRINTK_8250
+ select COMMON_CLK
endchoice
menuconfig CEVT_CSRC_LS1X
diff --git a/arch/mips/loongson32/Makefile b/arch/mips/loongson32/Makefile
index 5f4bd6e071ca..1ab2c5bbc066 100644
--- a/arch/mips/loongson32/Makefile
+++ b/arch/mips/loongson32/Makefile
@@ -9,3 +9,9 @@ obj-$(CONFIG_MACH_LOONGSON32) += common/
#
obj-$(CONFIG_LOONGSON1_LS1B) += ls1b/
+
+#
+# Loongson LS1C board
+#
+
+obj-$(CONFIG_LOONGSON1_LS1C) += ls1c/
diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform
index ebb6dc290f0a..ffe01c6d0037 100644
--- a/arch/mips/loongson32/Platform
+++ b/arch/mips/loongson32/Platform
@@ -5,3 +5,4 @@ cflags-$(CONFIG_CPU_LOONGSON1) += \
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32
load-$(CONFIG_LOONGSON1_LS1B) += 0xffffffff80100000
+load-$(CONFIG_LOONGSON1_LS1C) += 0xffffffff80100000
diff --git a/arch/mips/loongson32/common/irq.c b/arch/mips/loongson32/common/irq.c
index 455a7704a90f..635a4abe1f48 100644
--- a/arch/mips/loongson32/common/irq.c
+++ b/arch/mips/loongson32/common/irq.c
@@ -62,12 +62,58 @@ static void ls1x_irq_unmask(struct irq_data *d)
| (1 << bit), LS1X_INTC_INTIEN(n));
}
+static int ls1x_irq_settype(struct irq_data *d, unsigned int type)
+{
+ unsigned int bit = (d->irq - LS1X_IRQ_BASE) & 0x1f;
+ unsigned int n = (d->irq - LS1X_IRQ_BASE) >> 5;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ | (1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ & ~(1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ & ~(1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ | (1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ __raw_writel(__raw_readl(LS1X_INTC_INTPOL(n))
+ & ~(1 << bit), LS1X_INTC_INTPOL(n));
+ __raw_writel(__raw_readl(LS1X_INTC_INTEDGE(n))
+ | (1 << bit), LS1X_INTC_INTEDGE(n));
+ break;
+ case IRQ_TYPE_NONE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct irq_chip ls1x_irq_chip = {
.name = "LS1X-INTC",
.irq_ack = ls1x_irq_ack,
.irq_mask = ls1x_irq_mask,
.irq_mask_ack = ls1x_irq_mask_ack,
.irq_unmask = ls1x_irq_unmask,
+ .irq_set_type = ls1x_irq_settype,
};
static void ls1x_irq_dispatch(int n)
@@ -107,7 +153,7 @@ asmlinkage void plat_irq_dispatch(void)
}
-struct irqaction cascade_irqaction = {
+static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
@@ -120,7 +166,7 @@ static void __init ls1x_irq_init(int base)
/* Disable interrupts and clear pending,
* setup all IRQs as high level triggered
*/
- for (n = 0; n < 4; n++) {
+ for (n = 0; n < INTN; n++) {
__raw_writel(0x0, LS1X_INTC_INTIEN(n));
__raw_writel(0xffffffff, LS1X_INTC_INTCLR(n));
__raw_writel(0xffffffff, LS1X_INTC_INTPOL(n));
@@ -129,7 +175,7 @@ static void __init ls1x_irq_init(int base)
}
- for (n = base; n < LS1X_IRQS; n++) {
+ for (n = base; n < NR_IRQS; n++) {
irq_set_chip_and_handler(n, &ls1x_irq_chip,
handle_level_irq);
}
@@ -138,6 +184,9 @@ static void __init ls1x_irq_init(int base)
setup_irq(INT1_IRQ, &cascade_irqaction);
setup_irq(INT2_IRQ, &cascade_irqaction);
setup_irq(INT3_IRQ, &cascade_irqaction);
+#if defined(CONFIG_LOONGSON1_LS1C)
+ setup_irq(INT4_IRQ, &cascade_irqaction);
+#endif
}
void __init arch_init_irq(void)
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index f2c714d8fb60..beff0852c6a4 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -17,11 +17,16 @@
#include <linux/stmmac.h>
#include <linux/usb/ehci_pdriver.h>
+#include <platform.h>
#include <loongson1.h>
#include <cpufreq.h>
#include <dma.h>
#include <nand.h>
+#define LS1X_RTC_CTRL ((void __iomem *)KSEG1ADDR(LS1X_RTC_BASE + 0x40))
+#define RTC_EXTCLK_OK (BIT(5) | BIT(8))
+#define RTC_EXTCLK_EN BIT(8)
+
/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \
{ \
@@ -65,6 +70,15 @@ void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
p->uartclk = clk_get_rate(clk);
}
+void __init ls1x_rtc_set_extclk(struct platform_device *pdev)
+{
+ u32 val;
+
+ val = __raw_readl(LS1X_RTC_CTRL);
+ if (!(val & RTC_EXTCLK_OK))
+ __raw_writel(val | RTC_EXTCLK_EN, LS1X_RTC_CTRL);
+}
+
/* CPUFreq */
static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
.clk_name = "cpu_clk",
@@ -132,6 +146,7 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
val = __raw_readl(LS1X_MUX_CTRL1);
+#if defined(CONFIG_LOONGSON1_LS1B)
plat_dat = dev_get_platdata(&pdev->dev);
if (plat_dat->bus_id) {
__raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
@@ -165,6 +180,17 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
val &= ~GMAC0_SHUT;
}
__raw_writel(val, LS1X_MUX_CTRL1);
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ plat_dat = dev_get_platdata(&pdev->dev);
+
+ val &= ~PHY_INTF_SELI;
+ if (plat_dat->interface == PHY_INTERFACE_MODE_RMII)
+ val |= 0x4 << PHY_INTF_SELI_SHIFT;
+ __raw_writel(val, LS1X_MUX_CTRL1);
+
+ val = __raw_readl(LS1X_MUX_CTRL0);
+ __raw_writel(val & (~GMAC_SHUT), LS1X_MUX_CTRL0);
+#endif
return 0;
}
@@ -172,7 +198,11 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
static struct plat_stmmacenet_data ls1x_eth0_pdata = {
.bus_id = 0,
.phy_addr = -1,
+#if defined(CONFIG_LOONGSON1_LS1B)
.interface = PHY_INTERFACE_MODE_MII,
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ .interface = PHY_INTERFACE_MODE_RMII,
+#endif
.mdio_bus_data = &ls1x_mdio_bus_data,
.dma_cfg = &ls1x_eth_dma_cfg,
.has_gmac = 1,
@@ -203,6 +233,7 @@ struct platform_device ls1x_eth0_pdev = {
},
};
+#ifdef CONFIG_LOONGSON1_LS1B
static struct plat_stmmacenet_data ls1x_eth1_pdata = {
.bus_id = 1,
.phy_addr = -1,
@@ -236,6 +267,7 @@ struct platform_device ls1x_eth1_pdev = {
.platform_data = &ls1x_eth1_pdata,
},
};
+#endif /* CONFIG_LOONGSON1_LS1B */
/* GPIO */
static struct resource ls1x_gpio0_resources[] = {
diff --git a/arch/mips/loongson32/common/setup.c b/arch/mips/loongson32/common/setup.c
index 62f41afee241..1640744288ee 100644
--- a/arch/mips/loongson32/common/setup.c
+++ b/arch/mips/loongson32/common/setup.c
@@ -22,7 +22,11 @@ const char *get_system_type(void)
switch (processor_id & PRID_REV_MASK) {
case PRID_REV_LOONGSON1B:
+#if defined(CONFIG_LOONGSON1_LS1B)
return "LOONGSON LS1B";
+#elif defined(CONFIG_LOONGSON1_LS1C)
+ return "LOONGSON LS1C";
+#endif
default:
return "LOONGSON (unknown)";
}
diff --git a/arch/mips/loongson32/ls1c/Makefile b/arch/mips/loongson32/ls1c/Makefile
new file mode 100644
index 000000000000..a92c6cd3418d
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for loongson1C based machines.
+#
+
+obj-y += board.o
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
new file mode 100644
index 000000000000..a96bed5e3ea6
--- /dev/null
+++ b/arch/mips/loongson32/ls1c/board.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <platform.h>
+
+static struct platform_device *ls1c_platform_devices[] __initdata = {
+ &ls1x_uart_pdev,
+ &ls1x_eth0_pdev,
+ &ls1x_rtc_pdev,
+};
+
+static int __init ls1c_platform_init(void)
+{
+ ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+ ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
+
+ return platform_add_devices(ls1c_platform_devices,
+ ARRAY_SIZE(ls1c_platform_devices));
+}
+
+arch_initcall(ls1c_platform_init);
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 05b1d7cf9514..0e45b061e514 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -294,6 +294,8 @@ void octeon_cache_init(void)
flush_data_cache_page = octeon_flush_data_cache_page;
flush_icache_range = octeon_flush_icache_range;
local_flush_icache_range = local_octeon_flush_icache_range;
+ __flush_icache_user_range = octeon_flush_icache_range;
+ __local_flush_icache_user_range = local_octeon_flush_icache_range;
__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 135ec313c1f6..21e4e662c1fa 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -325,6 +325,8 @@ void r3k_cache_init(void)
flush_cache_page = r3k_flush_cache_page;
flush_icache_range = r3k_flush_icache_range;
local_flush_icache_range = r3k_flush_icache_range;
+ __flush_icache_user_range = r3k_flush_icache_range;
+ __local_flush_icache_user_range = r3k_flush_icache_range;
__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fa7d8d3790bf..88cfaf81c958 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
@@ -722,11 +722,13 @@ struct flush_icache_range_args {
unsigned long start;
unsigned long end;
unsigned int type;
+ bool user;
};
static inline void __local_r4k_flush_icache_range(unsigned long start,
unsigned long end,
- unsigned int type)
+ unsigned int type,
+ bool user)
{
if (!cpu_has_ic_fills_f_dc) {
if (type == R4K_INDEX ||
@@ -734,7 +736,10 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
- protected_blast_dcache_range(start, end);
+ if (user)
+ protected_blast_dcache_range(start, end);
+ else
+ blast_dcache_range(start, end);
}
}
@@ -748,27 +753,25 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
break;
default:
- protected_blast_icache_range(start, end);
+ if (user)
+ protected_blast_icache_range(start, end);
+ else
+ blast_icache_range(start, end);
break;
}
}
-#ifdef CONFIG_EVA
- /*
- * Due to all possible segment mappings, there might cache aliases
- * caused by the bootloader being in non-EVA mode, and the CPU switching
- * to EVA during early kernel init. It's best to flush the scache
- * to avoid having secondary cores fetching stale data and lead to
- * kernel crashes.
- */
- bc_wback_inv(start, (end - start));
- __sync();
-#endif
}
static inline void local_r4k_flush_icache_range(unsigned long start,
unsigned long end)
{
- __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
+}
+
+static inline void local_r4k_flush_icache_user_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -777,11 +780,13 @@ static inline void local_r4k_flush_icache_range_ipi(void *args)
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned int type = fir_args->type;
+ bool user = fir_args->user;
- __local_r4k_flush_icache_range(start, end, type);
+ __local_r4k_flush_icache_range(start, end, type, user);
}
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
+ bool user)
{
struct flush_icache_range_args args;
unsigned long size, cache_size;
@@ -789,6 +794,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
args.type = R4K_HIT | R4K_INDEX;
+ args.user = user;
/*
* Indexed cache ops require an SMP call.
@@ -814,6 +820,16 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
instruction_hazard();
}
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, false);
+}
+
+static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, true);
+}
+
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -1915,9 +1931,16 @@ void r4k_cache_init(void)
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
+ __flush_icache_user_range = r4k_flush_icache_user_range;
+ __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
- if (coherentio) {
+# if defined(CONFIG_DMA_PERDEV_COHERENT)
+ if (0) {
+# else
+ if ((coherentio == IO_COHERENCE_ENABLED) ||
+ ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
+# endif
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 596e18458e04..5c282583edf1 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -411,6 +411,9 @@ void tx39_cache_init(void)
break;
}
+ __flush_icache_user_range = flush_icache_range;
+ __local_flush_icache_user_range = local_flush_icache_range;
+
current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index bf04c6c479a4..6db341347202 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -10,7 +10,7 @@
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
@@ -33,6 +33,10 @@ void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
+void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(__flush_icache_user_range);
+void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
@@ -74,7 +78,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
return -EFAULT;
- flush_icache_range(addr, addr + bytes);
+ __flush_icache_user_range(addr, addr + bytes);
return 0;
}
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index b2eadd6fa9a1..46d5696c4f27 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
@@ -24,14 +24,15 @@
#include <dma-coherence.h>
-#ifdef CONFIG_DMA_MAYBE_COHERENT
-int coherentio = 0; /* User defined DMA coherency from command line. */
+#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
+/* User defined DMA coherency from command line. */
+enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
static int __init setcoherentio(char *str)
{
- coherentio = 1;
+ coherentio = IO_COHERENCE_ENABLED;
pr_info("Hardware DMA cache coherency (command line)\n");
return 0;
}
@@ -39,7 +40,7 @@ early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
- coherentio = 0;
+ coherentio = IO_COHERENCE_DISABLED;
pr_info("Software DMA cache coherency (command line)\n");
return 0;
}
@@ -160,8 +161,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = plat_map_dma_mem(dev, ret, size);
if (!plat_device_is_coherent(dev)) {
dma_cache_wback_inv((unsigned long) ret, size);
- if (!hw_coherentio)
- ret = UNCAC_ADDR(ret);
+ ret = UNCAC_ADDR(ret);
}
return ret;
@@ -189,7 +189,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
- if (!plat_device_is_coherent(dev) && !hw_coherentio)
+ if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
page = virt_to_page((void *) addr);
@@ -209,7 +209,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn;
int ret = -ENXIO;
- if (!plat_device_is_coherent(dev) && !hw_coherentio)
+ if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
pfn = page_to_pfn(virt_to_page((void *)addr));
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
index 9d25d2ba4b9e..e474fa2efed4 100644
--- a/arch/mips/mm/extable.c
+++ b/arch/mips/mm/extable.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
*/
-#include <linux/module.h>
+#include <linux/extable.h>
#include <linux/spinlock.h>
#include <asm/branch.h>
#include <asm/uaccess.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 9560ad731120..d56a855828c2 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -18,7 +18,6 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124fb6474..d8c3c159289a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index d7258a103439..f13f51003bd8 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
#include <linux/compiler.h>
-#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 72f7478ee068..3a6edecc3f38 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -10,7 +10,7 @@
*/
#include <linux/bug.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8d5008cbdc0f..1f189627440f 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -6,7 +6,7 @@
* (C) Copyright 1995 1996 Linus Torvalds
* (C) Copyright 2001, 2002 Ralf Baechle
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/sched.h>
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 353037699512..d08ea3ff0f53 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -10,7 +10,7 @@
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index c41953ca6605..6f804f5960ab 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -12,7 +12,6 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/proc_fs.h>
#include <asm/bugs.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index e8b335c16295..bba9c1484b41 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -14,7 +14,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
@@ -67,8 +67,11 @@ void local_flush_tlb_all(void)
entry = read_c0_wired();
- /* Blast 'em all away. */
- if (cpu_has_tlbinv) {
+ /*
+ * Blast 'em all away.
+ * If there are any wired entries, fall back to iterating
+ */
+ if (cpu_has_tlbinv && !entry) {
if (current_cpu_data.tlbsizevtlb) {
write_c0_index(0);
mtc0_tlbw_hazard();
diff --git a/arch/mips/mti-malta/malta-dt.c b/arch/mips/mti-malta/malta-dt.c
index 47a22889285f..4822943100f3 100644
--- a/arch/mips/mti-malta/malta-dt.c
+++ b/arch/mips/mti-malta/malta-dt.c
@@ -17,18 +17,3 @@ void __init device_tree_init(void)
{
unflatten_and_copy_device_tree();
}
-
-static const struct of_device_id bus_ids[] __initconst = {
- { .compatible = "simple-bus", },
- { .compatible = "isa", },
- {},
-};
-
-static int __init publish_devices(void)
-{
- if (!of_have_populated_dt())
- return 0;
-
- return of_platform_bus_probe(NULL, bus_ids, NULL);
-}
-device_initcall(publish_devices);
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
index 151f4882ec8a..c398582c316f 100644
--- a/arch/mips/mti-malta/malta-dtshim.c
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -13,18 +13,66 @@
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/sizes.h>
+#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/mips-boards/malta.h>
+#include <asm/mips-cm.h>
#include <asm/page.h>
+#define ROCIT_REG_BASE 0x1f403000
+#define ROCIT_CONFIG_GEN1 (ROCIT_REG_BASE + 0x04)
+#define ROCIT_CONFIG_GEN1_MEMMAP_SHIFT 8
+#define ROCIT_CONFIG_GEN1_MEMMAP_MASK (0xf << 8)
+
static unsigned char fdt_buf[16 << 10] __initdata;
/* determined physical memory size, not overridden by command line args */
extern unsigned long physical_memsize;
-#define MAX_MEM_ARRAY_ENTRIES 1
+enum mem_map {
+ MEM_MAP_V1 = 0,
+ MEM_MAP_V2,
+};
+
+#define MAX_MEM_ARRAY_ENTRIES 2
-static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
+static __init int malta_scon(void)
+{
+ int scon = MIPS_REVISION_SCONID;
+
+ if (scon != MIPS_REVISION_SCON_OTHER)
+ return scon;
+
+ switch (MIPS_REVISION_CORID) {
+ case MIPS_REVISION_CORID_QED_RM5261:
+ case MIPS_REVISION_CORID_CORE_LV:
+ case MIPS_REVISION_CORID_CORE_FPGA:
+ case MIPS_REVISION_CORID_CORE_FPGAR2:
+ return MIPS_REVISION_SCON_GT64120;
+
+ case MIPS_REVISION_CORID_CORE_EMUL_BON:
+ case MIPS_REVISION_CORID_BONITO64:
+ case MIPS_REVISION_CORID_CORE_20K:
+ return MIPS_REVISION_SCON_BONITO;
+
+ case MIPS_REVISION_CORID_CORE_MSC:
+ case MIPS_REVISION_CORID_CORE_FPGA2:
+ case MIPS_REVISION_CORID_CORE_24K:
+ return MIPS_REVISION_SCON_SOCIT;
+
+ case MIPS_REVISION_CORID_CORE_FPGA3:
+ case MIPS_REVISION_CORID_CORE_FPGA4:
+ case MIPS_REVISION_CORID_CORE_FPGA5:
+ case MIPS_REVISION_CORID_CORE_EMUL_MSC:
+ default:
+ return MIPS_REVISION_SCON_ROCIT;
+ }
+}
+
+static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size,
+ enum mem_map map)
{
unsigned long size_preio;
unsigned entries;
@@ -39,11 +87,47 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
* DDR but limits it to 2GB.
*/
mem_array[1] = cpu_to_be32(size);
+ goto done;
+ }
+
+ size_preio = min_t(unsigned long, size, SZ_256M);
+ mem_array[1] = cpu_to_be32(size_preio);
+ size -= size_preio;
+ if (!size)
+ goto done;
+
+ if (map == MEM_MAP_V2) {
+ /*
+ * We have a flat 32 bit physical memory map with DDR filling
+ * all 4GB of the memory map, apart from the I/O region which
+ * obscures 256MB from 0x10000000-0x1fffffff.
+ *
+ * Therefore we discard the 256MB behind the I/O region.
+ */
+ if (size <= SZ_256M)
+ goto done;
+ size -= SZ_256M;
+
+ /* Make use of the memory following the I/O region */
+ entries++;
+ mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_512M);
+ mem_array[3] = cpu_to_be32(size);
} else {
- size_preio = min_t(unsigned long, size, SZ_256M);
- mem_array[1] = cpu_to_be32(size_preio);
+ /*
+ * We have a 32 bit physical memory map with a 2GB DDR region
+ * aliased in the upper & lower halves of it. The I/O region
+ * obscures 256MB from 0x10000000-0x1fffffff in the low alias
+ * but the DDR it obscures is accessible via the high alias.
+ *
+ * Simply access everything beyond the lowest 256MB of DDR using
+ * the high alias.
+ */
+ entries++;
+ mem_array[2] = cpu_to_be32(PHYS_OFFSET + SZ_2G + SZ_256M);
+ mem_array[3] = cpu_to_be32(size);
}
+done:
BUG_ON(entries > MAX_MEM_ARRAY_ENTRIES);
return entries;
}
@@ -54,6 +138,8 @@ static void __init append_memory(void *fdt, int root_off)
unsigned long memsize;
unsigned mem_entries;
int i, err, mem_off;
+ enum mem_map mem_map;
+ u32 config;
char *var, param_name[10], *var_names[] = {
"ememsize", "memsize",
};
@@ -106,6 +192,20 @@ static void __init append_memory(void *fdt, int root_off)
/* if the user says there's more RAM than we thought, believe them */
physical_memsize = max_t(unsigned long, physical_memsize, memsize);
+ /* detect the memory map in use */
+ if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+ /* ROCit has a register indicating the memory map in use */
+ config = readl((void __iomem *)CKSEG1ADDR(ROCIT_CONFIG_GEN1));
+ mem_map = config & ROCIT_CONFIG_GEN1_MEMMAP_MASK;
+ mem_map >>= ROCIT_CONFIG_GEN1_MEMMAP_SHIFT;
+ } else {
+ /* if not using ROCit, presume the v1 memory map */
+ mem_map = MEM_MAP_V1;
+ }
+ if (mem_map > MEM_MAP_V2)
+ panic("Unsupported physical memory map v%u detected",
+ (unsigned int)mem_map);
+
/* append memory to the DT */
mem_off = fdt_add_subnode(fdt, root_off, "memory");
if (mem_off < 0)
@@ -115,19 +215,93 @@ static void __init append_memory(void *fdt, int root_off)
if (err)
panic("Unable to set memory node device_type: %d", err);
- mem_entries = gen_fdt_mem_array(mem_array, physical_memsize);
+ mem_entries = gen_fdt_mem_array(mem_array, physical_memsize, mem_map);
err = fdt_setprop(fdt, mem_off, "reg", mem_array,
mem_entries * 2 * sizeof(mem_array[0]));
if (err)
panic("Unable to set memory regs property: %d", err);
- mem_entries = gen_fdt_mem_array(mem_array, memsize);
+ mem_entries = gen_fdt_mem_array(mem_array, memsize, mem_map);
err = fdt_setprop(fdt, mem_off, "linux,usable-memory", mem_array,
mem_entries * 2 * sizeof(mem_array[0]));
if (err)
panic("Unable to set linux,usable-memory property: %d", err);
}
+static void __init remove_gic(void *fdt)
+{
+ int err, gic_off, i8259_off, cpu_off;
+ void __iomem *biu_base;
+ uint32_t cpu_phandle, sc_cfg;
+
+ /* if we have a CM which reports a GIC is present, leave the DT alone */
+ err = mips_cm_probe();
+ if (!err && (read_gcr_gic_status() & CM_GCR_GIC_STATUS_GICEX_MSK))
+ return;
+
+ if (malta_scon() == MIPS_REVISION_SCON_ROCIT) {
+ /*
+ * On systems using the RocIT system controller a GIC may be
+ * present without a CM. Detect whether that is the case.
+ */
+ biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
+ MSC01_BIU_ADDRSPACE_SZ);
+ sc_cfg = __raw_readl(biu_base + MSC01_SC_CFG_OFS);
+ if (sc_cfg & MSC01_SC_CFG_GICPRES_MSK) {
+ /* enable the GIC at the system controller level */
+ sc_cfg |= BIT(MSC01_SC_CFG_GICENA_SHF);
+ __raw_writel(sc_cfg, biu_base + MSC01_SC_CFG_OFS);
+ return;
+ }
+ }
+
+ gic_off = fdt_node_offset_by_compatible(fdt, -1, "mti,gic");
+ if (gic_off < 0) {
+ pr_warn("malta-dtshim: unable to find DT GIC node: %d\n",
+ gic_off);
+ return;
+ }
+
+ err = fdt_nop_node(fdt, gic_off);
+ if (err)
+ pr_warn("malta-dtshim: unable to nop GIC node\n");
+
+ i8259_off = fdt_node_offset_by_compatible(fdt, -1, "intel,i8259");
+ if (i8259_off < 0) {
+ pr_warn("malta-dtshim: unable to find DT i8259 node: %d\n",
+ i8259_off);
+ return;
+ }
+
+ cpu_off = fdt_node_offset_by_compatible(fdt, -1,
+ "mti,cpu-interrupt-controller");
+ if (cpu_off < 0) {
+ pr_warn("malta-dtshim: unable to find CPU intc node: %d\n",
+ cpu_off);
+ return;
+ }
+
+ cpu_phandle = fdt_get_phandle(fdt, cpu_off);
+ if (!cpu_phandle) {
+ pr_warn("malta-dtshim: unable to get CPU intc phandle\n");
+ return;
+ }
+
+ err = fdt_setprop_u32(fdt, i8259_off, "interrupt-parent", cpu_phandle);
+ if (err) {
+ pr_warn("malta-dtshim: unable to set i8259 interrupt-parent: %d\n",
+ err);
+ return;
+ }
+
+ err = fdt_setprop_u32(fdt, i8259_off, "interrupts", 2);
+ if (err) {
+ pr_warn("malta-dtshim: unable to set i8259 interrupts: %d\n",
+ err);
+ return;
+ }
+}
+
void __init *malta_dt_shim(void *fdt)
{
int root_off, len, err;
@@ -153,6 +327,7 @@ void __init *malta_dt_shim(void *fdt)
return fdt;
append_memory(fdt_buf, root_off);
+ remove_gic(fdt_buf);
err = fdt_pack(fdt_buf);
if (err)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index dc2c5214809d..0f3b881a3190 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/pci_regs.h>
#include <linux/serial_core.h>
#include <asm/cacheflush.h>
@@ -242,23 +243,19 @@ mips_pci_controller:
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
-#ifndef CONFIG_EVA
- /* Fix up target memory mapping. */
- MSC_READ(MSC01_PCI_BAR0, mask);
- MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
-#else
+
/*
* Setup the Malta max (2GB) memory for PCI DMA in host bridge
- * in transparent addressing mode, starting from 0x80000000.
+ * in transparent addressing mode.
*/
- mask = PHYS_OFFSET | (1<<3);
+ mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
MSC_WRITE(MSC01_PCI_BAR0, mask);
-
- mask = PHYS_OFFSET;
MSC_WRITE(MSC01_PCI_HEAD4, mask);
+
+ mask &= MSC01_PCI_BAR0_SIZE_MSK;
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
-#endif
+
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
MSC01_PCI_CFG_MAXRTRY_MSK)
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index c6a6c7afddab..cb675ec6f283 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -14,11 +14,13 @@
*/
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
+#include <linux/of_irq.h>
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/random.h>
@@ -37,10 +39,6 @@
#include <asm/setup.h>
#include <asm/rtlx.h>
-static void __iomem *_msc01_biu_base;
-
-static DEFINE_RAW_SPINLOCK(mips_irq_lock);
-
static inline int mips_pcibios_iack(void)
{
int irq;
@@ -85,49 +83,6 @@ static inline int mips_pcibios_iack(void)
return irq;
}
-static inline int get_int(void)
-{
- unsigned long flags;
- int irq;
- raw_spin_lock_irqsave(&mips_irq_lock, flags);
-
- irq = mips_pcibios_iack();
-
- /*
- * The only way we can decide if an interrupt is spurious
- * is by checking the 8259 registers. This needs a spinlock
- * on an SMP system, so leave it up to the generic code...
- */
-
- raw_spin_unlock_irqrestore(&mips_irq_lock, flags);
-
- return irq;
-}
-
-static void malta_hw0_irqdispatch(void)
-{
- int irq;
-
- irq = get_int();
- if (irq < 0) {
- /* interrupt has already been cleared */
- return;
- }
-
- do_IRQ(MALTA_INT_BASE + irq);
-
-#ifdef CONFIG_MIPS_VPE_APSP_API_MT
- if (aprp_hook)
- aprp_hook();
-#endif
-}
-
-static irqreturn_t i8259_handler(int irq, void *dev_id)
-{
- malta_hw0_irqdispatch();
- return IRQ_HANDLED;
-}
-
static void corehi_irqdispatch(void)
{
unsigned int intedge, intsteer, pcicmd, pcibadaddr;
@@ -240,12 +195,6 @@ static struct irqaction irq_call = {
};
#endif /* CONFIG_MIPS_MT_SMP */
-static struct irqaction i8259irq = {
- .handler = i8259_handler,
- .name = "XT-PIC cascade",
- .flags = IRQF_NO_THREAD,
-};
-
static struct irqaction corehi_irqaction = {
.handler = corehi_handler,
.name = "CoreHi",
@@ -281,28 +230,10 @@ void __init arch_init_ipiirq(int irq, struct irqaction *action)
void __init arch_init_irq(void)
{
- int corehi_irq, i8259_irq;
-
- init_i8259_irqs();
+ int corehi_irq;
- if (!cpu_has_veic)
- mips_cpu_irq_init();
-
- if (mips_cm_present()) {
- write_gcr_gic_base(GIC_BASE_ADDR | CM_GCR_GIC_BASE_GICEN_MSK);
- gic_present = 1;
- } else {
- if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
- _msc01_biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
- MSC01_BIU_ADDRSPACE_SZ);
- gic_present =
- (__raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS) &
- MSC01_SC_CFG_GICPRES_MSK) >>
- MSC01_SC_CFG_GICPRES_SHF;
- }
- }
- if (gic_present)
- pr_debug("GIC present\n");
+ i8259_set_poll(mips_pcibios_iack);
+ irqchip_init();
switch (mips_revision_sconid) {
case MIPS_REVISION_SCON_SOCIT:
@@ -330,18 +261,6 @@ void __init arch_init_irq(void)
}
if (gic_present) {
- int i;
-
- gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, MIPSCPU_INT_GIC,
- MIPS_GIC_IRQ_BASE);
- if (!mips_cm_present()) {
- /* Enable the GIC */
- i = __raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS);
- __raw_writel(i | (0x1 << MSC01_SC_CFG_GICENA_SHF),
- _msc01_biu_base + MSC01_SC_CFG_OFS);
- pr_debug("GIC Enabled\n");
- }
- i8259_irq = MIPS_GIC_IRQ_BASE + GIC_INT_I8259A;
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
} else {
#if defined(CONFIG_MIPS_MT_SMP)
@@ -361,33 +280,13 @@ void __init arch_init_irq(void)
arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
#endif
if (cpu_has_veic) {
- set_vi_handler(MSC01E_INT_I8259A,
- malta_hw0_irqdispatch);
set_vi_handler(MSC01E_INT_COREHI,
corehi_irqdispatch);
- i8259_irq = MSC01E_INT_BASE + MSC01E_INT_I8259A;
corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
} else {
- i8259_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_I8259A;
corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
}
}
- setup_irq(i8259_irq, &i8259irq);
setup_irq(corehi_irq, &corehi_irqaction);
}
-
-void malta_be_init(void)
-{
- /* Could change CM error mask register. */
-}
-
-int malta_be_handler(struct pt_regs *regs, int is_fixup)
-{
- /* This duplicates the handling in do_be which seems wrong */
- int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
-
- mips_cm_error_report();
-
- return retval;
-}
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index e1dd1c1d3fde..516e1233d771 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -23,14 +23,10 @@
*/
#include <linux/init.h>
#include <linux/serial_8250.h>
-#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/irq.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
-#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
{ \
@@ -68,80 +64,13 @@ static struct platform_device malta_uart8250_device = {
},
};
-struct resource malta_rtc_resources[] = {
- {
- .start = RTC_PORT(0),
- .end = RTC_PORT(7),
- .flags = IORESOURCE_IO,
- }, {
- .start = RTC_IRQ,
- .end = RTC_IRQ,
- .flags = IORESOURCE_IRQ,
- }
-};
-
-static struct platform_device malta_rtc_device = {
- .name = "rtc_cmos",
- .id = -1,
- .resource = malta_rtc_resources,
- .num_resources = ARRAY_SIZE(malta_rtc_resources),
-};
-
-static struct mtd_partition malta_mtd_partitions[] = {
- {
- .name = "YAMON",
- .offset = 0x0,
- .size = 0x100000,
- .mask_flags = MTD_WRITEABLE
- }, {
- .name = "User FS",
- .offset = 0x100000,
- .size = 0x2e0000
- }, {
- .name = "Board Config",
- .offset = 0x3e0000,
- .size = 0x020000,
- .mask_flags = MTD_WRITEABLE
- }
-};
-
-static struct physmap_flash_data malta_flash_data = {
- .width = 4,
- .nr_parts = ARRAY_SIZE(malta_mtd_partitions),
- .parts = malta_mtd_partitions
-};
-
-static struct resource malta_flash_resource = {
- .start = 0x1e000000,
- .end = 0x1e3fffff,
- .flags = IORESOURCE_MEM
-};
-
-static struct platform_device malta_flash_device = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &malta_flash_data,
- },
- .num_resources = 1,
- .resource = &malta_flash_resource,
-};
-
static struct platform_device *malta_devices[] __initdata = {
&malta_uart8250_device,
- &malta_rtc_device,
- &malta_flash_device,
};
static int __init malta_add_devices(void)
{
- int err;
-
- err = platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
- if (err)
- return err;
-
- return 0;
+ return platform_add_devices(malta_devices, ARRAY_SIZE(malta_devices));
}
device_initcall(malta_add_devices);
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
index 2fd2cc2c5034..dd6f62ad4417 100644
--- a/arch/mips/mti-malta/malta-reset.c
+++ b/arch/mips/mti-malta/malta-reset.c
@@ -8,38 +8,21 @@
*/
#include <linux/io.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <asm/reboot.h>
#include <asm/mach-malta/malta-pm.h>
-#define SOFTRES_REG 0x1f000500
-#define GORESET 0x42
-
-static void mips_machine_restart(char *command)
-{
- unsigned int __iomem *softres_reg =
- ioremap(SOFTRES_REG, sizeof(unsigned int));
-
- __raw_writel(GORESET, softres_reg);
-}
-
-static void mips_machine_halt(void)
-{
- while (true);
-}
-
static void mips_machine_power_off(void)
{
mips_pm_suspend(PIIX4_FUNC3IO_PMCNTRL_SUS_TYP_SOFF);
pr_info("Failed to power down, resetting\n");
- mips_machine_restart(NULL);
+ machine_restart(NULL);
}
static int __init mips_reboot_setup(void)
{
- _machine_restart = mips_machine_restart;
- _machine_halt = mips_machine_halt;
pm_power_off = mips_machine_power_off;
return 0;
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 7e7364b0501e..a01d5debfcaf 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -42,9 +42,6 @@
#define ROCIT_CONFIG_GEN0 0x1f403000
#define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7)
-extern void malta_be_init(void);
-extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
-
static struct resource standard_io_resources[] = {
{
.name = "dma1",
@@ -154,12 +151,12 @@ static void __init plat_setup_iocoherency(void)
* coherency instead.
*/
if (plat_enable_iocoherency()) {
- if (coherentio == 0)
+ if (coherentio == IO_COHERENCE_DISABLED)
pr_info("Hardware DMA cache coherency disabled\n");
else
pr_info("Hardware DMA cache coherency enabled\n");
} else {
- if (coherentio == 1)
+ if (coherentio == IO_COHERENCE_ENABLED)
pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
else
pr_info("Software DMA cache coherency enabled\n");
@@ -301,7 +298,4 @@ void __init plat_mem_setup(void)
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
screen_info_setup();
#endif
-
- board_be_init = malta_be_init;
- board_be_handler = malta_be_handler;
}
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
deleted file mode 100644
index 7a584e0bf933..000000000000
--- a/arch/mips/mti-sead3/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
-#
-# Copyright (C) 2008 Wind River Systems, Inc.
-# written by Ralf Baechle <ralf@linux-mips.org>
-#
-# Copyright (C) 2012 MIPS Technoligies, Inc. All rights reserved.
-# Steven J. Hill <sjhill@mips.com>
-#
-obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
- sead3-int.o sead3-platform.o sead3-reset.o \
- sead3-setup.o sead3-time.o
-
-obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
diff --git a/arch/mips/mti-sead3/Platform b/arch/mips/mti-sead3/Platform
deleted file mode 100644
index 387092427145..000000000000
--- a/arch/mips/mti-sead3/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# MIPS SEAD-3 board
-#
-platform-$(CONFIG_MIPS_SEAD3) += mti-sead3/
-cflags-$(CONFIG_MIPS_SEAD3) += -I$(srctree)/arch/mips/include/asm/mach-sead3
-load-$(CONFIG_MIPS_SEAD3) += 0xffffffff80100000
-all-$(CONFIG_MIPS_SEAD3) := $(COMPRESSION_FNAME).srec
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
deleted file mode 100644
index 031f47d69770..000000000000
--- a/arch/mips/mti-sead3/sead3-console.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>
-
-#define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */
-#define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */
-#define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4))
-
-static char console_port = 1;
-
-static inline unsigned int serial_in(int offset, unsigned int base_addr)
-{
- return __raw_readl(PORT(base_addr, offset)) & 0xff;
-}
-
-static inline void serial_out(int offset, int value, unsigned int base_addr)
-{
- __raw_writel(value, PORT(base_addr, offset));
-}
-
-void __init fw_init_early_console(char port)
-{
- console_port = port;
-}
-
-int prom_putchar(char c)
-{
- unsigned int base_addr;
-
- base_addr = console_port ? SEAD_UART1_REGS_BASE : SEAD_UART0_REGS_BASE;
-
- while ((serial_in(UART_LSR, base_addr) & UART_LSR_THRE) == 0)
- ;
-
- serial_out(UART_TX, c, base_addr);
-
- return 1;
-}
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
deleted file mode 100644
index 94875991907b..000000000000
--- a/arch/mips/mti-sead3/sead3-display.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <asm/mips-boards/generic.h>
-
-static unsigned int display_count;
-static unsigned int max_display_count;
-
-#define LCD_DISPLAY_POS_BASE 0x1f000400
-#define DISPLAY_LCDINSTRUCTION (0*2)
-#define DISPLAY_LCDDATA (1*2)
-#define DISPLAY_CPLDSTATUS (2*2)
-#define DISPLAY_CPLDDATA (3*2)
-#define LCD_SETDDRAM 0x80
-#define LCD_IR_BF 0x80
-
-const char display_string[] = " LINUX ON SEAD3 ";
-
-static void scroll_display_message(unsigned long data);
-static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
-
-static void lcd_wait(unsigned int __iomem *display)
-{
- /* Wait for CPLD state machine to become idle. */
- do { } while (__raw_readl(display + DISPLAY_CPLDSTATUS) & 1);
-
- do {
- __raw_readl(display + DISPLAY_LCDINSTRUCTION);
-
- /* Wait for CPLD state machine to become idle. */
- do { } while (__raw_readl(display + DISPLAY_CPLDSTATUS) & 1);
- } while (__raw_readl(display + DISPLAY_CPLDDATA) & LCD_IR_BF);
-}
-
-void mips_display_message(const char *str)
-{
- static unsigned int __iomem *display;
- char ch;
- int i;
-
- if (unlikely(display == NULL))
- display = ioremap_nocache(LCD_DISPLAY_POS_BASE,
- (8 * sizeof(int)));
-
- for (i = 0; i < 16; i++) {
- if (*str)
- ch = *str++;
- else
- ch = ' ';
- lcd_wait(display);
- __raw_writel((LCD_SETDDRAM | i),
- (display + DISPLAY_LCDINSTRUCTION));
- lcd_wait(display);
- __raw_writel(ch, display + DISPLAY_LCDDATA);
- }
-}
-
-static void scroll_display_message(unsigned long data)
-{
- mips_display_message(&display_string[display_count++]);
- if (display_count == max_display_count)
- display_count = 0;
- mod_timer(&mips_scroll_timer, jiffies + HZ);
-}
-
-void mips_scroll_message(void)
-{
- del_timer_sync(&mips_scroll_timer);
- max_display_count = strlen(display_string) + 1 - 16;
- mod_timer(&mips_scroll_timer, jiffies + 1);
-}
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
deleted file mode 100644
index 3572ea30173e..000000000000
--- a/arch/mips/mti-sead3/sead3-init.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/io.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-#include <asm/mips-boards/generic.h>
-#include <asm/fw/fw.h>
-
-extern char except_vec_nmi;
-extern char except_vec_ejtag_debug;
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-static void __init console_config(void)
-{
- char console_string[40];
- int baud = 0;
- char parity = '\0', bits = '\0', flow = '\0';
- char *s;
-
- if ((strstr(fw_getcmdline(), "console=")) == NULL) {
- s = fw_getenv("modetty0");
- if (s) {
- while (*s >= '0' && *s <= '9')
- baud = baud*10 + *s++ - '0';
- if (*s == ',')
- s++;
- if (*s)
- parity = *s++;
- if (*s == ',')
- s++;
- if (*s)
- bits = *s++;
- if (*s == ',')
- s++;
- if (*s == 'h')
- flow = 'r';
- }
- if (baud == 0)
- baud = 38400;
- if (parity != 'n' && parity != 'o' && parity != 'e')
- parity = 'n';
- if (bits != '7' && bits != '8')
- bits = '8';
- if (flow == '\0')
- flow = 'r';
- sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
- parity, bits, flow);
- strcat(fw_getcmdline(), console_string);
- }
-}
-#endif
-
-static void __init mips_nmi_setup(void)
-{
- void *base;
-
- base = cpu_has_veic ?
- (void *)(CAC_BASE + 0xa80) :
- (void *)(CAC_BASE + 0x380);
-#ifdef CONFIG_CPU_MICROMIPS
- /*
- * Decrement the exception vector address by one for microMIPS.
- */
- memcpy(base, (&except_vec_nmi - 1), 0x80);
-
- /*
- * This is a hack. We do not know if the boot loader was built with
- * microMIPS instructions or not. If it was not, the NMI exception
- * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded
- * assembly below forces us into microMIPS mode if we are a pure
- * microMIPS kernel. The assembly instructions are:
- *
- * 3C1A8000 lui k0,0x8000
- * 375A0381 ori k0,k0,0x381
- * 03400008 jr k0
- * 00000000 nop
- *
- * The mode switch occurs by jumping to the unaligned exception
- * vector address at 0x80000381 which would have been 0x80000380
- * in MIPS32 mode. The jump to the unaligned address transitions
- * us into microMIPS mode.
- */
- if (!cpu_has_veic) {
- void *base2 = (void *)(CAC_BASE + 0xa80);
- *((unsigned int *)base2) = 0x3c1a8000;
- *((unsigned int *)base2 + 1) = 0x375a0381;
- *((unsigned int *)base2 + 2) = 0x03400008;
- *((unsigned int *)base2 + 3) = 0x00000000;
- flush_icache_range((unsigned long)base2,
- (unsigned long)base2 + 0x10);
- }
-#else
- memcpy(base, &except_vec_nmi, 0x80);
-#endif
- flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
-}
-
-static void __init mips_ejtag_setup(void)
-{
- void *base;
-
- base = cpu_has_veic ?
- (void *)(CAC_BASE + 0xa00) :
- (void *)(CAC_BASE + 0x300);
-#ifdef CONFIG_CPU_MICROMIPS
- /* Deja vu... */
- memcpy(base, (&except_vec_ejtag_debug - 1), 0x80);
- if (!cpu_has_veic) {
- void *base2 = (void *)(CAC_BASE + 0xa00);
- *((unsigned int *)base2) = 0x3c1a8000;
- *((unsigned int *)base2 + 1) = 0x375a0301;
- *((unsigned int *)base2 + 2) = 0x03400008;
- *((unsigned int *)base2 + 3) = 0x00000000;
- flush_icache_range((unsigned long)base2,
- (unsigned long)base2 + 0x10);
- }
-#else
- memcpy(base, &except_vec_ejtag_debug, 0x80);
-#endif
- flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
-}
-
-void __init prom_init(void)
-{
- board_nmi_handler_setup = mips_nmi_setup;
- board_ejtag_handler_setup = mips_ejtag_setup;
-
- fw_init_cmdline();
-#ifdef CONFIG_EARLY_PRINTK
- if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL)
- fw_init_early_console(0);
- else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL)
- fw_init_early_console(1);
-#endif
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- if ((strstr(fw_getcmdline(), "console=")) == NULL)
- strcat(fw_getcmdline(), " console=ttyS0,38400n8r");
- console_config();
-#endif
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
deleted file mode 100644
index e31e17f81eef..000000000000
--- a/arch/mips/mti-sead3/sead3-int.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
-#include <linux/io.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/setup.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-#define SEAD_CONFIG_GIC_PRESENT_SHF 1
-#define SEAD_CONFIG_GIC_PRESENT_MSK (1 << SEAD_CONFIG_GIC_PRESENT_SHF)
-#define SEAD_CONFIG_BASE 0x1b100110
-#define SEAD_CONFIG_SIZE 4
-
-static void __iomem *sead3_config_reg;
-
-void __init arch_init_irq(void)
-{
- if (!cpu_has_veic)
- mips_cpu_irq_init();
-
- sead3_config_reg = ioremap_nocache(SEAD_CONFIG_BASE, SEAD_CONFIG_SIZE);
- gic_present = (__raw_readl(sead3_config_reg) &
- SEAD_CONFIG_GIC_PRESENT_MSK) >>
- SEAD_CONFIG_GIC_PRESENT_SHF;
- pr_info("GIC: %spresent\n", (gic_present) ? "" : "not ");
- pr_info("EIC: %s\n",
- (current_cpu_data.options & MIPS_CPU_VEIC) ? "on" : "off");
-
- if (gic_present)
- gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, CPU_INT_GIC,
- MIPS_GIC_IRQ_BASE);
-}
-
diff --git a/arch/mips/mti-sead3/sead3-lcd.c b/arch/mips/mti-sead3/sead3-lcd.c
deleted file mode 100644
index 10b10ed21f77..000000000000
--- a/arch/mips/mti-sead3/sead3-lcd.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/platform_device.h>
-
-static struct resource __initdata sead3_lcd_resource = {
- .start = 0x1f000400,
- .end = 0x1f00041f,
- .flags = IORESOURCE_MEM,
-};
-
-static __init int sead3_lcd_add(void)
-{
- struct platform_device *pdev;
- int retval;
-
- /* SEAD-3 and Cobalt platforms use same display type. */
- pdev = platform_device_alloc("cobalt-lcd", -1);
- if (!pdev)
- return -ENOMEM;
-
- retval = platform_device_add_resources(pdev, &sead3_lcd_resource, 1);
- if (retval)
- goto err_free_device;
-
- retval = platform_device_add(pdev);
- if (retval)
- goto err_free_device;
-
- return 0;
-
-err_free_device:
- platform_device_put(pdev);
-
- return retval;
-}
-
-device_initcall(sead3_lcd_add);
diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c
deleted file mode 100644
index 73b73efbfb05..000000000000
--- a/arch/mips/mti-sead3/sead3-platform.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
-#include <linux/leds.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
-#include <linux/serial_8250.h>
-#include <linux/smsc911x.h>
-
-#include <asm/mips-boards/sead3int.h>
-
-#define UART(base) \
-{ \
- .mapbase = base, \
- .irq = -1, \
- .uartclk = 14745600, \
- .iotype = UPIO_MEM32, \
- .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
- .regshift = 2, \
-}
-
-static struct plat_serial8250_port uart8250_data[] = {
- UART(0x1f000900), /* ttyS0 = USB */
- UART(0x1f000800), /* ttyS1 = RS232 */
- { },
-};
-
-static struct platform_device uart8250_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM2,
- .dev = {
- .platform_data = uart8250_data,
- },
-};
-
-static struct smsc911x_platform_config sead3_smsc911x_data = {
- .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
- .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
- .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
- .phy_interface = PHY_INTERFACE_MODE_MII,
-};
-
-static struct resource sead3_net_resources[] = {
- {
- .start = 0x1f010000,
- .end = 0x1f01ffff,
- .flags = IORESOURCE_MEM
- }, {
- .flags = IORESOURCE_IRQ
- }
-};
-
-static struct platform_device sead3_net_device = {
- .name = "smsc911x",
- .id = 0,
- .dev = {
- .platform_data = &sead3_smsc911x_data,
- },
- .num_resources = ARRAY_SIZE(sead3_net_resources),
- .resource = sead3_net_resources
-};
-
-static struct mtd_partition sead3_mtd_partitions[] = {
- {
- .name = "User FS",
- .offset = 0x00000000,
- .size = 0x01fc0000,
- }, {
- .name = "Board Config",
- .offset = 0x01fc0000,
- .size = 0x00040000,
- .mask_flags = MTD_WRITEABLE
- },
-};
-
-static struct physmap_flash_data sead3_flash_data = {
- .width = 4,
- .nr_parts = ARRAY_SIZE(sead3_mtd_partitions),
- .parts = sead3_mtd_partitions
-};
-
-static struct resource sead3_flash_resource = {
- .start = 0x1c000000,
- .end = 0x1dffffff,
- .flags = IORESOURCE_MEM
-};
-
-static struct platform_device sead3_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &sead3_flash_data,
- },
- .num_resources = 1,
- .resource = &sead3_flash_resource,
-};
-
-#define LEDFLAGS(bits, shift) \
- ((bits << 8) | (shift << 8))
-
-#define LEDBITS(id, shift, bits) \
- .name = id #shift, \
- .flags = LEDFLAGS(bits, shift)
-
-static struct led_info led_data_info[] = {
- { LEDBITS("bit", 0, 1) },
- { LEDBITS("bit", 1, 1) },
- { LEDBITS("bit", 2, 1) },
- { LEDBITS("bit", 3, 1) },
- { LEDBITS("bit", 4, 1) },
- { LEDBITS("bit", 5, 1) },
- { LEDBITS("bit", 6, 1) },
- { LEDBITS("bit", 7, 1) },
- { LEDBITS("all", 0, 8) },
-};
-
-static struct led_platform_data led_data = {
- .num_leds = ARRAY_SIZE(led_data_info),
- .leds = led_data_info
-};
-
-static struct resource pled_resources[] = {
- {
- .start = 0x1f000210,
- .end = 0x1f000217,
- .flags = IORESOURCE_MEM
- }
-};
-
-static struct platform_device pled_device = {
- .name = "sead3::pled",
- .id = 0,
- .dev = {
- .platform_data = &led_data,
- },
- .num_resources = ARRAY_SIZE(pled_resources),
- .resource = pled_resources
-};
-
-
-static struct resource fled_resources[] = {
- {
- .start = 0x1f000218,
- .end = 0x1f00021f,
- .flags = IORESOURCE_MEM
- }
-};
-
-static struct platform_device fled_device = {
- .name = "sead3::fled",
- .id = 0,
- .dev = {
- .platform_data = &led_data,
- },
- .num_resources = ARRAY_SIZE(fled_resources),
- .resource = fled_resources
-};
-
-static struct platform_device sead3_led_device = {
- .name = "sead3-led",
- .id = -1,
-};
-
-static struct resource ehci_resources[] = {
- {
- .start = 0x1b200000,
- .end = 0x1b200fff,
- .flags = IORESOURCE_MEM
- }, {
- .flags = IORESOURCE_IRQ
- }
-};
-
-static u64 sead3_usbdev_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device ehci_device = {
- .name = "sead3-ehci",
- .id = 0,
- .dev = {
- .dma_mask = &sead3_usbdev_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32)
- },
- .num_resources = ARRAY_SIZE(ehci_resources),
- .resource = ehci_resources
-};
-
-static struct platform_device *sead3_platform_devices[] __initdata = {
- &uart8250_device,
- &sead3_flash,
- &pled_device,
- &fled_device,
- &sead3_led_device,
- &ehci_device,
- &sead3_net_device,
-};
-
-static int __init sead3_platforms_device_init(void)
-{
- if (gic_present) {
- uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0;
- uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1;
- ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
- sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
- } else {
- uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0;
- uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1;
- ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
- sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
- }
-
- return platform_add_devices(sead3_platform_devices,
- ARRAY_SIZE(sead3_platform_devices));
-}
-
-device_initcall(sead3_platforms_device_init);
diff --git a/arch/mips/mti-sead3/sead3-reset.c b/arch/mips/mti-sead3/sead3-reset.c
deleted file mode 100644
index e6fb24414a70..000000000000
--- a/arch/mips/mti-sead3/sead3-reset.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/io.h>
-#include <linux/pm.h>
-
-#include <asm/reboot.h>
-
-#define SOFTRES_REG 0x1f000050
-#define GORESET 0x4d
-
-static void mips_machine_restart(char *command)
-{
- unsigned int __iomem *softres_reg =
- ioremap(SOFTRES_REG, sizeof(unsigned int));
-
- __raw_writel(GORESET, softres_reg);
-}
-
-static void mips_machine_halt(void)
-{
- unsigned int __iomem *softres_reg =
- ioremap(SOFTRES_REG, sizeof(unsigned int));
-
- __raw_writel(GORESET, softres_reg);
-}
-
-static int __init mips_reboot_setup(void)
-{
- _machine_restart = mips_machine_restart;
- _machine_halt = mips_machine_halt;
- pm_power_off = mips_machine_halt;
-
- return 0;
-}
-arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
deleted file mode 100644
index edfcaf06680d..000000000000
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Copyright (C) 2013 Imagination Technologies Ltd.
- */
-#include <linux/init.h>
-#include <linux/libfdt.h>
-#include <linux/of_fdt.h>
-
-#include <asm/prom.h>
-#include <asm/fw/fw.h>
-
-#include <asm/mips-boards/generic.h>
-
-const char *get_system_type(void)
-{
- return "MIPS SEAD3";
-}
-
-static uint32_t get_memsize_from_cmdline(void)
-{
- int memsize = 0;
- char *p = arcs_cmdline;
- char *s = "memsize=";
-
- p = strstr(p, s);
- if (p) {
- p += strlen(s);
- memsize = memparse(p, NULL);
- }
-
- return memsize;
-}
-
-static uint32_t get_memsize_from_env(void)
-{
- int memsize = 0;
- char *p;
-
- p = fw_getenv("memsize");
- if (p)
- memsize = memparse(p, NULL);
-
- return memsize;
-}
-
-static uint32_t get_memsize(void)
-{
- uint32_t memsize;
-
- memsize = get_memsize_from_cmdline();
- if (memsize)
- return memsize;
-
- return get_memsize_from_env();
-}
-
-static void __init parse_memsize_param(void)
-{
- int offset;
- const uint64_t *prop_value;
- int prop_len;
- uint32_t memsize = get_memsize();
-
- if (!memsize)
- return;
-
- offset = fdt_path_offset(__dtb_start, "/memory");
- if (offset > 0) {
- uint64_t new_value;
- /*
- * reg contains 2 32-bits BE values, offset and size. We just
- * want to replace the size value without affecting the offset
- */
- prop_value = fdt_getprop(__dtb_start, offset, "reg", &prop_len);
- new_value = be64_to_cpu(*prop_value);
- new_value = (new_value & ~0xffffffffllu) | memsize;
- fdt_setprop_inplace_u64(__dtb_start, offset, "reg", new_value);
- }
-}
-
-void __init *plat_get_fdt(void)
-{
- return (void *)__dtb_start;
-}
-
-void __init plat_mem_setup(void)
-{
- /* allow command line/bootloader env to override memory size in DT */
- parse_memsize_param();
-
- /*
- * Load the builtin devicetree. This causes the chosen node to be
- * parsed resulting in our memory appearing
- */
- __dt_setup_arch(__dtb_start);
-}
-
-void __init device_tree_init(void)
-{
- if (!initial_boot_params)
- return;
-
- unflatten_and_copy_device_tree();
-}
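For reference, the deleted get_memsize_from_cmdline() above scans arcs_cmdline for a "memsize=" key and hands the value to memparse(). Below is a minimal userspace sketch of the same idea, using strtoul() plus a hand-rolled K/M/G suffix in place of the kernel's memparse(); all names here are illustrative and not part of the patch.

/* Minimal userspace sketch (not kernel code): parse "memsize=" from a
 * command line, using strtoul() plus a K/M/G suffix instead of the
 * kernel's memparse(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_memsize(const char *cmdline)
{
	const char *key = "memsize=";
	const char *p = strstr(cmdline, key);
	unsigned long val;
	char *end;

	if (!p)
		return 0;

	val = strtoul(p + strlen(key), &end, 0);
	switch (*end) {
	case 'G': case 'g':
		val <<= 10;	/* fall through */
	case 'M': case 'm':
		val <<= 10;	/* fall through */
	case 'K': case 'k':
		val <<= 10;
	}
	return val;
}

int main(void)
{
	/* "128M" -> 134217728 bytes */
	printf("%lu\n", parse_memsize("console=ttyS0 memsize=128M"));
	return 0;
}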
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
deleted file mode 100644
index a120b7a5a8fe..000000000000
--- a/arch/mips/mti-sead3/sead3-time.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/irqchip/mips-gic.h>
-
-#include <asm/cpu.h>
-#include <asm/setup.h>
-#include <asm/time.h>
-#include <asm/irq.h>
-#include <asm/mips-boards/generic.h>
-
-static void __iomem *status_reg = (void __iomem *)0xbf000410;
-
-/*
- * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect.
- */
-static unsigned int __init estimate_cpu_frequency(void)
-{
- unsigned int prid = read_c0_prid() & (PRID_COMP_MASK | PRID_IMP_MASK);
- unsigned int tick = 0;
- unsigned int freq;
- unsigned int orig;
- unsigned long flags;
-
- local_irq_save(flags);
-
- orig = readl(status_reg) & 0x2; /* get original sample */
- /* wait for transition */
- while ((readl(status_reg) & 0x2) == orig)
- ;
- orig = orig ^ 0x2; /* flip the bit */
-
- write_c0_count(0);
-
- /* wait 1 second (the sampling clock transitions every 10ms) */
- while (tick < 100) {
- /* wait for transition */
- while ((readl(status_reg) & 0x2) == orig)
- ;
- orig = orig ^ 0x2; /* flip the bit */
- tick++;
- }
-
- freq = read_c0_count();
-
- local_irq_restore(flags);
-
- mips_hpt_frequency = freq;
-
- /* Adjust for processor */
- if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
- (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
- freq *= 2;
-
- freq += 5000; /* rounding */
- freq -= freq%10000;
-
- return freq ;
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
-}
-
-int get_c0_perfcount_int(void)
-{
- if (gic_present)
- return gic_get_c0_perfcount_int();
- if (cp0_perfcount_irq >= 0)
- return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
- return -1;
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
-
-unsigned int get_c0_compare_int(void)
-{
- if (gic_present)
- return gic_get_c0_compare_int();
- return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-}
-
-void __init plat_time_init(void)
-{
- unsigned int est_freq;
-
- est_freq = estimate_cpu_frequency();
-
- pr_debug("CPU frequency %d.%02d MHz\n", (est_freq / 1000000),
- (est_freq % 1000000) * 100 / 1000000);
-
- mips_scroll_message();
-}
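The deleted estimate_cpu_frequency() above counts CP0 Count ticks across 100 transitions of a 10 ms sampling bit and then rounds the result to the nearest 10 kHz. A minimal userspace sketch of just that rounding step (illustrative only, not part of the patch):

/* Round a measured frequency to the nearest 10 kHz, mirroring the
 * freq += 5000; freq -= freq % 10000; step above. */
#include <stdio.h>

static unsigned int round_to_10khz(unsigned int freq)
{
	freq += 5000;		/* half of 10 kHz, so we round to nearest */
	freq -= freq % 10000;	/* truncate to a 10 kHz boundary */
	return freq;
}

int main(void)
{
	printf("%u\n", round_to_10khz(99994999));	/* 99990000 */
	printf("%u\n", round_to_10khz(99995000));	/* 100000000 */
	return 0;
}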
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 139ad1d7ab5e..4b821481dd44 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -3,6 +3,8 @@
#
obj-y += pci.o
+obj-$(CONFIG_PCI_DRIVERS_LEGACY)+= pci-legacy.o
+obj-$(CONFIG_PCI_DRIVERS_GENERIC)+= pci-generic.o
#
# PCI bus host bridge specific code
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index c8994c156e2d..e99ca7702d8a 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -429,7 +429,8 @@ static int alchemy_pci_probe(struct platform_device *pdev)
/* Au1500 revisions older than AD have borked coherent PCI */
if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
- (read_c0_prid() < 0x01030202) && !coherentio) {
+ (read_c0_prid() < 0x01030202) &&
+ (coherentio == IO_COHERENCE_DISABLED)) {
val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
val |= PCI_CONFIG_NC;
__raw_writel(val, ctx->regs + PCI_REG_CONFIG);
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 7db963deec73..bdf87b43633f 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -18,7 +18,7 @@
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 2013dad700df..1e23c8d587bd 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -11,7 +11,7 @@
#include <linux/irq.h>
#include <linux/pci.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
diff --git a/arch/mips/pci/pci-generic.c b/arch/mips/pci/pci-generic.c
new file mode 100644
index 000000000000..dce304dc3d62
--- /dev/null
+++ b/arch/mips/pci/pci-generic.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * pcibios_align_resource taken from arch/arm/kernel/bios32.c.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/pci.h>
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ resource_size_t start = res->start;
+ struct pci_host_bridge *host_bridge;
+
+ if (res->flags & IORESOURCE_IO && start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+
+ start = (start + align - 1) & ~(align - 1);
+
+ host_bridge = pci_find_host_bridge(dev->bus);
+
+ if (host_bridge->align_resource)
+ return host_bridge->align_resource(dev, res,
+ start, size, align);
+
+ return start;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ pci_read_bridge_bases(bus);
+}
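The new pcibios_align_resource() above keeps I/O allocations out of the 0x100-0x3ff window of each 1 KiB block, since cards that decode only 10 address bits would alias that window onto 0x0100-0x03ff. A minimal userspace sketch of the two rounding steps (illustrative only; the real function additionally defers to the host bridge's align_resource hook):

/* Bump anything inside the 0x100-0x3ff window of a 1 KiB block to the
 * next block, then round up to the requested power-of-two alignment. */
#include <stdio.h>

static unsigned long align_io(unsigned long start, unsigned long align)
{
	if (start & 0x300)		/* lands in 0x100-0x3ff of the block */
		start = (start + 0x3ff) & ~0x3ffUL;

	return (start + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("0x%lx\n", align_io(0x2950, 0x10));	/* 0x2c00 */
	printf("0x%lx\n", align_io(0x2810, 0x10));	/* 0x2810 */
	return 0;
}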
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index b9deab17ccf2..f18f887f481d 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
@@ -234,7 +233,6 @@ static const struct of_device_id ltq_pci_match[] = {
{ .compatible = "lantiq,pci-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, ltq_pci_match);
static struct platform_driver ltq_pci_driver = {
.probe = ltq_pci_probe,
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
new file mode 100644
index 000000000000..014649be158d
--- /dev/null
+++ b/arch/mips/pci/pci-legacy.c
@@ -0,0 +1,302 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Copyright (C) 2003, 04, 11 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2011 Wind River Systems,
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/of_address.h>
+
+#include <asm/cpu-info.h>
+
+/*
+ * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
+ * assignments.
+ */
+
+/*
+ * The PCI controller list.
+ */
+static LIST_HEAD(controllers);
+
+static int pci_initialized;
+
+/*
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ struct pci_dev *dev = data;
+ struct pci_controller *hose = dev->sysdata;
+ resource_size_t start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ /* Make sure we start at our min on all hoses */
+ if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
+ start = PCIBIOS_MIN_IO + hose->io_resource->start;
+
+ /*
+ * Put everything into 0x00-0xff region modulo 0x400
+ */
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* Make sure we start at our min on all hoses */
+ if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
+ start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
+ }
+
+ return start;
+}
+
+static void pcibios_scanbus(struct pci_controller *hose)
+{
+ static int next_busno;
+ static int need_domain_info;
+ LIST_HEAD(resources);
+ struct pci_bus *bus;
+
+ if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
+ next_busno = (*hose->get_busno)();
+
+ pci_add_resource_offset(&resources,
+ hose->mem_resource, hose->mem_offset);
+ pci_add_resource_offset(&resources,
+ hose->io_resource, hose->io_offset);
+ pci_add_resource_offset(&resources,
+ hose->busn_resource, hose->busn_offset);
+ bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
+ &resources);
+ hose->bus = bus;
+
+ need_domain_info = need_domain_info || pci_domain_nr(bus);
+ set_pci_need_domain_info(hose, need_domain_info);
+
+ if (!bus) {
+ pci_free_resource_list(&resources);
+ return;
+ }
+
+ next_busno = bus->busn_res.end + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
+ }
+
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+ }
+ pci_bus_add_devices(bus);
+}
+
+#ifdef CONFIG_OF
+void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+
+ pr_info("PCI host bridge %s ranges:\n", node->full_name);
+ hose->of_node = node;
+
+ if (of_pci_range_parser_init(&parser, node))
+ return;
+
+ for_each_of_pci_range(&parser, &range) {
+ struct resource *res = NULL;
+
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pr_info(" IO 0x%016llx..0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
+ hose->io_map_base =
+ (unsigned long)ioremap(range.cpu_addr,
+ range.size);
+ res = hose->io_resource;
+ break;
+ case IORESOURCE_MEM:
+ pr_info(" MEM 0x%016llx..0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1);
+ res = hose->mem_resource;
+ break;
+ }
+ if (res != NULL)
+ of_pci_range_to_resource(&range, node, res);
+ }
+}
+
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->of_node);
+}
+#endif
+
+static DEFINE_MUTEX(pci_scan_mutex);
+
+void register_pci_controller(struct pci_controller *hose)
+{
+ struct resource *parent;
+
+ parent = hose->mem_resource->parent;
+ if (!parent)
+ parent = &iomem_resource;
+
+ if (request_resource(parent, hose->mem_resource) < 0)
+ goto out;
+
+ parent = hose->io_resource->parent;
+ if (!parent)
+ parent = &ioport_resource;
+
+ if (request_resource(parent, hose->io_resource) < 0) {
+ release_resource(hose->mem_resource);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&hose->list);
+ list_add(&hose->list, &controllers);
+
+ /*
+ * Do not panic here but later - this might happen before console init.
+ */
+ if (!hose->io_map_base) {
+ printk(KERN_WARNING
+ "registering PCI controller with io_map_base unset\n");
+ }
+
+ /*
+ * Scan the bus if it is registered after the PCI subsystem
+ * initialization.
+ */
+ if (pci_initialized) {
+ mutex_lock(&pci_scan_mutex);
+ pcibios_scanbus(hose);
+ mutex_unlock(&pci_scan_mutex);
+ }
+
+ return;
+
+out:
+ printk(KERN_WARNING
+ "Skipping PCI bus scan due to resource conflict\n");
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *hose;
+
+ /* Scan all of the recorded PCI controllers. */
+ list_for_each_entry(hose, &controllers, list)
+ pcibios_scanbus(hose);
+
+ pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
+
+ pci_initialized = 1;
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+static int pcibios_enable_resources(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1<<idx)))
+ continue;
+
+ r = &dev->resource[idx];
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((idx == PCI_ROM_RESOURCE) &&
+ (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ continue;
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available "
+ "because of resource collisions\n",
+ pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ if (cmd != old_cmd) {
+ printk("PCI: Enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ int err;
+
+ if ((err = pcibios_enable_resources(dev, mask)) < 0)
+ return err;
+
+ return pcibios_plat_dev_init(dev);
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev = bus->self;
+
+ if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ }
+}
+
+char * (*pcibios_plat_setup)(char *str) __initdata;
+
+char *__init pcibios_setup(char *str)
+{
+ if (pcibios_plat_setup)
+ return pcibios_plat_setup(str);
+ return str;
+}
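In pcibios_enable_resources() above, the PCI command word is rebuilt from the flags of the device's resources before being written back only when it actually changed. A small standalone sketch of that bit accumulation, with hypothetical resource-flag values standing in for struct resource (the PCI_COMMAND_* values match the PCI spec):

/* Standalone sketch (not kernel code) of the command-word accumulation. */
#include <stdio.h>

#define RES_IO			0x1	/* stand-in for IORESOURCE_IO */
#define RES_MEM			0x2	/* stand-in for IORESOURCE_MEM */
#define PCI_COMMAND_IO		0x1
#define PCI_COMMAND_MEMORY	0x2

static unsigned short build_cmd(const unsigned int *flags, int n,
				unsigned short cmd)
{
	int i;

	for (i = 0; i < n; i++) {
		if (flags[i] & RES_IO)
			cmd |= PCI_COMMAND_IO;
		if (flags[i] & RES_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	return cmd;
}

int main(void)
{
	unsigned int flags[] = { RES_MEM, RES_IO };

	printf("0x%04x\n", build_cmd(flags, 2, 0));	/* 0x0003 */
	return 0;
}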
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 6ce816201699..628c5132b3d8 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -407,13 +406,11 @@ static const struct of_device_id mt7620_pci_ids[] = {
{ .compatible = "mediatek,mt7620-pci" },
{},
};
-MODULE_DEVICE_TABLE(of, mt7620_pci_ids);
static struct platform_driver mt7620_pci_driver = {
.probe = mt7620_pci_probe,
.driver = {
.name = "mt7620-pci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(mt7620_pci_ids),
},
};
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index c258cd406fbb..308d051fc45c 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -204,6 +204,8 @@ const char *octeon_get_pci_interrupts(void)
* Interrupt Number (INTA# = 0, INTB# = 1, INTC# = 2, and
* INTD# = 3)
*/
+ if (of_machine_is_compatible("dlink,dsr-500n"))
+ return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
switch (octeon_bootinfo->board_type) {
case CVMX_BOARD_TYPE_NAO38:
/* This is really the NAC38 */
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index f2a1050168d9..d6360fe73d05 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -16,7 +16,6 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/init.h>
-#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -260,7 +259,6 @@ static const struct of_device_id rt288x_pci_match[] = {
{ .compatible = "ralink,rt288x-pci" },
{},
};
-MODULE_DEVICE_TABLE(of, rt288x_pci_match);
static struct platform_driver rt288x_pci_driver = {
.probe = rt288x_pci_probe,
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 53a42b07008b..3520e9b414e7 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -580,7 +579,6 @@ static const struct of_device_id rt3883_pci_ids[] = {
{ .compatible = "ralink,rt3883-pci" },
{},
};
-MODULE_DEVICE_TABLE(of, rt3883_pci_ids);
static struct platform_driver rt3883_pci_driver = {
.probe = rt3883_pci_probe,
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index b4c02f29663e..f6325fa657fb 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -20,208 +20,13 @@
#include <asm/cpu-info.h>
-/*
- * If PCI_PROBE_ONLY in pci_flags is set, we don't change any PCI resource
- * assignments.
- */
-
-/*
- * The PCI controller list.
- */
-
-static struct pci_controller *hose_head, **hose_tail = &hose_head;
-
unsigned long PCIBIOS_MIN_IO;
-unsigned long PCIBIOS_MIN_MEM;
-
-static int pci_initialized;
-
-/*
- * We need to avoid collisions with `mirrored' VGA ports
- * and other strange ISA hardware, so we always want the
- * addresses to be allocated in the 0x000-0x0ff region
- * modulo 0x400.
- *
- * Why? Because some silly external IO cards only decode
- * the low 10 bits of the IO address. The 0x00-0xff region
- * is reserved for motherboard devices that decode all 16
- * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
- * but we want to try to avoid allocating at 0x2900-0x2bff
- * which might have be mirrored at 0x0100-0x03ff..
- */
-resource_size_t
-pcibios_align_resource(void *data, const struct resource *res,
- resource_size_t size, resource_size_t align)
-{
- struct pci_dev *dev = data;
- struct pci_controller *hose = dev->sysdata;
- resource_size_t start = res->start;
-
- if (res->flags & IORESOURCE_IO) {
- /* Make sure we start at our min on all hoses */
- if (start < PCIBIOS_MIN_IO + hose->io_resource->start)
- start = PCIBIOS_MIN_IO + hose->io_resource->start;
-
- /*
- * Put everything into 0x00-0xff region modulo 0x400
- */
- if (start & 0x300)
- start = (start + 0x3ff) & ~0x3ff;
- } else if (res->flags & IORESOURCE_MEM) {
- /* Make sure we start at our min on all hoses */
- if (start < PCIBIOS_MIN_MEM + hose->mem_resource->start)
- start = PCIBIOS_MIN_MEM + hose->mem_resource->start;
- }
-
- return start;
-}
-
-static void pcibios_scanbus(struct pci_controller *hose)
-{
- static int next_busno;
- static int need_domain_info;
- LIST_HEAD(resources);
- struct pci_bus *bus;
-
- if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
- next_busno = (*hose->get_busno)();
-
- pci_add_resource_offset(&resources,
- hose->mem_resource, hose->mem_offset);
- pci_add_resource_offset(&resources,
- hose->io_resource, hose->io_offset);
- pci_add_resource_offset(&resources,
- hose->busn_resource, hose->busn_offset);
- bus = pci_scan_root_bus(NULL, next_busno, hose->pci_ops, hose,
- &resources);
- hose->bus = bus;
-
- need_domain_info = need_domain_info || hose->index;
- hose->need_domain_info = need_domain_info;
-
- if (!bus) {
- pci_free_resource_list(&resources);
- return;
- }
-
- next_busno = bus->busn_res.end + 1;
- /* Don't allow 8-bit bus number overflow inside the hose -
- reserve some space for bridges. */
- if (next_busno > 224) {
- next_busno = 0;
- need_domain_info = 1;
- }
-
- /*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
- */
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
- }
- pci_bus_add_devices(bus);
-}
-
-#ifdef CONFIG_OF
-void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
-{
- struct of_pci_range range;
- struct of_pci_range_parser parser;
-
- pr_info("PCI host bridge %s ranges:\n", node->full_name);
- hose->of_node = node;
-
- if (of_pci_range_parser_init(&parser, node))
- return;
-
- for_each_of_pci_range(&parser, &range) {
- struct resource *res = NULL;
-
- switch (range.flags & IORESOURCE_TYPE_BITS) {
- case IORESOURCE_IO:
- pr_info(" IO 0x%016llx..0x%016llx\n",
- range.cpu_addr,
- range.cpu_addr + range.size - 1);
- hose->io_map_base =
- (unsigned long)ioremap(range.cpu_addr,
- range.size);
- res = hose->io_resource;
- break;
- case IORESOURCE_MEM:
- pr_info(" MEM 0x%016llx..0x%016llx\n",
- range.cpu_addr,
- range.cpu_addr + range.size - 1);
- res = hose->mem_resource;
- break;
- }
- if (res != NULL)
- of_pci_range_to_resource(&range, node, res);
- }
-}
-
-struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
-{
- struct pci_controller *hose = bus->sysdata;
-
- return of_node_get(hose->of_node);
-}
-#endif
-
-static DEFINE_MUTEX(pci_scan_mutex);
-
-void register_pci_controller(struct pci_controller *hose)
-{
- struct resource *parent;
-
- parent = hose->mem_resource->parent;
- if (!parent)
- parent = &iomem_resource;
-
- if (request_resource(parent, hose->mem_resource) < 0)
- goto out;
-
- parent = hose->io_resource->parent;
- if (!parent)
- parent = &ioport_resource;
-
- if (request_resource(parent, hose->io_resource) < 0) {
- release_resource(hose->mem_resource);
- goto out;
- }
-
- *hose_tail = hose;
- hose_tail = &hose->next;
-
- /*
- * Do not panic here but later - this might happen before console init.
- */
- if (!hose->io_map_base) {
- printk(KERN_WARNING
- "registering PCI controller with io_map_base unset\n");
- }
-
- /*
- * Scan the bus if it is register after the PCI subsystem
- * initialization.
- */
- if (pci_initialized) {
- mutex_lock(&pci_scan_mutex);
- pcibios_scanbus(hose);
- mutex_unlock(&pci_scan_mutex);
- }
-
- return;
+EXPORT_SYMBOL(PCIBIOS_MIN_IO);
-out:
- printk(KERN_WARNING
- "Skipping PCI bus scan due to resource conflict\n");
-}
+unsigned long PCIBIOS_MIN_MEM;
+EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
-static void __init pcibios_set_cache_line_size(void)
+static int __init pcibios_set_cache_line_size(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int lsize;
@@ -239,92 +44,9 @@ static void __init pcibios_set_cache_line_size(void)
pci_dfl_cache_line_size = lsize >> 2;
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
-}
-
-static int __init pcibios_init(void)
-{
- struct pci_controller *hose;
-
- pcibios_set_cache_line_size();
-
- /* Scan all of the recorded PCI controllers. */
- for (hose = hose_head; hose; hose = hose->next)
- pcibios_scanbus(hose);
-
- pci_fixup_irqs(pci_common_swizzle, pcibios_map_irq);
-
- pci_initialized = 1;
-
- return 0;
-}
-
-subsys_initcall(pcibios_init);
-
-static int pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<idx)))
- continue;
-
- r = &dev->resource[idx];
- if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
- continue;
- if ((idx == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
- continue;
- if (!r->start && r->end) {
- printk(KERN_ERR "PCI: Device %s not available "
- "because of resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (cmd != old_cmd) {
- printk("PCI: Enabling device %s (%04x -> %04x)\n",
- pci_name(dev), old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
return 0;
}
-
-unsigned int pcibios_assign_all_busses(void)
-{
- return 1;
-}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- int err;
-
- if ((err = pcibios_enable_resources(dev, mask)) < 0)
- return err;
-
- return pcibios_plat_dev_init(dev);
-}
-
-void pcibios_fixup_bus(struct pci_bus *bus)
-{
- struct pci_dev *dev = bus->self;
-
- if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
- (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- pci_read_bridge_bases(bus);
- }
-}
-
-EXPORT_SYMBOL(PCIBIOS_MIN_IO);
-EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+arch_initcall(pcibios_set_cache_line_size);
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc, resource_size_t *start,
@@ -359,12 +81,3 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-
-char * (*pcibios_plat_setup)(char *str) __initdata;
-
-char *__init pcibios_setup(char *str)
-{
- if (pcibios_plat_setup)
- return pcibios_plat_setup(str);
- return str;
-}
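Note that pcibios_set_cache_line_size() is now registered directly as an arch_initcall() instead of being called from the removed pcibios_init(), so it still runs before the subsys_initcall() in pci-legacy.c that scans the buses. A minimal sketch of that registration pattern (the function name is hypothetical, not from this patch):

/* An __init function hooked in at the arch_initcall level, which runs
 * before the subsys_initcall level used for the PCI bus scan. */
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_arch_setup(void)
{
	pr_debug("runs at the arch_initcall level\n");
	return 0;
}
arch_initcall(example_arch_setup);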
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 99f3db4f0a9b..9f672ceb089b 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-npei-defs.h>
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 3cd357737a26..7cf4eb50fc72 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -232,12 +232,8 @@ static struct platform_device *pnx833x_platform_devices[] __initdata = {
static int __init pnx833x_platform_init(void)
{
- int res;
-
- res = platform_add_devices(pnx833x_platform_devices,
- ARRAY_SIZE(pnx833x_platform_devices));
-
- return res;
+ return platform_add_devices(pnx833x_platform_devices,
+ ARRAY_SIZE(pnx833x_platform_devices));
}
arch_initcall(pnx833x_platform_init);
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index b0343ff336c5..8077ff39bdea 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -1,4 +1,7 @@
/*
+ * Ralink RT2880 timer
+ * Author: John Crispin
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
@@ -6,7 +9,6 @@
* Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -152,33 +154,17 @@ static int rt_timer_probe(struct platform_device *pdev)
return 0;
}
-static int rt_timer_remove(struct platform_device *pdev)
-{
- struct rt_timer *rt = platform_get_drvdata(pdev);
-
- rt_timer_disable(rt);
- rt_timer_free(rt);
-
- return 0;
-}
-
static const struct of_device_id rt_timer_match[] = {
{ .compatible = "ralink,rt2880-timer" },
{},
};
-MODULE_DEVICE_TABLE(of, rt_timer_match);
static struct platform_driver rt_timer_driver = {
.probe = rt_timer_probe,
- .remove = rt_timer_remove,
.driver = {
- .name = "rt-timer",
- .of_match_table = rt_timer_match
+ .name = "rt-timer",
+ .of_match_table = rt_timer_match,
+ .suppress_bind_attrs = true,
},
};
-
-module_platform_driver(rt_timer_driver);
-
-MODULE_DESCRIPTION("Ralink RT2880 timer");
-MODULE_AUTHOR("John Crispin <john@phrozen.org");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(rt_timer_driver);
diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig
index 8c337d60f790..42923478d45c 100644
--- a/arch/mips/txx9/Kconfig
+++ b/arch/mips/txx9/Kconfig
@@ -20,7 +20,7 @@ config MACH_TXX9
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_BIG_ENDIAN
- select HAVE_CLK
+ select COMMON_CLK
config TOSHIBA_JMR3927
bool "Toshiba JMR-TX3927 board"
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 1f6bc9a3036c..285d84e5c7b9 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -29,12 +29,8 @@ static int __init
early_read_config_word(struct pci_controller *hose,
int top_bus, int bus, int devfn, int offset, u16 *value)
{
- struct pci_dev fake_dev;
struct pci_bus fake_bus;
- fake_dev.bus = &fake_bus;
- fake_dev.sysdata = hose;
- fake_dev.devfn = devfn;
fake_bus.number = bus;
fake_bus.sysdata = hose;
fake_bus.ops = hose->pci_ops;
@@ -45,7 +41,7 @@ early_read_config_word(struct pci_controller *hose,
else
fake_bus.parent = NULL;
- return pci_read_config_word(&fake_dev, offset, value);
+ return pci_bus_read_config_word(&fake_bus, devfn, offset, value);
}
int __init txx9_pci66_check(struct pci_controller *hose, int top_bus,
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index ada92db92f87..a1d98b5c8fd6 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -15,7 +15,8 @@
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/module.h>
-#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
@@ -83,40 +84,6 @@ int txx9_ccfg_toeon __initdata;
int txx9_ccfg_toeon __initdata = 1;
#endif
-/* Minimum CLK support */
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- if (!strcmp(id, "spi-baseclk"))
- return (struct clk *)((unsigned long)txx9_gbus_clock / 2 / 2);
- if (!strcmp(id, "imbus_clk"))
- return (struct clk *)((unsigned long)txx9_gbus_clock / 2);
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return (unsigned long)clk;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
-
#define BOARD_VEC(board) extern struct txx9_board_vec board;
#include <asm/txx9/boards.h>
#undef BOARD_VEC
@@ -560,8 +527,41 @@ void __init plat_time_init(void)
txx9_board_vec->time_init();
}
+static void txx9_clk_init(void)
+{
+ struct clk_hw *hw;
+ int error;
+
+ hw = clk_hw_register_fixed_rate(NULL, "gbus", NULL, 0, txx9_gbus_clock);
+ if (IS_ERR(hw)) {
+ error = PTR_ERR(hw);
+ goto fail;
+ }
+
+ hw = clk_hw_register_fixed_factor(NULL, "imbus", "gbus", 0, 1, 2);
+ error = clk_hw_register_clkdev(hw, "imbus_clk", NULL);
+ if (error)
+ goto fail;
+
+#ifdef CONFIG_CPU_TX49XX
+ if (TX4938_REV_PCODE() == 0x4938) {
+ hw = clk_hw_register_fixed_factor(NULL, "spi", "gbus", 0, 1, 4);
+ error = clk_hw_register_clkdev(hw, "spi-baseclk", NULL);
+ if (error)
+ goto fail;
+ }
+#endif
+
+ return;
+
+fail:
+ pr_err("Failed to register clocks: %d\n", error);
+}
+
static int __init _txx9_arch_init(void)
{
+ txx9_clk_init();
+
if (txx9_board_vec->arch_init)
txx9_board_vec->arch_init();
return 0;
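With the clkdev lookups registered by the new txx9_clk_init() above, drivers obtain these clocks through the common clk API rather than the removed open-coded clk_get()/clk_get_rate() stubs. A hedged sketch of the consumer side (the function and device pointer are hypothetical; only the "imbus_clk" con_id comes from the patch):

/* Retrieve the "imbus_clk" lookup through the standard clk API. */
#include <linux/clk.h>
#include <linux/err.h>

static int example_get_imbus_rate(struct device *dev, unsigned long *rate)
{
	struct clk *clk = clk_get(dev, "imbus_clk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*rate = clk_get_rate(clk);	/* gbus clock divided by two */
	clk_put(clk);
	return 0;
}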
diff --git a/arch/mips/txx9/generic/setup_tx3927.c b/arch/mips/txx9/generic/setup_tx3927.c
index 110e05c3eb8f..d3b83a92cf26 100644
--- a/arch/mips/txx9/generic/setup_tx3927.c
+++ b/arch/mips/txx9/generic/setup_tx3927.c
@@ -92,7 +92,6 @@ void __init tx3927_setup(void)
/* PIO */
__raw_writel(0, &tx3927_pioptr->maskcpu);
__raw_writel(0, &tx3927_pioptr->maskext);
- txx9_gpio_init(TX3927_PIO_REG, 0, 16);
conf = read_c0_conf();
if (conf & TX39_CONF_DCE) {
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index a4664cb6c1e1..8d8011570b1d 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -215,7 +215,6 @@ void __init tx4927_setup(void)
txx9_tmr_init(TX4927_TMR_REG(i) & 0xfffffffffULL);
/* PIO */
- txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO);
__raw_writel(0, &tx4927_pioptr->maskcpu);
__raw_writel(0, &tx4927_pioptr->maskext);
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 58cdb2aba5e1..ba265bf1fd06 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -241,7 +241,6 @@ void __init tx4938_setup(void)
txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL);
/* PIO */
- txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
__raw_writel(0, &tx4938_pioptr->maskcpu);
__raw_writel(0, &tx4938_pioptr->maskext);
diff --git a/arch/mips/txx9/jmr3927/setup.c b/arch/mips/txx9/jmr3927/setup.c
index 3206f76f300b..a455166dc6d4 100644
--- a/arch/mips/txx9/jmr3927/setup.c
+++ b/arch/mips/txx9/jmr3927/setup.c
@@ -142,8 +142,6 @@ static void __init jmr3927_board_init(void)
/* PIO[15:12] connected to LEDs */
__raw_writel(0x0000f000, &tx3927_pioptr->dir);
- gpio_request(11, "dipsw1");
- gpio_request(10, "dipsw2");
jmr3927_pci_setup();
@@ -204,6 +202,14 @@ static void __init jmr3927_device_init(void)
txx9_iocled_init(iocled_base, -1, 8, 1, "green", NULL);
}
+static void __init jmr3927_arch_init(void)
+{
+ txx9_gpio_init(TX3927_PIO_REG, 0, 16);
+
+ gpio_request(11, "dipsw1");
+ gpio_request(10, "dipsw2");
+}
+
struct txx9_board_vec jmr3927_vec __initdata = {
.system = "Toshiba JMR_TX3927",
.prom_init = jmr3927_prom_init,
@@ -211,6 +217,7 @@ struct txx9_board_vec jmr3927_vec __initdata = {
.irq_setup = jmr3927_irq_setup,
.time_init = jmr3927_time_init,
.device_init = jmr3927_device_init,
+ .arch_init = jmr3927_arch_init,
#ifdef CONFIG_PCI
.pci_map_irq = jmr3927_pci_map_irq,
#endif
diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c
index 3c516ef625e5..f5b367e20dff 100644
--- a/arch/mips/txx9/rbtx4927/setup.c
+++ b/arch/mips/txx9/rbtx4927/setup.c
@@ -52,6 +52,7 @@
#include <linux/leds.h>
#include <asm/io.h>
#include <asm/reboot.h>
+#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/pci.h>
#include <asm/txx9/rbtx4927.h>
@@ -151,20 +152,37 @@ static void __init tx4937_pci_setup(void)
}
tx4938_setup_pcierr_irq();
}
+#else
+static inline void tx4927_pci_setup(void) {}
+static inline void tx4937_pci_setup(void) {}
+#endif /* CONFIG_PCI */
+
+static void __init rbtx4927_gpio_init(void)
+{
+ /* TX4927-SIO DTR on (PIO[15]) */
+ gpio_request(15, "sio-dtr");
+ gpio_direction_output(15, 1);
+
+ tx4927_sio_init(0, 0);
+}
static void __init rbtx4927_arch_init(void)
{
+ txx9_gpio_init(TX4927_PIO_REG & 0xfffffffffULL, 0, TX4927_NUM_PIO);
+
+ rbtx4927_gpio_init();
+
tx4927_pci_setup();
}
static void __init rbtx4937_arch_init(void)
{
+ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
+
+ rbtx4927_gpio_init();
+
tx4937_pci_setup();
}
-#else
-#define rbtx4927_arch_init NULL
-#define rbtx4937_arch_init NULL
-#endif /* CONFIG_PCI */
static void toshiba_rbtx4927_restart(char *command)
{
@@ -205,12 +223,6 @@ static void __init rbtx4927_mem_setup(void)
#else
set_io_port_base(KSEG1 + RBTX4927_ISA_IO_OFFSET);
#endif
-
- /* TX4927-SIO DTR on (PIO[15]) */
- gpio_request(15, "sio-dtr");
- gpio_direction_output(15, 1);
-
- tx4927_sio_init(0, 0);
}
static void __init rbtx4927_clock_init(void)
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
index 54de66837103..07939ed6b22f 100644
--- a/arch/mips/txx9/rbtx4938/setup.c
+++ b/arch/mips/txx9/rbtx4938/setup.c
@@ -336,6 +336,7 @@ static void __init rbtx4938_mtd_init(void)
static void __init rbtx4938_arch_init(void)
{
+ txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
gpiochip_add_data(&rbtx4938_spi_gpio_chip, NULL);
rbtx4938_pci_setup();
rbtx4938_spi_init();
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 3b4538ec0102..c3dc12a8b7d9 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -13,8 +13,6 @@ cflags-vdso := $(ccflags-vdso) \
-DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-stack-protector)
aflags-vdso := $(ccflags-vdso) \
- $(filter -I%,$(KBUILD_CFLAGS)) \
- $(filter -E%,$(KBUILD_CFLAGS)) \
-D__ASSEMBLY__ -Wa,-gdwarf-2
#
@@ -82,7 +80,7 @@ obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
$(obj-vdso): KBUILD_CFLAGS := $(cflags-vdso) $(native-abi)
$(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
-$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
+$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) $(native-abi)
$(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 50d020ac0f48..617dece67924 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -318,12 +318,12 @@ mpc85xx_smp_defconfig:
PHONY += corenet32_smp_defconfig
corenet32_smp_defconfig:
$(call merge_into_defconfig,corenet_basic_defconfig,\
- 85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+ 85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw dpaa)
PHONY += corenet64_smp_defconfig
corenet64_smp_defconfig:
$(call merge_into_defconfig,corenet_basic_defconfig,\
- 85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+ 85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw dpaa)
PHONY += mpc86xx_defconfig
mpc86xx_defconfig:
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index f7a184b6c35b..57d42d129033 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -32,9 +32,16 @@ static struct addr_range prep_kernel(void)
void *addr = 0;
struct elf_info ei;
long len;
+ int uncompressed_image = 0;
- partial_decompress(vmlinuz_addr, vmlinuz_size,
+ len = partial_decompress(vmlinuz_addr, vmlinuz_size,
elfheader, sizeof(elfheader), 0);
+ /* assume uncompressed data if -1 is returned */
+ if (len == -1) {
+ uncompressed_image = 1;
+ memcpy(elfheader, vmlinuz_addr, sizeof(elfheader));
+ printf("No valid compressed data found, assume uncompressed data\n\r");
+ }
if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -67,6 +74,13 @@ static struct addr_range prep_kernel(void)
"device tree\n\r");
}
+ if (uncompressed_image) {
+ memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize);
+ printf("0x%lx bytes of uncompressed data copied\n\r",
+ ei.loadsize);
+ goto out;
+ }
+
/* Finally, decompress the kernel */
printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
@@ -82,7 +96,7 @@ static struct addr_range prep_kernel(void)
len, ei.loadsize);
printf("Done! Decompressed 0x%lx bytes\n\r", len);
-
+out:
flush_cache(addr, ei.loadsize);
return (struct addr_range){addr, ei.memsize};
diff --git a/arch/powerpc/configs/dpaa.config b/arch/powerpc/configs/dpaa.config
new file mode 100644
index 000000000000..efa99c048543
--- /dev/null
+++ b/arch/powerpc/configs/dpaa.config
@@ -0,0 +1 @@
+CONFIG_FSL_DPAA=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index ab9f4e0ed4cf..5c4fbc80dc6c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,5 +1,6 @@
generic-y += clkdev.h
generic-y += div64.h
+generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += local64.h
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ee655ed1ff1b..1e8fceb308a5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -53,10 +53,8 @@ static inline __sum16 csum_fold(__wsum sum)
return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
-static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
#ifdef __powerpc64__
unsigned long s = (__force u32)sum;
@@ -83,10 +81,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
- unsigned short len,
- unsigned short proto,
- __wsum sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
+ __u8 proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
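csum_tcpudp_magic() above completes the TCP/UDP pseudo-header checksum by folding the 32-bit partial sum into 16 bits and complementing it. A generic userspace sketch of that folding step (illustrative only, not the PowerPC implementation):

/* Fold a 32-bit one's-complement partial sum into a 16-bit checksum. */
#include <stdio.h>
#include <stdint.h>

static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* at most one carry remains */
	return (uint16_t)~sum;			/* one's complement */
}

int main(void)
{
	printf("0x%04x\n", fold32(0x0001fffeu));	/* 0x0000 */
	return 0;
}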
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 01b8a13f0224..3919332965af 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
std r0,0(r1); \
ptesync; \
ld r0,0(r1); \
-1: cmp cr0,r0,r0; \
+1: cmpd cr0,r0,r0; \
bne 1b; \
IDLE_INST; \
b .
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f752e6f7cfbe..ab68d0ee7725 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,7 @@ extern int machine_check_e500mc(struct pt_regs *regs);
extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
+int machine_check_8xx(struct pt_regs *regs);
extern void cpu_down_flush_e500v2(void);
extern void cpu_down_flush_e500mc(void);
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 2e4e7d878c8e..84d49b197c32 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -93,6 +93,10 @@
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l;
+#define __LOAD_HANDLER(reg, label) \
+ ld reg,PACAKBASE(r13); \
+ ori reg,reg,(ABS_ADDR(label))@l;
+
/* Exception register prefixes */
#define EXC_HV H
#define EXC_STD
@@ -208,6 +212,18 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define kvmppc_interrupt kvmppc_interrupt_pr
#endif
+#ifdef CONFIG_RELOCATABLE
+#define BRANCH_TO_COMMON(reg, label) \
+ __LOAD_HANDLER(reg, label); \
+ mtctr reg; \
+ bctr
+
+#else
+#define BRANCH_TO_COMMON(reg, label) \
+ b label
+
+#endif
+
#define __KVM_HANDLER_PROLOG(area, n) \
BEGIN_FTR_SECTION_NESTED(947) \
ld r10,area+EX_CFAR(r13); \
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c7d82ff62a33..eba60416536e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -155,6 +155,8 @@ static inline unsigned long arch_local_irq_save(void)
unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+ wrtspr(SPRN_EID);
#else
SET_MSR_EE(flags & ~MSR_EE);
#endif
@@ -165,6 +167,8 @@ static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
asm volatile("wrteei 0" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+ wrtspr(SPRN_EID);
#else
arch_local_irq_save();
#endif
@@ -174,6 +178,8 @@ static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
asm volatile("wrteei 1" : : : "memory");
+#elif defined(CONFIG_PPC_8xx)
+ wrtspr(SPRN_EIE);
#else
unsigned long msr = mfmsr();
SET_MSR_EE(msr | MSR_EE);
diff --git a/arch/powerpc/include/asm/libata-portmap.h b/arch/powerpc/include/asm/libata-portmap.h
index 4d8518049f4d..4396db57b8be 100644
--- a/arch/powerpc/include/asm/libata-portmap.h
+++ b/arch/powerpc/include/asm/libata-portmap.h
@@ -1,12 +1,8 @@
#ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
#define __ASM_POWERPC_LIBATA_PORTMAP_H
-#define ATA_PRIMARY_CMD 0x1F0
-#define ATA_PRIMARY_CTL 0x3F6
#define ATA_PRIMARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 0)
-#define ATA_SECONDARY_CMD 0x170
-#define ATA_SECONDARY_CTL 0x376
#define ATA_SECONDARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 1)
#endif
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 54ff8ce7fa96..0132831b3081 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -152,6 +152,7 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
+#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 2a620789954b..9cd4e8cbc78c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1250,6 +1250,8 @@ static inline void mtmsr_isync(unsigned long val)
: "r" ((unsigned long)(v)) \
: "memory")
#endif
+#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
+ : : "memory")
extern unsigned long msr_check_and_set(unsigned long bits);
extern bool strict_msr_control;
diff --git a/arch/powerpc/include/asm/reg_8xx.h b/arch/powerpc/include/asm/reg_8xx.h
index 94d01f81e668..0197e12f7d48 100644
--- a/arch/powerpc/include/asm/reg_8xx.h
+++ b/arch/powerpc/include/asm/reg_8xx.h
@@ -25,6 +25,10 @@
#define SPRN_MD_RAM0 825
#define SPRN_MD_RAM1 826
+/* Special MSR manipulation registers */
+#define SPRN_EIE 80 /* External interrupt enable (EE=1, RI=1) */
+#define SPRN_EID 81 /* External interrupt disable (EE=0, RI=1) */
+
/* Commands. Only the first few are available to the instruction cache.
*/
#define IDC_ENABLE 0x02000000 /* Cache enable */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f6f68f73e858..99e1397b71da 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
return cpumask_subset(mm_cpumask(mm),
topology_sibling_cpumask(smp_processor_id()));
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return cpumask_equal(mm_cpumask(mm),
+ cpumask_of(smp_processor_id()));
+}
+
#else
static inline int mm_is_core_local(struct mm_struct *mm)
{
return 1;
}
+
+static inline int mm_is_thread_local(struct mm_struct *mm)
+{
+ return 1;
+}
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cf12c580f6b2..e8cdfec8d512 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -16,6 +16,10 @@
#define __NR__exit __NR_exit
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index aded29ad2e8f..1925341dbb9c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
+CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -90,10 +95,6 @@ obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_MODULES) += ppc_ksyms.o
-ifeq ($(CONFIG_PPC32),y)
-obj-$(CONFIG_MODULES) += ppc_ksyms_32.o
-endif
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6c4646ac9234..6a82ef039c50 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1248,6 +1248,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTR_TYPE_8xx,
.icache_bsize = 16,
.dcache_bsize = 16,
+ .machine_check = machine_check_8xx,
.platform = "ppc823",
},
#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 83428a283fa0..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,7 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
/*
* MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -1358,6 +1359,7 @@ _GLOBAL(_mcount)
MCOUNT_RESTORE_FRAME
bctr
#endif
+EXPORT_SYMBOL(_mcount)
_GLOBAL(ftrace_stub)
blr
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 51df82b61084..6432d4bf08c8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -38,6 +38,7 @@
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
+#include <asm/export.h>
/*
* System calls.
@@ -1177,6 +1178,7 @@ _GLOBAL(enter_prom)
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
+EXPORT_SYMBOL(_mcount)
mflr r12
mtctr r12
mtlr r0
@@ -1413,6 +1415,7 @@ livepatch_handler:
#else
_GLOBAL_TOC(_mcount)
+EXPORT_SYMBOL(_mcount)
/* Taken from output of objdump from lib64/glibc */
mflr r3
ld r11, 0(r1)
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 9f1ebf7338f1..52ca2471ee1a 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -16,6 +16,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
+#include <asm/export.h>
#ifndef CONFIG_PPC64
/* epapr_ev_idle() was derived from e500_idle() */
@@ -53,3 +54,4 @@ epapr_hypercall_start:
nop
nop
blr
+EXPORT_SYMBOL(epapr_hypercall_start)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08992f8f5036..08ba447a4b3d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -95,19 +95,35 @@ __start_interrupts:
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x4100)
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
- SET_SCRATCH0(r13)
+
#ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
- /* Running native on arch 2.06 or later, check if we are
- * waking up from nap/sleep/winkle.
+ /*
+ * If running native on arch 2.06 or later, check if we are waking up
+ * from nap/sleep/winkle, and branch to idle handler.
*/
- mfspr r13,SPRN_SRR1
- rlwinm. r13,r13,47-31,30,31
- beq 9f
+#define IDLETEST(n) \
+ BEGIN_FTR_SECTION ; \
+ mfspr r10,SPRN_SRR1 ; \
+ rlwinm. r10,r10,47-31,30,31 ; \
+ beq- 1f ; \
+ cmpwi cr3,r10,2 ; \
+ BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+1: \
+ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
- cmpwi cr3,r13,2
- GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+ IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
bl pnv_restore_hyp_resource
li r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
blt cr3,2f
b pnv_wakeup_loss
2: b pnv_wakeup_noloss
+#endif
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
- NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
#ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
-
-#define LOAD_SYSCALL_HANDLER(reg) \
- ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg) \
+ __LOAD_HANDLER(reg, system_call_common)
/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1 \
@@ -1377,7 +1385,7 @@ __end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)
#ifdef CONFIG_PPC_970_NAP
-TRAMP_REAL_BEGIN(power4_fixup_nap)
+EXC_COMMON_BEGIN(power4_fixup_nap)
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 08d14b096eb9..6c509f39bbde 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
#ifdef CONFIG_VSX
#define __REST_32FPVSRS(n,c,base) \
@@ -59,6 +60,7 @@ _GLOBAL(load_fp_state)
MTFSF_L(fr0)
REST_32FPVSRS(0, R4, R3)
blr
+EXPORT_SYMBOL(load_fp_state)
/*
* Store FP state into memory, including FPSCR
@@ -69,6 +71,7 @@ _GLOBAL(store_fp_state)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
blr
+EXPORT_SYMBOL(store_fp_state)
/*
* This task wants to use the FPU now.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a3f821eb7e9a..9d963547d243 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -34,6 +34,7 @@
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/export.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -738,6 +739,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
.globl mol_trampoline
.set mol_trampoline, i0x2f00
+ EXPORT_SYMBOL(mol_trampoline)
. = 0x3000
@@ -1045,6 +1047,7 @@ _ENTRY(switch_mmu_context)
4: trap
EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
blr
+EXPORT_SYMBOL(switch_mmu_context)
/*
* An undocumented "feature" of 604e requires that the v bit
@@ -1272,6 +1275,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
+EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
@@ -1285,6 +1289,7 @@ intercept_table:
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
.long 0, 0, 0, 0, 0, 0, 0, 0
+EXPORT_SYMBOL(intercept_table)
/* Room for two PTE pointers, usually the kernel and current user pointers
* to their respective root page table.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 7d7d8635227a..41374a468d1c 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -41,6 +41,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
/* As with the other PowerPC ports, it is expected that when code
* execution begins here, the following registers contain valid, yet
@@ -971,6 +972,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
+EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 9cdf5c71e426..37e4a7cf0065 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -39,6 +39,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
+#include <asm/export.h>
#include "head_booke.h"
@@ -1254,6 +1255,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 79da0641bae2..04c546e20cc0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -43,6 +43,7 @@
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
+#include <asm/export.h>
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -1002,3 +1003,4 @@ swapper_pg_dir:
.globl empty_zero_page
empty_zero_page:
.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3a185c51ce8f..fb133a163263 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -31,6 +31,7 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fixmap.h>
+#include <asm/export.h>
/* Macro to make the code more readable. */
#ifdef CONFIG_8xx_CPU6
@@ -226,7 +227,7 @@ i##n: \
ret_from_except)
/* System reset */
- EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
/* Machine check */
. = 0x200
@@ -321,7 +322,7 @@ SystemCall:
#endif
InstructionTLBMiss:
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
mtspr SPRN_SPRG_SCRATCH2, r3
#endif
EXCEPTION_PROLOG_0
@@ -329,23 +330,20 @@ InstructionTLBMiss:
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
+ mfspr r10, SPRN_SRR0 /* Get effective address of fault */
+ INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
- mfspr r11, SPRN_SRR0 /* Get effective address of fault */
- INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
- mfcr r10
- IS_KERNEL(r11, r11)
+ mfcr r3
+ IS_KERNEL(r11, r10)
+#endif
mfspr r11, SPRN_M_TW /* Get level 1 table */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
BRANCH_UNLESS_KERNEL(3f)
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
- mtcr r10
- mfspr r10, SPRN_SRR0 /* Get effective address of fault */
-#else
- mfspr r10, SPRN_SRR0 /* Get effective address of fault */
- INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
- mfspr r11, SPRN_M_TW /* Get level 1 table base address */
+ mtcr r3
#endif
/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -377,58 +375,39 @@ InstructionTLBMiss:
MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
mfspr r3, SPRN_SPRG_SCRATCH2
#endif
EXCEPTION_EPILOG_0
rfi
-/*
- * Bottom part of DataStoreTLBMiss handler for IMMR area
- * not enough space in the DataStoreTLBMiss area
- */
-DTLBMissIMMR:
- mtcr r10
- /* Set 512k byte guarded page and mark it valid */
- li r10, MD_PS512K | MD_GUARDED | MD_SVALID
- MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
- mfspr r10, SPRN_IMMR /* Get current IMMR */
- rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
- _PAGE_PRESENT | _PAGE_NO_CACHE
- MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
-
- li r11, RPN_PATTERN
- mtspr SPRN_DAR, r11 /* Tag DAR */
- EXCEPTION_EPILOG_0
- rfi
-
. = 0x1200
DataStoreTLBMiss:
+ mtspr SPRN_SPRG_SCRATCH2, r3
EXCEPTION_PROLOG_0
- mfcr r10
+ mfcr r3
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- mfspr r11, SPRN_MD_EPN
- rlwinm r11, r11, 16, 0xfff8
+ mfspr r10, SPRN_MD_EPN
+ rlwinm r10, r10, 16, 0xfff8
+ cmpli cr0, r10, PAGE_OFFSET@h
+ mfspr r11, SPRN_M_TW /* Get level 1 table */
+ blt+ 3f
#ifndef CONFIG_PIN_TLB_IMMR
- cmpli cr0, r11, VIRT_IMMR_BASE@h
+ cmpli cr0, r10, VIRT_IMMR_BASE@h
#endif
- cmpli cr7, r11, PAGE_OFFSET@h
+_ENTRY(DTLBMiss_cmp)
+ cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h
+ lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
#ifndef CONFIG_PIN_TLB_IMMR
_ENTRY(DTLBMiss_jmp)
beq- DTLBMissIMMR
#endif
- bge- cr7, 4f
-
- mfspr r11, SPRN_M_TW /* Get level 1 table */
+ blt cr7, DTLBMissLinear
3:
- mtcr r10
-#ifdef CONFIG_8xx_CPU6
- mtspr SPRN_SPRG_SCRATCH2, r3
-#endif
+ mtcr r3
mfspr r10, SPRN_MD_EPN
/* Insert level 1 index */
@@ -481,30 +460,7 @@ _ENTRY(DTLBMiss_jmp)
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
mfspr r3, SPRN_SPRG_SCRATCH2
-#endif
- mtspr SPRN_DAR, r11 /* Tag DAR */
- EXCEPTION_EPILOG_0
- rfi
-
-4:
-_ENTRY(DTLBMiss_cmp)
- cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
- bge- 3b
-
- mtcr r10
- /* Set 8M byte page and mark it valid */
- li r10, MD_PS8MEG | MD_SVALID
- MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
- mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
- _PAGE_PRESENT
- MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
-
- li r11, RPN_PATTERN
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi
@@ -570,6 +526,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */
. = 0x2000
+/*
+ * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
+ * not enough space in the DataStoreTLBMiss area.
+ */
+DTLBMissIMMR:
+ mtcr r3
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+ mfspr r10, SPRN_IMMR /* Get current IMMR */
+ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT | _PAGE_NO_CACHE
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r3, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
+ rfi
+
+DTLBMissLinear:
+ mtcr r3
+ /* Set 8M byte page and mark it valid */
+ li r11, MD_PS8MEG | MD_SVALID
+ MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+ rlwinm r10, r10, 16, 0x0f800000 /* 8xx supports max 256Mb RAM */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+ _PAGE_PRESENT
+ MTSPR_CPU6(SPRN_MD_RPN, r10, r11) /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+ mfspr r3, SPRN_SPRG_SCRATCH2
+ EXCEPTION_EPILOG_0
+ rfi
+
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
* DAR is set to the calculated address.
@@ -586,7 +579,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
rlwinm r11, r10, 16, 0xfff8
_ENTRY(FixupDAR_cmp)
cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
- blt- cr7, 200f
+ /* create physical page address from effective address */
+ tophys(r11, r10)
+ blt- cr7, 201f
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -616,10 +611,6 @@ _ENTRY(FixupDAR_cmp)
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */
- /* create physical page address from effective address */
-200: tophys(r11, r10)
- b 201b
-
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
@@ -894,6 +885,7 @@ sdata:
.align PAGE_SHIFT
empty_zero_page:
.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3bfa3150911f..bf4c6021515f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -42,6 +42,7 @@
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
#include "head_booke.h"
/* As with the other PowerPC ports, it is expected that when code
@@ -1223,6 +1224,7 @@ sdata:
.globl empty_zero_page
empty_zero_page:
.space 4096
+EXPORT_SYMBOL(empty_zero_page)
.globl swapper_pg_dir
swapper_pg_dir:
.space PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69eae57..03d089b3ed72 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
if (!stepped) {
WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
"0x%lx will be disabled.", info->address);
- perf_event_disable(bp);
+ perf_event_disable_inatomic(bp);
goto out;
}
/*
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed26e3..72dac0b58061 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
* Threads will spin in HMT_LOW until the lock bit is cleared.
* r14 - pointer to core_idle_state
* r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
*/
core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
bne 3b
HMT_MEDIUM
lwarx r15,0,r14
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne core_idle_lock_held
blr
/*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
std r9,_MSR(r1)
std r1,PACAR1(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- /* Tell KVM we're entering idle */
- li r4,KVM_HWTHREAD_IN_IDLE
- stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
/*
* Go to real mode to do the nap, as required by the architecture.
* Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /******************************************************/
+ /* N O T E W E L L ! ! ! N O T E W E L L */
+ /* The following store to HSTATE_HWTHREAD_STATE(r13) */
+ /* MUST occur in real mode, i.e. with the MMU off, */
+ /* and the MMU must stay off until we clear this flag */
+ /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
+ /* reset interrupt vector in exceptions-64s.S. */
+ /* The reason is that another thread can switch the */
+ /* MMU to a guest context whenever this flag is set */
+ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
+ /* that would potentially cause this thread to start */
+ /* executing instructions from guest memory in */
+ /* hypervisor mode, leading to a host crash or data */
+ /* corruption, or worse. */
+ /******************************************************/
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
stb r3,PACA_THREAD_IDLE_STATE(r13)
cmpwi cr3,r3,PNV_THREAD_SLEEP
bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
* r3 - requested stop state
*/
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* Tell KVM we're entering idle */
+ li r4,KVM_HWTHREAD_IN_IDLE
+ /* DO THIS IN REAL MODE! See comment above. */
+ stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
/*
* Check if the requested state is a deep idle state.
*/
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 0d432194c018..384357cb8bc0 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -18,6 +18,7 @@
#include <asm/unistd.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
+#include <asm/export.h>
.text
@@ -118,3 +119,4 @@ _GLOBAL(longjmp)
_GLOBAL(current_stack_pointer)
PPC_LL r3,0(r1)
blr
+EXPORT_SYMBOL(current_stack_pointer)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 03756ffdcd71..93cf7a5846a6 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -33,6 +33,7 @@
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
.text
@@ -319,6 +320,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
#endif /* CONFIG_4xx */
isync
blr
+EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */
/*
@@ -359,6 +361,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
isync
blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
/*
* Flush a particular page from the data cache to RAM.
@@ -497,6 +500,7 @@ _GLOBAL(copy_page)
li r0,MAX_COPY_PREFETCH
li r11,4
b 2b
+EXPORT_SYMBOL(copy_page)
/*
* Extended precision shifts.
@@ -524,6 +528,7 @@ _GLOBAL(__ashrdi3)
sraw r3,r3,r5 # MSW = MSW >> count
or r4,r4,r7 # LSW |= t2
blr
+EXPORT_SYMBOL(__ashrdi3)
_GLOBAL(__ashldi3)
subfic r6,r5,32
@@ -535,6 +540,7 @@ _GLOBAL(__ashldi3)
slw r4,r4,r5 # LSW = LSW << count
or r3,r3,r7 # MSW |= t2
blr
+EXPORT_SYMBOL(__ashldi3)
_GLOBAL(__lshrdi3)
subfic r6,r5,32
@@ -546,6 +552,7 @@ _GLOBAL(__lshrdi3)
srw r3,r3,r5 # MSW = MSW >> count
or r4,r4,r7 # LSW |= t2
blr
+EXPORT_SYMBOL(__lshrdi3)
/*
* 64-bit comparison: __cmpdi2(s64 a, s64 b)
@@ -561,6 +568,7 @@ _GLOBAL(__cmpdi2)
bltlr
li r3,2
blr
+EXPORT_SYMBOL(__cmpdi2)
/*
* 64-bit comparison: __ucmpdi2(u64 a, u64 b)
* Returns 0 if a < b, 1 if a == b, 2 if a > b.
@@ -575,6 +583,7 @@ _GLOBAL(__ucmpdi2)
bltlr
li r3,2
blr
+EXPORT_SYMBOL(__ucmpdi2)
_GLOBAL(__bswapdi2)
rotlwi r9,r4,8
@@ -586,6 +595,7 @@ _GLOBAL(__bswapdi2)
mr r3,r9
mr r4,r10
blr
+EXPORT_SYMBOL(__bswapdi2)
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 9f0bed214bcb..4f178671f230 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,6 +27,7 @@
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
+#include <asm/export.h>
.text
@@ -110,6 +111,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
isync
blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
/*
* Like above, but only do the D-cache.
@@ -140,6 +142,7 @@ _GLOBAL(flush_dcache_range)
bdnz 0b
sync
blr
+EXPORT_SYMBOL(flush_dcache_range)
/*
* Like above, but works on non-mapped physical addresses.
@@ -243,6 +246,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
blr
_GLOBAL(__bswapdi2)
+EXPORT_SYMBOL(__bswapdi2)
srdi r8,r3,32
rlwinm r7,r3,8,0xffffffff
rlwimi r7,r3,24,0,7
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 95d3769a2e26..74bec5498972 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -56,6 +56,7 @@ static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
/* ISA Memory physical address */
resource_size_t isa_mem_base;
+EXPORT_SYMBOL(isa_mem_base);
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1f7930037cb7..678f87a63645 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -32,6 +32,8 @@
unsigned long isa_io_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(pci_dram_offset);
void pcibios_make_OF_bus_map(void);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
deleted file mode 100644
index 9f01e28ecef3..000000000000
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/ftrace.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-#include <asm/cacheflush.h>
-#include <asm/epapr_hcalls.h>
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(flush_dcache_range);
-#endif
-EXPORT_SYMBOL(flush_icache_range);
-
-EXPORT_SYMBOL(empty_zero_page);
-
-long long __bswapdi2(long long);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(load_fp_state);
-EXPORT_SYMBOL(store_fp_state);
-#endif
-
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(load_vr_state);
-EXPORT_SYMBOL(store_vr_state);
-#endif
-
-#ifdef CONFIG_EPAPR_PARAVIRT
-EXPORT_SYMBOL(epapr_hypercall_start);
-#endif
-
-EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c
deleted file mode 100644
index 2bfaafe5be99..000000000000
--- a/arch/powerpc/kernel/ppc_ksyms_32.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <linux/export.h>
-#include <linux/smp.h>
-
-#include <asm/page.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/time.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/dcr.h>
-
-EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
-EXPORT_SYMBOL(DMA_MODE_READ);
-EXPORT_SYMBOL(DMA_MODE_WRITE);
-
-#if defined(CONFIG_PCI)
-EXPORT_SYMBOL(isa_io_base);
-EXPORT_SYMBOL(isa_mem_base);
-EXPORT_SYMBOL(pci_dram_offset);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_hw_index);
-#endif
-
-long long __ashrdi3(long long, int);
-long long __ashldi3(long long, int);
-long long __lshrdi3(long long, int);
-int __ucmpdi2(unsigned long long, unsigned long long);
-int __cmpdi2(long long, long long);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__cmpdi2);
-
-EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(tb_ticks_per_jiffy);
-
-EXPORT_SYMBOL(switch_mmu_context);
-
-#ifdef CONFIG_PPC_STD_MMU_32
-extern long mol_trampoline;
-EXPORT_SYMBOL(mol_trampoline); /* For MOL */
-EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-#ifdef CONFIG_SMP
-extern int mmu_hash_lock;
-EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
-#endif /* CONFIG_SMP */
-extern long *intercept_table;
-EXPORT_SYMBOL(intercept_table);
-#endif /* CONFIG_PPC_STD_MMU_32 */
-
-#ifdef CONFIG_PPC_DCR_NATIVE
-EXPORT_SYMBOL(__mtdcr);
-EXPORT_SYMBOL(__mfdcr);
-#endif
-
-EXPORT_SYMBOL(flush_instruction_cache);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9e7c10fe205f..ce6dc61b15b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
/* Ensure that restore_math() will restore */
if (msr_diff & MSR_FP)
current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
current->thread.load_vec = 1;
#endif
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db327c8..010b7b310237 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 0);
+ sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 1) == sizeof(tmp))
+ sizeof(tmp),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
break;
ret = -EIO;
break;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index dba265c586df..270ee30abdcf 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -131,15 +131,26 @@ void machine_shutdown(void)
ppc_md.machine_shutdown();
}
+static void machine_hang(void)
+{
+ pr_emerg("System Halted, OK to turn off power\n");
+ local_irq_disable();
+ while (1)
+ ;
+}
+
void machine_restart(char *cmd)
{
machine_shutdown();
if (ppc_md.restart)
ppc_md.restart(cmd);
+
smp_send_stop();
- printk(KERN_EMERG "System Halted, OK to turn off power\n");
- local_irq_disable();
- while (1) ;
+
+ do_kernel_restart(cmd);
+ mdelay(1000);
+
+ machine_hang();
}
void machine_power_off(void)
@@ -147,10 +158,9 @@ void machine_power_off(void)
machine_shutdown();
if (pm_power_off)
pm_power_off();
+
smp_send_stop();
- printk(KERN_EMERG "System Halted, OK to turn off power\n");
- local_irq_disable();
- while (1) ;
+ machine_hang();
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
@@ -163,10 +173,9 @@ void machine_halt(void)
machine_shutdown();
if (ppc_md.halt)
ppc_md.halt();
+
smp_send_stop();
- printk(KERN_EMERG "System Halted, OK to turn off power\n");
- local_irq_disable();
- while (1) ;
+ machine_hang();
}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 24ec3ea4b3a2..5fe79182f0fa 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/memblock.h>
+#include <linux/export.h>
#include <asm/io.h>
#include <asm/prom.h>
@@ -47,11 +48,16 @@ int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
+EXPORT_SYMBOL(smp_hw_index);
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+
/*
* These are used in binfmt_elf.c to put aux entries on the stack
* for each elf executable being started.
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 67859b7d1c97..bc3f7d0d7b79 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -596,6 +596,7 @@ void timer_interrupt(struct pt_regs * regs)
irq_exit();
set_irq_regs(old_regs);
}
+EXPORT_SYMBOL(timer_interrupt);
/*
* Hypervisor decrementer interrupts shouldn't occur but are sometimes
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a1f8f5641e9e..023a462725b5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -273,7 +273,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
force_sig_info(signr, &info, current);
}
-#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
/* See if any machine dependent calls */
@@ -291,6 +290,7 @@ void system_reset_exception(struct pt_regs *regs)
/* What should we do here? We could issue a shutdown or hard reset. */
}
+#ifdef CONFIG_PPC64
/*
* This function is called in real mode. Strictly no printk's please.
*
@@ -352,12 +352,11 @@ static inline int check_io_access(struct pt_regs *regs)
* For the debug message, we look at the preceding
* load or store.
*/
- if (*nip == 0x60000000) /* nop */
+ if (*nip == PPC_INST_NOP)
nip -= 2;
- else if (*nip == 0x4c00012c) /* isync */
+ else if (*nip == PPC_INST_ISYNC)
--nip;
- if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
- /* sync or twi */
+ if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
unsigned int rb;
--nip;
@@ -668,6 +667,31 @@ int machine_check_e200(struct pt_regs *regs)
return 0;
}
+#elif defined(CONFIG_PPC_8xx)
+int machine_check_8xx(struct pt_regs *regs)
+{
+ unsigned long reason = get_mc_reason(regs);
+
+ pr_err("Machine check in kernel mode.\n");
+ pr_err("Caused by (from SRR1=%lx): ", reason);
+ if (reason & 0x40000000)
+ pr_err("Fetch error at address %lx\n", regs->nip);
+ else
+ pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+ /* the qspan pci read routines can cause machine checks -- Cort
+ *
+ * yuck !!! that totally needs to go away ! There are better ways
+ * to deal with that than having a wart in the mcheck handler.
+ * -- BenH
+ */
+ bad_page_fault(regs, regs->dar, SIGBUS);
+ return 1;
+#else
+ return 0;
+#endif
+}
#else
int machine_check_generic(struct pt_regs *regs)
{
@@ -727,17 +751,6 @@ void machine_check_exception(struct pt_regs *regs)
if (recover > 0)
goto bail;
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
- /* the qspan pci read routines can cause machine checks -- Cort
- *
- * yuck !!! that totally needs to go away ! There are better ways
- * to deal with that than having a wart in the mcheck handler.
- * -- BenH
- */
- bad_page_fault(regs, regs->dar, SIGBUS);
- goto bail;
-#endif
-
if (debugger_fault_handler(regs))
goto bail;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index bc85bdff4e01..0c123f3406cd 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -6,6 +6,7 @@
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
/*
* Load state from memory into VMX registers including VSCR.
@@ -17,6 +18,7 @@ _GLOBAL(load_vr_state)
mtvscr v0
REST_32VRS(0,r4,r3)
blr
+EXPORT_SYMBOL(load_vr_state)
/*
* Store VMX state into memory, including VSCR.
@@ -28,6 +30,7 @@ _GLOBAL(store_vr_state)
li r4, VRSTATE_VSCR
stvx v0, r4, r3
blr
+EXPORT_SYMBOL(store_vr_state)
/*
* Disable VMX for the task which had it previously,
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 82ff5de8b1e7..a0ea63ac2b52 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -23,6 +23,7 @@
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
+#include <asm/smp.h>
#include "book3s_xics.h"
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index ad5290005ca4..309361e86523 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@ ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
-obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
+obj-y += string.o alloc.o crtsavres.o code-patching.o \
feature-fixups.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index aa8214f30c92..ea29a5d67743 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -17,6 +17,7 @@
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
.text
@@ -68,6 +69,7 @@ _GLOBAL(__csum_partial)
adde r5,r5,r0
5: addze r3,r5 /* add in final carry */
blr
+EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
@@ -297,3 +299,4 @@ dst_error:
.long 41b,dst_error
.long 50b,src_error
.long 51b,dst_error
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index fdec6e613e95..fd9176671f9f 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -16,6 +16,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
/*
* Computes the checksum of a memory block at buff, length len,
@@ -176,6 +177,7 @@ _GLOBAL(__csum_partial)
add r3,r4,r0
srdi r3,r3,32
blr
+EXPORT_SYMBOL(__csum_partial)
.macro srcnr
@@ -430,3 +432,4 @@ dstnr; stb r6,0(r4)
li r6,-EFAULT
stw r6,0(r8)
blr
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 99f37f24185c..40cce33b08d6 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -12,6 +12,7 @@
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
#define COPY_16_BYTES \
lwz r7,4(r4); \
@@ -92,6 +93,7 @@ _GLOBAL(memset)
subf r6,r0,r6
cmplwi 0,r4,0
bne 2f /* Use normal procedure if r4 is not zero */
+EXPORT_SYMBOL(memset)
_GLOBAL(memset_nocache_branch)
b 2f /* Skip optimised bloc until cache is enabled */
@@ -216,6 +218,8 @@ _GLOBAL(memcpy)
stbu r0,1(r6)
bdnz 40b
65: blr
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(memmove)
generic_memcpy:
srwi. r7,r5,3
@@ -507,3 +511,4 @@ _GLOBAL(__copy_tofrom_user)
.long 112b,120b
.long 114b,120b
.text
+EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index a3c4dc4defdd..21367b3a8146 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/export.h>
.section ".toc","aw"
PPC64_CACHES:
@@ -110,3 +111,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_CP_USE_DCBTZ)
std r11,120(r3)
std r12,128(r3)
blr
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index f09899e35991..60386b2c99bb 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -8,6 +8,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
#ifdef __BIG_ENDIAN__
#define sLd sld /* Shift towards low-numbered address. */
@@ -359,6 +360,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
addi r3,r3,8
171:
177:
+179:
addi r3,r3,8
370:
372:
@@ -373,7 +375,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
173:
174:
175:
-179:
181:
184:
186:
@@ -671,3 +672,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
.llong 89b,100b
.llong 90b,100b
.llong 91b,100b
+EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 19e66001a4f9..3de7ac154f24 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -19,6 +19,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
/* Note: This code relies on -mminimal-toc */
@@ -32,6 +33,7 @@ FTR_SECTION_ELSE
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight8)
_GLOBAL(__arch_hweight16)
BEGIN_FTR_SECTION
@@ -54,6 +56,7 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight16)
_GLOBAL(__arch_hweight32)
BEGIN_FTR_SECTION
@@ -79,6 +82,7 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight32)
_GLOBAL(__arch_hweight64)
BEGIN_FTR_SECTION
@@ -108,3 +112,4 @@ FTR_SECTION_ELSE
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+EXPORT_SYMBOL(__arch_hweight64)
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index eda7a96161ab..85fa9869aec5 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
_GLOBAL(memset)
neg r0,r3
@@ -77,6 +78,7 @@ _GLOBAL(memset)
10: bflr 31
stb r4,0(r6)
blr
+EXPORT_SYMBOL(memset)
_GLOBAL_TOC(memmove)
cmplw 0,r3,r4
@@ -119,3 +121,4 @@ _GLOBAL(backwards_memcpy)
beq 2b
mtctr r7
b 1b
+EXPORT_SYMBOL(memmove)
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 8953d2382a65..d75d18b7bd55 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -8,6 +8,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <asm/ppc_asm.h>
+#include <asm/export.h>
#define off8 r6
#define off16 r7
@@ -231,3 +232,4 @@ _GLOBAL(memcmp)
ld r28,-32(r1)
ld r27,-40(r1)
blr
+EXPORT_SYMBOL(memcmp)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 32a06ec395d2..f4d6088e2d53 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -8,6 +8,7 @@
*/
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
.align 7
_GLOBAL_TOC(memcpy)
@@ -219,3 +220,4 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
4: ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) /* return dest pointer */
blr
#endif
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
deleted file mode 100644
index ae69d846a841..000000000000
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <net/checksum.h>
-
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memchr);
-
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strncmp);
-
-#ifndef CONFIG_GENERIC_CSUM
-EXPORT_SYMBOL(__csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_generic);
-#endif
-
-EXPORT_SYMBOL(__copy_tofrom_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(copy_page);
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-#endif
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index beabc68d9a1e..d13e07603519 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -11,6 +11,7 @@
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
+#include <asm/export.h>
.section __ex_table,"a"
PPC_LONG_ALIGN
@@ -36,6 +37,7 @@ _GLOBAL(strncpy)
2: stbu r0,1(r6) /* clear it out if so */
bdnz 2b
blr
+EXPORT_SYMBOL(strncpy)
_GLOBAL(strncmp)
PPC_LCMPI 0,r5,0
@@ -53,6 +55,7 @@ _GLOBAL(strncmp)
blr
2: li r3,0
blr
+EXPORT_SYMBOL(strncmp)
#ifdef CONFIG_PPC32
_GLOBAL(memcmp)
@@ -68,6 +71,7 @@ _GLOBAL(memcmp)
blr
2: li r3,0
blr
+EXPORT_SYMBOL(memcmp)
#endif
_GLOBAL(memchr)
@@ -82,6 +86,7 @@ _GLOBAL(memchr)
beqlr
2: li r3,0
blr
+EXPORT_SYMBOL(memchr)
#ifdef CONFIG_PPC32
_GLOBAL(__clear_user)
@@ -125,4 +130,5 @@ _GLOBAL(__clear_user)
PPC_LONG 1b,91b
PPC_LONG 8b,92b
.text
+EXPORT_SYMBOL(__clear_user)
#endif
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 7bd9549a90a2..57ace356c949 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -20,6 +20,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
+#include <asm/export.h>
.section ".toc","aw"
PPC64_CACHES:
@@ -200,3 +201,4 @@ err1; dcbz r0,r3
cmpdi r4,32
blt .Lshort_clear
b .Lmedium_clear
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index bb0354222b11..362954f98029 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
switch (REGION_ID(ea)) {
case USER_REGION_ID:
pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+ if (mm == NULL)
+ return 1;
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 115347f74ce5..09cc50c8dace 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -26,6 +26,7 @@
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/export.h>
#ifdef CONFIG_SMP
.section .bss
@@ -33,6 +34,7 @@
.globl mmu_hash_lock
mmu_hash_lock:
.space 4
+EXPORT_SYMBOL(mmu_hash_lock)
#endif /* CONFIG_SMP */
/*
@@ -575,6 +577,7 @@ _GLOBAL(flush_hash_pages)
rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
stwcx. r8,0,r5 /* update the pte */
bne- 33b
+EXPORT_SYMBOL(flush_hash_pages)
/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 90480e23fd2c..44d3c3a38e3e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -529,7 +529,7 @@ static bool might_have_hea(void)
*/
#ifdef CONFIG_IBMEBUS
return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
- !firmware_has_feature(FW_FEATURE_SPLPAR);
+ firmware_has_feature(FW_FEATURE_SPLPAR);
#else
return false;
#endif
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 75b9cd6150cc..a51c188b81f3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -845,7 +845,7 @@ void __init dump_numa_cpu_topology(void)
return;
for_each_online_node(node) {
- printk(KERN_DEBUG "Node %d CPUs:", node);
+ pr_info("Node %d CPUs:", node);
count = 0;
/*
@@ -856,52 +856,18 @@ void __init dump_numa_cpu_topology(void)
if (cpumask_test_cpu(cpu,
node_to_cpumask_map[node])) {
if (count == 0)
- printk(" %u", cpu);
+ pr_cont(" %u", cpu);
++count;
} else {
if (count > 1)
- printk("-%u", cpu - 1);
+ pr_cont("-%u", cpu - 1);
count = 0;
}
}
if (count > 1)
- printk("-%u", nr_cpu_ids - 1);
- printk("\n");
- }
-}
-
-static void __init dump_numa_memory_topology(void)
-{
- unsigned int node;
- unsigned int count;
-
- if (min_common_depth == -1 || !numa_enabled)
- return;
-
- for_each_online_node(node) {
- unsigned long i;
-
- printk(KERN_DEBUG "Node %d Memory:", node);
-
- count = 0;
-
- for (i = 0; i < memblock_end_of_DRAM();
- i += (1 << SECTION_SIZE_BITS)) {
- if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
- if (count == 0)
- printk(" 0x%lx", i);
- ++count;
- } else {
- if (count > 0)
- printk("-0x%lx", i);
- count = 0;
- }
- }
-
- if (count > 0)
- printk("-0x%lx", i);
- printk("\n");
+ pr_cont("-%u", nr_cpu_ids - 1);
+ pr_cont("\n");
}
}
@@ -947,8 +913,6 @@ void __init initmem_init(void)
if (parse_numa_properties())
setup_nonnuma();
- else
- dump_numa_memory_topology();
memblock_dump_all();
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 0e49ec541ab5..bda8c43be78a 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
if (unlikely(pid == MMU_NO_CONTEXT))
goto no_context;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
pid = mm ? mm->context.id : 0;
if (unlikely(pid == MMU_NO_CONTEXT))
goto bail;
- if (!mm_is_core_local(mm)) {
+ if (!mm_is_thread_local(mm)) {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
@@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
{
unsigned long pid;
unsigned long addr;
- int local = mm_is_core_local(mm);
+ int local = mm_is_thread_local(mm);
unsigned long ap = mmu_get_ap(psize);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index 7c7df4003820..994d1a959e20 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -30,8 +30,8 @@ config EP8248E
select 8272
select 8260
select FSL_SOC
- select PHYLIB
- select MDIO_BITBANG
+ select PHYLIB if NETDEVICES
+ select MDIO_BITBANG if PHYLIB
help
This enables support for the Embedded Planet EP8248E board.
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index cdab847749e6..8fec050f2d5b 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -298,7 +298,9 @@ static const struct of_device_id of_bus_ids[] __initconst = {
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
- platform_driver_register(&ep8248e_mdio_driver);
+
+ if (IS_ENABLED(CONFIG_MDIO_BITBANG))
+ platform_driver_register(&ep8248e_mdio_driver);
return 0;
}
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 17e54339f8d9..575afd6eb36a 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -30,9 +30,7 @@
*/
static void __init asp834x_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("asp834x_setup_arch()", 0);
-
+ mpc83xx_setup_arch();
mpc834x_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index e7fbd6366abb..d8642a4afc74 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -130,10 +130,7 @@ static void __init mpc83xx_km_setup_arch(void)
struct device_node *np;
#endif
- if (ppc_md.progress)
- ppc_md.progress("kmpbec83xx_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
#ifdef CONFIG_QUICC_ENGINE
np = of_find_node_by_name(NULL, "par_io");
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index 8899aa9d11f5..d75c9816a5c9 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -142,3 +142,11 @@ void __init mpc83xx_setup_pci(void)
mpc83xx_add_bridge(np);
}
#endif
+
+void __init mpc83xx_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("mpc83xx_setup_arch()", 0);
+
+ mpc83xx_setup_pci();
+}
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 040d5d085467..272c41c387b9 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -27,10 +27,7 @@
*/
static void __init mpc830x_rdb_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc830x_rdb_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc831x_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 40e0d8307b59..fd80fd570e67 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -28,10 +28,7 @@
*/
static void __init mpc831x_rdb_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc831x_rdb_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc831x_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index cdfa47c4d394..bb7b25acf26f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -58,8 +58,7 @@ static void __init mpc832x_sys_setup_arch(void)
struct device_node *np;
u8 __iomem *bcsr_regs = NULL;
- if (ppc_md.progress)
- ppc_md.progress("mpc832x_sys_setup_arch()", 0);
+ mpc83xx_setup_arch();
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
@@ -71,8 +70,6 @@ static void __init mpc832x_sys_setup_arch(void)
of_node_put(np);
}
- mpc83xx_setup_pci();
-
#ifdef CONFIG_QUICC_ENGINE
if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0d6a62fc5864..d7c9b186954d 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -197,10 +197,7 @@ static void __init mpc832x_rdb_setup_arch(void)
struct device_node *np;
#endif
- if (ppc_md.progress)
- ppc_md.progress("mpc832x_rdb_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
#ifdef CONFIG_QUICC_ENGINE
if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index 8fd0c1e8b182..73a5267df497 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -57,10 +57,7 @@ machine_device_initcall(mpc834x_itx, mpc834x_itx_declare_of_platform_devices);
*/
static void __init mpc834x_itx_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc834x_itx_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc834x_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index eeaee6123bb3..009cfc18a4ee 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -76,10 +76,7 @@ static int mpc834xemds_usb_cfg(void)
*/
static void __init mpc834x_mds_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc834x_mds_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc834xemds_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index dacf4c2df069..4fc3051c2b2e 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -66,8 +66,7 @@ static void __init mpc836x_mds_setup_arch(void)
struct device_node *np;
u8 __iomem *bcsr_regs = NULL;
- if (ppc_md.progress)
- ppc_md.progress("mpc836x_mds_setup_arch()", 0);
+ mpc83xx_setup_arch();
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
@@ -79,8 +78,6 @@ static void __init mpc836x_mds_setup_arch(void)
of_node_put(np);
}
- mpc83xx_setup_pci();
-
#ifdef CONFIG_QUICC_ENGINE
if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
par_io_init(np);
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index cf67ac93ddcb..93f024fd9b45 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -31,10 +31,7 @@ machine_device_initcall(mpc836x_rdk, mpc83xx_declare_of_platform_devices);
static void __init mpc836x_rdk_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
}
/*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 652b97d699c9..3b34cc1f626c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -79,10 +79,7 @@ out:
*/
static void __init mpc837x_mds_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc837x_mds_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc837xmds_usb_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 667731d81676..0c55fa6af2d5 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -50,10 +50,7 @@ static void mpc837x_rdb_sd_cfg(void)
*/
static void __init mpc837x_rdb_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("mpc837x_rdb_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
mpc837x_usb_cfg();
mpc837x_rdb_sd_cfg();
}
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index ad484199eff7..636eb9d0401a 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -86,5 +86,6 @@ extern void mpc83xx_setup_pci(void);
#endif
extern int mpc83xx_declare_of_platform_devices(void);
+extern void mpc83xx_setup_arch(void);
#endif /* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/sbc834x.c b/arch/powerpc/platforms/83xx/sbc834x.c
index b867e88dfb0d..cb4bdabfdf1c 100644
--- a/arch/powerpc/platforms/83xx/sbc834x.c
+++ b/arch/powerpc/platforms/83xx/sbc834x.c
@@ -47,10 +47,7 @@
*/
static void __init sbc834x_setup_arch(void)
{
- if (ppc_md.progress)
- ppc_md.progress("sbc834x_setup_arch()", 0);
-
- mpc83xx_setup_pci();
+ mpc83xx_setup_arch();
}
machine_device_initcall(sbc834x, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index df25a3ed489d..9dc1d28975b9 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -72,7 +72,7 @@ config MPC85xx_CDS
config MPC85xx_MDS
bool "Freescale MPC85xx MDS"
select DEFAULT_UIMAGE
- select PHYLIB
+ select PHYLIB if NETDEVICES
select HAS_RAPIDIO
select SWIOTLB
help
diff --git a/arch/powerpc/platforms/85xx/bsc913x_qds.c b/arch/powerpc/platforms/85xx/bsc913x_qds.c
index 07dd6ae3ec52..d2f45569a026 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_qds.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_qds.c
@@ -72,7 +72,6 @@ define_machine(bsc9132_qds) {
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
index e48f6710e6d5..0ffdb4a80c2a 100644
--- a/arch/powerpc/platforms/85xx/bsc913x_rdb.c
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -59,7 +59,6 @@ define_machine(bsc9131_rdb) {
.setup_arch = bsc913x_rdb_setup_arch,
.init_IRQ = bsc913x_rdb_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 3b9e3f0f9aec..4df1b4026eab 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -65,7 +65,6 @@ define_machine(c293_pcie) {
.setup_arch = c293_pcie_setup_arch,
.init_IRQ = c293_pcie_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 3a6a84f07f43..1179115a4b5c 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -225,7 +225,6 @@ define_machine(corenet_generic) {
#else
.get_irq = mpic_get_coreint_irq,
#endif
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 14af36a7fa9c..f29c6f0909f3 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -215,7 +215,6 @@ define_machine(ge_imp3a) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 6ba687f19e45..94a7f92c858f 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -77,7 +77,6 @@ define_machine(mpc8536_ds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 8756715c7a47..f3e055fdd1de 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -170,7 +170,6 @@ define_machine(mpc85xx_ads) {
.init_IRQ = mpc85xx_ads_pic_init,
.show_cpuinfo = mpc85xx_ads_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 86f20156178e..224db30c497b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -83,7 +83,8 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static void __noreturn mpc85xx_cds_restart(char *cmd)
+static int mpc85xx_cds_restart(struct notifier_block *this,
+ unsigned long mode, void *cmd)
{
struct pci_dev *dev;
u_char tmp;
@@ -108,12 +109,25 @@ static void __noreturn mpc85xx_cds_restart(char *cmd)
}
/*
- * If we can't find the VIA chip (maybe the P2P bridge is disabled)
- * or the VIA chip reset didn't work, just use the default reset.
+ * If we can't find the VIA chip (maybe the P2P bridge is
+ * disabled) or the VIA chip reset didn't work, just return
+ * and let default reset sequence happen.
*/
- fsl_rstcr_restart(NULL);
+ return NOTIFY_DONE;
}
+static int mpc85xx_cds_restart_register(void)
+{
+ static struct notifier_block restart_handler;
+
+ restart_handler.notifier_call = mpc85xx_cds_restart;
+ restart_handler.priority = 192;
+
+ return register_restart_handler(&restart_handler);
+}
+machine_arch_initcall(mpc85xx_cds, mpc85xx_cds_restart_register);
+
+
static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
{
u_char c;
@@ -380,11 +394,8 @@ define_machine(mpc85xx_cds) {
.show_cpuinfo = mpc85xx_cds_show_cpuinfo,
.get_irq = mpic_get_irq,
#ifdef CONFIG_PCI
- .restart = mpc85xx_cds_restart,
.pcibios_fixup_bus = mpc85xx_cds_fixup_bus,
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
-#else
- .restart = fsl_rstcr_restart,
#endif
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ed69c7ee1829..dc9e035cc637 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -204,7 +204,6 @@ define_machine(mpc8544_ds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -219,7 +218,6 @@ define_machine(mpc8572_ds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -234,7 +232,6 @@ define_machine(p2020_ds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index fa9cd710d2ae..d7e440e6dba3 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -63,6 +63,8 @@
#define DBG(fmt...)
#endif
+#if IS_BUILTIN(CONFIG_PHYLIB)
+
#define MV88E1111_SCR 0x10
#define MV88E1111_SCR_125CLK 0x0010
static int mpc8568_fixup_125_clock(struct phy_device *phydev)
@@ -152,6 +154,8 @@ static int mpc8568_mds_phy_fixups(struct phy_device *phydev)
return err;
}
+#endif
+
/* ************************************************************************
*
* Setup the architecture
@@ -313,6 +317,7 @@ static void __init mpc85xx_mds_setup_arch(void)
swiotlb_detect_4g();
}
+#if IS_BUILTIN(CONFIG_PHYLIB)
static int __init board_fixups(void)
{
@@ -342,9 +347,12 @@ static int __init board_fixups(void)
return 0;
}
+
machine_arch_initcall(mpc8568_mds, board_fixups);
machine_arch_initcall(mpc8569_mds, board_fixups);
+#endif
+
static int __init mpc85xx_publish_devices(void)
{
if (machine_is(mpc8568_mds))
@@ -385,7 +393,6 @@ define_machine(mpc8568_mds) {
.setup_arch = mpc85xx_mds_setup_arch,
.init_IRQ = mpc85xx_mds_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
@@ -405,7 +412,6 @@ define_machine(mpc8569_mds) {
.setup_arch = mpc85xx_mds_setup_arch,
.init_IRQ = mpc85xx_mds_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
@@ -426,7 +432,6 @@ define_machine(p1021_mds) {
.setup_arch = mpc85xx_mds_setup_arch,
.init_IRQ = mpc85xx_mds_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
@@ -434,4 +439,3 @@ define_machine(p1021_mds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
};
-
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index c1499cbf3786..10069503e39f 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -213,7 +213,6 @@ define_machine(p2020_rdb) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -228,7 +227,6 @@ define_machine(p1020_rdb) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -243,7 +241,6 @@ define_machine(p1021_rdb_pc) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -258,7 +255,6 @@ define_machine(p2020_rdb_pc) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -273,7 +269,6 @@ define_machine(p1025_rdb) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -288,7 +283,6 @@ define_machine(p1020_mbg_pc) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -303,7 +297,6 @@ define_machine(p1020_utm_pc) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -318,7 +311,6 @@ define_machine(p1020_rdb_pc) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -333,7 +325,6 @@ define_machine(p1020_rdb_pd) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -348,7 +339,6 @@ define_machine(p1024_rdb) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/mvme2500.c b/arch/powerpc/platforms/85xx/mvme2500.c
index acc3d0d6049d..d5af0723a69e 100644
--- a/arch/powerpc/platforms/85xx/mvme2500.c
+++ b/arch/powerpc/platforms/85xx/mvme2500.c
@@ -66,7 +66,6 @@ define_machine(mvme2500) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 661d7b59e413..78d13b364cd6 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -79,7 +79,6 @@ define_machine(p1010_rdb) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 63568d68c76f..0908abd7e36f 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -568,7 +568,6 @@ define_machine(p1022_ds) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index 2f2943600301..276e00ab3dde 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -148,7 +148,6 @@ define_machine(p1022_rdk) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index 40d8de57c341..3e8cd0324dfc 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -110,7 +110,6 @@ define_machine(p1023_rdb) {
.setup_arch = mpc85xx_rdb_setup_arch,
.init_IRQ = mpc85xx_rdb_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
diff --git a/arch/powerpc/platforms/85xx/ppa8548.c b/arch/powerpc/platforms/85xx/ppa8548.c
index 2410167b290a..33c5ba644fa5 100644
--- a/arch/powerpc/platforms/85xx/ppa8548.c
+++ b/arch/powerpc/platforms/85xx/ppa8548.c
@@ -91,7 +91,6 @@ define_machine(ppa8548) {
.init_IRQ = ppa8548_pic_init,
.show_cpuinfo = ppa8548_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index 50d745809809..b63a8548366f 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -77,7 +77,6 @@ define_machine(qemu_e500) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 62b6c45a5a9b..2c670848ff08 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -130,7 +130,6 @@ define_machine(sbc8548) {
.init_IRQ = sbc8548_pic_init,
.show_cpuinfo = sbc8548_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 79fd0dfd4b82..21d6aaa5c3e4 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -38,18 +38,18 @@ static void gpio_halt_wfn(struct work_struct *work)
}
static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn);
-static void gpio_halt_cb(void)
+static void __noreturn gpio_halt_cb(void)
{
enum of_gpio_flags flags;
int trigger, gpio;
if (!halt_node)
- return;
+ panic("No reset GPIO information was provided in DT\n");
gpio = of_get_gpio_flags(halt_node, 0, &flags);
if (!gpio_is_valid(gpio))
- return;
+ panic("Provided GPIO is invalid\n");
trigger = (flags == OF_GPIO_ACTIVE_LOW);
@@ -57,6 +57,8 @@ static void gpio_halt_cb(void)
/* Probably won't return */
gpio_set_value(gpio, trigger);
+
+ panic("Halt failed\n");
}
/* This IRQ means someone pressed the power button and it is waiting for us
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index cd255acde2e2..8da4ed90338d 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -91,7 +91,6 @@ define_machine(socrates) {
.setup_arch = socrates_setup_arch,
.init_IRQ = socrates_pic_init,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 91b824c4dc08..1a1d44ea1754 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -103,7 +103,6 @@ define_machine(stx_gp3) {
.init_IRQ = stx_gp3_pic_init,
.show_cpuinfo = stx_gp3_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index b7c54454d611..9fc20a37835e 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -132,7 +132,6 @@ define_machine(tqm85xx) {
.init_IRQ = tqm85xx_pic_init,
.show_cpuinfo = tqm85xx_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 1bc02a87f597..360f6253e9ff 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -140,7 +140,6 @@ define_machine(twr_p1025) {
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index cf0c70ff026e..cd6ce845f398 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -167,7 +167,6 @@ define_machine(xes_mpc8572) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -182,7 +181,6 @@ define_machine(xes_mpc8548) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
@@ -197,7 +195,6 @@ define_machine(xes_mpc8540) {
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index ef684afb63c6..6b99300edd36 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -204,7 +204,6 @@ define_machine(gef_ppc9a) {
.init_IRQ = gef_ppc9a_init_irq,
.show_cpuinfo = gef_ppc9a_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 67dd0c231646..8cdeca061127 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -191,7 +191,6 @@ define_machine(gef_sbc310) {
.init_IRQ = gef_sbc310_init_irq,
.show_cpuinfo = gef_sbc310_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 805026976cac..da8723ae23ec 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -181,7 +181,6 @@ define_machine(gef_sbc610) {
.init_IRQ = gef_sbc610_init_irq,
.show_cpuinfo = gef_sbc610_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index fef0582eddf1..a5d73fabe4d1 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -331,7 +331,6 @@ define_machine(mpc86xx_hpcd) {
.setup_arch = mpc86xx_hpcd_setup_arch,
.init_IRQ = mpc86xx_init_irq,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 5ae42a037065..a0e989ed4b6f 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -130,7 +130,6 @@ define_machine(mpc86xx_hpcn) {
.init_IRQ = mpc86xx_init_irq,
.show_cpuinfo = mpc86xx_hpcn_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
index addb41e7cd14..835352e63dc3 100644
--- a/arch/powerpc/platforms/86xx/mvme7100.c
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -111,7 +111,6 @@ define_machine(mvme7100) {
.setup_arch = mvme7100_setup_arch,
.init_IRQ = mpc86xx_init_irq,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
index 52af5735742e..93db35d4f6eb 100644
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ b/arch/powerpc/platforms/86xx/sbc8641d.c
@@ -82,7 +82,6 @@ define_machine(sbc8641) {
.init_IRQ = mpc86xx_init_irq,
.show_cpuinfo = sbc8641_show_cpuinfo,
.get_irq = mpic_get_irq,
- .restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 86707e67843f..aa35245d8d6d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -393,7 +393,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
unsigned long *vpn, int count,
int psize, int ssize)
{
- unsigned long param[8];
+ unsigned long param[PLPAR_HCALL9_BUFSIZE];
int i = 0, pix = 0, rc;
unsigned long flags = 0;
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -522,7 +522,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
- unsigned long param[9];
+ unsigned long param[PLPAR_HCALL9_BUFSIZE];
unsigned long hash, index, shift, hidx, slot;
real_pte_t pte;
int psize, ssize;
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh
index 2e4ebd0e25b3..ec2d5c835170 100755
--- a/arch/powerpc/relocs_check.sh
+++ b/arch/powerpc/relocs_check.sh
@@ -30,6 +30,7 @@ bad_relocs=$(
# On PPC64:
# R_PPC64_RELATIVE, R_PPC64_NONE
# R_PPC64_ADDR64 mach_<name>
+ # R_PPC64_ADDR64 __crc_<name>
# On PPC:
# R_PPC_RELATIVE, R_PPC_ADDR16_HI,
# R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
@@ -41,7 +42,8 @@ R_PPC_ADDR16_HI
R_PPC_ADDR16_HA
R_PPC_RELATIVE
R_PPC_NONE' |
- grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
+ grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_' |
+ grep -E -v '\<R_PPC64_ADDR64[[:space:]]+__crc_'
)
if [ -z "$bad_relocs" ]; then
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 3c0eb9b25535..986cd111d4df 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -233,8 +233,6 @@ void __init cpm_reset(void)
else
out_be32(&siu_conf->sc_sdcr, 1);
immr_unmap(siu_conf);
-
- cpm_muram_init();
}
static DEFINE_SPINLOCK(cmd_lock);
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 8dc1e24f3c23..f78ff841652c 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -66,10 +66,6 @@ void __init cpm2_reset(void)
cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
#endif
- /* Reclaim the DP memory for our use.
- */
- cpm_muram_init();
-
/* Tell everyone where the comm processor resides.
*/
cpmp = &cpm2_immr->im_cpm;
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 947f42007734..51bf749a4f3a 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -37,6 +37,21 @@
#include <linux/of_gpio.h>
#endif
+static int __init cpm_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
+ if (!np)
+ return -ENODEV;
+ cpm_muram_init();
+ of_node_put(np);
+ return 0;
+}
+subsys_initcall(cpm_init);
+
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
static u32 __iomem *cpm_udbg_txdesc;
static u8 __iomem *cpm_udbg_txbuf;
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index d3098ef1404a..e687bb2003ff 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -12,6 +12,7 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/bug.h>
+#include <asm/export.h>
#define DCR_ACCESS_PROLOG(table) \
cmpli cr0,r3,1024; \
@@ -28,9 +29,11 @@
_GLOBAL(__mfdcr)
DCR_ACCESS_PROLOG(__mfdcr_table)
+EXPORT_SYMBOL(__mfdcr)
_GLOBAL(__mtdcr)
DCR_ACCESS_PROLOG(__mtdcr_table)
+EXPORT_SYMBOL(__mtdcr)
__mfdcr_table:
mfdcr r3,0; blr
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 0ef9df49f0f2..d3a597456b6e 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,8 +111,7 @@ static struct pci_ops fsl_indirect_pcie_ops =
.write = indirect_write_config,
};
-#define MAX_PHYS_ADDR_BITS 40
-static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+static u64 pci64_dma_offset;
#ifdef CONFIG_SWIOTLB
static void setup_swiotlb_ops(struct pci_controller *hose)
@@ -132,12 +131,10 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
return -EIO;
/*
- * Fixup PCI devices that are able to DMA to above the physical
- * address width of the SoC such that we can address any internal
- * SoC address from across PCI if needed
+ * Fix up PCI devices that are able to DMA to the large inbound
+ * mapping that allows addressing any RAM address from across PCI.
*/
- if ((dev_is_pci(dev)) &&
- dma_mask >= DMA_BIT_MASK(MAX_PHYS_ADDR_BITS)) {
+ if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
set_dma_ops(dev, &dma_direct_ops);
set_dma_offset(dev, pci64_dma_offset);
}
@@ -387,6 +384,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
mem_log++;
piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
+ pci64_dma_offset = 1ULL << mem_log;
if (setup_inbound) {
/* Setup inbound memory window */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index a09ca704de58..d93056eedcb0 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -29,6 +29,7 @@
#include <linux/fsl_devices.h>
#include <linux/fs_enet_pd.h>
#include <linux/fs_uart_pd.h>
+#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/io.h>
@@ -180,23 +181,38 @@ EXPORT_SYMBOL(get_baudrate);
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static __be32 __iomem *rstcr;
+static int fsl_rstcr_restart(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ local_irq_disable();
+ /* set reset control register */
+ out_be32(rstcr, 0x2); /* HRESET_REQ */
+
+ return NOTIFY_DONE;
+}
+
static int __init setup_rstcr(void)
{
struct device_node *np;
+ static struct notifier_block restart_handler = {
+ .notifier_call = fsl_rstcr_restart,
+ .priority = 128,
+ };
+
for_each_node_by_name(np, "global-utilities") {
if ((of_get_property(np, "fsl,has-rstcr", NULL))) {
rstcr = of_iomap(np, 0) + 0xb0;
- if (!rstcr)
+ if (!rstcr) {
printk (KERN_ERR "Error: reset control "
"register not mapped!\n");
+ } else {
+ register_restart_handler(&restart_handler);
+ }
break;
}
}
- if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
- printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
-
of_node_put(np);
return 0;
@@ -204,15 +220,6 @@ static int __init setup_rstcr(void)
arch_initcall(setup_rstcr);
-void __noreturn fsl_rstcr_restart(char *cmd)
-{
- local_irq_disable();
- if (rstcr)
- /* set reset control register */
- out_be32(rstcr, 0x2); /* HRESET_REQ */
-
- while (1) ;
-}
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 433566a5ef19..d73daa4f0ccf 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -19,8 +19,6 @@ extern u32 fsl_get_sys_freq(void);
struct spi_board_info;
struct device_node;
-extern void __noreturn fsl_rstcr_restart(char *cmd);
-
/* The different ports that the DIU can be connected to */
enum fsl_diu_monitor_port {
FSL_DIU_PORT_DVI, /* DVI */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4d48cecfedd1..b9aac951a90f 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1249,7 +1249,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
/* Pick the physical address from the device tree if unspecified */
if (!phys_addr) {
/* Check if it is DCR-based */
- if (of_get_property(node, "dcr-reg", NULL)) {
+ if (of_property_read_bool(node, "dcr-reg")) {
flags |= MPIC_USES_DCR;
} else {
struct resource r;
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 9043d2e1e2ae..20f196b82a6e 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
generic-y += clkdev.h
+generic-y += export.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 64053d9ac3f2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -12,9 +12,7 @@
#ifndef __ASSEMBLY__
-unsigned long return_address(int depth);
-
-#define ftrace_return_address(n) return_address(n)
+#define ftrace_return_address(n) __builtin_return_address(n)
void _mcount(void);
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 03323175de30..602af692efdc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -192,7 +192,7 @@ struct task_struct;
struct mm_struct;
struct seq_file;
-typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
struct task_struct *task, unsigned long sp);
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 02613bad8bbb..3066031a73fe 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -9,6 +9,9 @@
#include <uapi/asm/unistd.h>
#define __IGNORE_time
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 72ccc41444dc..1f0fe98f6db9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -61,7 +61,7 @@ obj-y += entry.o reipl.o relocate_kernel.o
extra-y += head.o head64.o vmlinux.lds
-obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
+obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 43446fa2a4e5..c74c59236f44 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -2014,12 +2014,12 @@ void show_code(struct pt_regs *regs)
*ptr++ = '\t';
ptr += print_insn(ptr, code + start, addr);
start += opsize;
- printk("%s", buffer);
+ pr_cont("%s", buffer);
ptr = buffer;
ptr += sprintf(ptr, "\n ");
hops++;
}
- printk("\n");
+ pr_cont("\n");
}
void print_fn_code(unsigned char *code, unsigned long len)
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 6693383bc01b..55d4fe174fd9 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -38,10 +38,10 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 0))
+ return sp;
/* Follow the backchain. */
while (1) {
- if (func(data, sf->gprs[8]))
- return sp;
low = sp;
sp = sf->back_chain;
if (!sp)
@@ -49,6 +49,8 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
+ if (func(data, sf->gprs[8], 1))
+ return sp;
}
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
@@ -56,7 +58,7 @@ __dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
return sp;
regs = (struct pt_regs *) sp;
if (!user_mode(regs)) {
- if (func(data, regs->psw.addr))
+ if (func(data, regs->psw.addr, 1))
return sp;
}
low = sp;
@@ -85,33 +87,12 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
}
EXPORT_SYMBOL_GPL(dump_trace);
-struct return_address_data {
- unsigned long address;
- int depth;
-};
-
-static int __return_address(void *data, unsigned long address)
-{
- struct return_address_data *rd = data;
-
- if (rd->depth--)
- return 0;
- rd->address = address;
- return 1;
-}
-
-unsigned long return_address(int depth)
-{
- struct return_address_data rd = { .depth = depth + 2 };
-
- dump_trace(__return_address, &rd, NULL, current_stack_pointer());
- return rd.address;
-}
-EXPORT_SYMBOL_GPL(return_address);
-
-static int show_address(void *data, unsigned long address)
+static int show_address(void *data, unsigned long address, int reliable)
{
- printk("([<%016lx>] %pSR)\n", address, (void *)address);
+ if (reliable)
+ printk(" [<%016lx>] %pSR \n", address, (void *)address);
+ else
+ printk("([<%016lx>] %pSR)\n", address, (void *)address);
return 0;
}
@@ -138,14 +119,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
else
stack = (unsigned long *)task->thread.ksp;
}
+ printk(KERN_DEFAULT "Stack:\n");
for (i = 0; i < 20; i++) {
if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
break;
- if ((i * sizeof(long) % 32) == 0)
- printk("%s ", i == 0 ? "" : "\n");
- printk("%016lx ", *stack++);
+ if (i % 4 == 0)
+ printk(KERN_DEFAULT " ");
+ pr_cont("%016lx%c", *stack++, i % 4 == 3 ? '\n' : ' ');
}
- printk("\n");
show_trace(task, (unsigned long)sp);
}
@@ -163,13 +144,13 @@ void show_registers(struct pt_regs *regs)
mode = user_mode(regs) ? "User" : "Krnl";
printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
if (!user_mode(regs))
- printk(" (%pSR)", (void *)regs->psw.addr);
- printk("\n");
+ pr_cont(" (%pSR)", (void *)regs->psw.addr);
+ pr_cont("\n");
printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
"P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
- printk(" RI:%x EA:%x", psw->ri, psw->eaba);
- printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
+ pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
+ printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
printk(" %016lx %016lx %016lx %016lx\n",
regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
@@ -205,14 +186,14 @@ void die(struct pt_regs *regs, const char *str)
printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
- printk("PREEMPT ");
+ pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
- printk("SMP ");
+ pr_cont("SMP ");
#endif
if (debug_pagealloc_enabled())
- printk("DEBUG_PAGEALLOC");
- printk("\n");
+ pr_cont("DEBUG_PAGEALLOC");
+ pr_cont("\n");
notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
print_modules();
show_regs(regs);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c51650a1ed16..49a30737adde 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -23,6 +23,7 @@
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
+#include <asm/export.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@@ -259,6 +260,8 @@ sie_exit:
EX_TABLE(.Lrewind_pad,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
+EXPORT_SYMBOL(sie64a)
+EXPORT_SYMBOL(sie_exit)
#endif
/*
@@ -825,6 +828,9 @@ ENTRY(save_fpu_regs)
oi __LC_CPU_FLAGS+7,_CIF_FPU
br %r14
.Lsave_fpu_regs_end:
+#if IS_ENABLED(CONFIG_KVM)
+EXPORT_SYMBOL(save_fpu_regs)
+#endif
/*
* Load floating-point controls and floating-point or vector registers.
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index e499370fbccb..9a17e4475d27 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -9,6 +9,7 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
+#include <asm/export.h>
.section .kprobes.text, "ax"
@@ -23,6 +24,8 @@ ENTRY(ftrace_stub)
ENTRY(_mcount)
br %r14
+EXPORT_SYMBOL(_mcount)
+
ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 17431f63de00..955a7b6fa0a4 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -222,7 +222,7 @@ static int __init service_level_perf_register(void)
}
arch_initcall(service_level_perf_register);
-static int __perf_callchain_kernel(void *data, unsigned long address)
+static int __perf_callchain_kernel(void *data, unsigned long address, int reliable)
{
struct perf_callchain_entry_ctx *entry = data;
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
deleted file mode 100644
index e67453b73c3c..000000000000
--- a/arch/s390/kernel/s390_ksyms.c
+++ /dev/null
@@ -1,15 +0,0 @@
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <asm/fpu/api.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-#endif
-#if IS_ENABLED(CONFIG_KVM)
-EXPORT_SYMBOL(sie64a);
-EXPORT_SYMBOL(sie_exit);
-EXPORT_SYMBOL(save_fpu_regs);
-#endif
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 44f84b23d4e5..355db9db8210 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -27,12 +27,12 @@ static int __save_address(void *data, unsigned long address, int nosched)
return 1;
}
-static int save_address(void *data, unsigned long address)
+static int save_address(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 0);
}
-static int save_address_nosched(void *data, unsigned long address)
+static int save_address_nosched(void *data, unsigned long address, int reliable)
{
return __save_address(data, address, 1);
}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 1cab8a177d0e..7a27eebab28a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -119,8 +119,13 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
- WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
- return -EOPNOTSUPP;
+ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+ current->pid, vcpu->kvm);
+
+ /* do not warn on invalid runtime instrumentation mode */
+ WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
+ viwhy);
+ return -EINVAL;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index c6d553e85ab1..be9fa65bfac4 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
/*
* memset implementation
@@ -60,6 +61,7 @@ ENTRY(memset)
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
mvc 1(1,%r1),0(%r1)
+EXPORT_SYMBOL(memset)
/*
* memcpy implementation
@@ -86,3 +88,4 @@ ENTRY(memcpy)
j .Lmemcpy_rest
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
+EXPORT_SYMBOL(memcpy)
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index adb0c34bf431..18d4107e10ee 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+ ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index cd404aa3931c..4a0c5bce3552 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -217,6 +217,7 @@ static __init int setup_hugepagesz(char *opt)
} else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
+ hugetlb_bad_size();
pr_err("hugepagesz= specifies an unsupported page size %s\n",
string);
return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index f56a39bd8ba6..b3e9d18f2ec6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -151,36 +151,40 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
- unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
- unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
+ unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
unsigned long start_pfn = PFN_DOWN(start);
unsigned long size_pages = PFN_DOWN(size);
- unsigned long nr_pages;
- int rc, zone_enum;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct zone *zone;
+ int rc, i;
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- while (size_pages > 0) {
- if (start_pfn < dma_end_pfn) {
- nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
- dma_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_DMA;
- } else if (start_pfn < normal_end_pfn) {
- nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
- normal_end_pfn - start_pfn : size_pages;
- zone_enum = ZONE_NORMAL;
+ for (i = 0; i < MAX_NR_ZONES; i++) {
+ zone = pgdat->node_zones + i;
+ if (zone_idx(zone) != ZONE_MOVABLE) {
+ /* Add range within existing zone limits, if possible */
+ zone_start_pfn = zone->zone_start_pfn;
+ zone_end_pfn = zone->zone_start_pfn +
+ zone->spanned_pages;
} else {
- nr_pages = size_pages;
- zone_enum = ZONE_MOVABLE;
+ /* Add remaining range to ZONE_MOVABLE */
+ zone_start_pfn = start_pfn;
+ zone_end_pfn = start_pfn + size_pages;
}
- rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
- start_pfn, size_pages);
+ if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
+ continue;
+ nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
+ zone_end_pfn - start_pfn : size_pages;
+ rc = __add_pages(nid, zone, start_pfn, nr_pages);
if (rc)
break;
start_pfn += nr_pages;
size_pages -= nr_pages;
+ if (!size_pages)
+ break;
}
if (rc)
vmem_remove_mapping(start, size);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 16f4c3960b87..9a4de4599c7b 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <asm/processor.h>
-static int __s390_backtrace(void *data, unsigned long address)
+static int __s390_backtrace(void *data, unsigned long address, int reliable)
{
unsigned int *depth = data;
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 55836188b217..4f7314d5f334 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+ copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
return copied != sizeof(*res) ? -EIO : 0;
}
@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+ copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
return copied != sizeof(*res) ? -EIO : 0;
}
@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+ copied = access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE);
return copied != sizeof(val) ? -EIO : 0;
}
@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+ copied = access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE);
return copied != sizeof(val) ? -EIO : 0;
}
diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c
index 1517a7dcd6d9..5cea1e750cec 100644
--- a/arch/score/kernel/traps.c
+++ b/arch/score/kernel/traps.c
@@ -29,6 +29,7 @@
#include <asm/cacheflush.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
+#include <asm/uaccess.h>
unsigned long exception_handlers[32];
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 00476662ac2c..336f33a419d9 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -31,7 +31,7 @@ isa-y := $(isa-y)-up
endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
-cflags-$(CONFIG_CPU_J2) := $(call cc-option,-mj2,)
+cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index e9c2c42031fe..4e21949593cf 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -22,6 +22,16 @@ config SH_DEVICE_TREE
have sufficient driver coverage to use this option; do not
select it if you are using original SuperH hardware.
+config SH_JCORE_SOC
+ bool "J-Core SoC"
+ depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+ select CLKSRC_JCORE_PIT
+ select JCORE_AIC
+ default y if CPU_J2
+ help
+	  Select this option to include drivers for core components of the
+ J-Core SoC, including interrupt controllers and timers.
+
config SH_SOLUTION_ENGINE
bool "SolutionEngine"
select SOLUTION_ENGINE
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig
index 94d1eca52f72..2eb81ebe3888 100644
--- a/arch/sh/configs/j2_defconfig
+++ b/arch/sh/configs/j2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MEMORY_START=0x10000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SH_DEVICE_TREE=y
+CONFIG_SH_JCORE_SOC=y
CONFIG_HZ_100=y
CONFIG_CMDLINE_OVERWRITE=y
CONFIG_CMDLINE="console=ttyUL0 earlycon"
@@ -20,6 +21,7 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_NETDEVICES=y
+CONFIG_SERIAL_EARLYCON=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_I2C=y
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 40fa6c8adc43..063c298ba56c 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 6024c26c0585..cfc918067f80 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += cputime.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
+generic-y += export.h
generic-y += irq_regs.h
generic-y += irq_work.h
generic-y += linkage.h
diff --git a/arch/sparc/include/asm/string.h b/arch/sparc/include/asm/string.h
index 98b72a0c8e6e..86f34be14ce0 100644
--- a/arch/sparc/include/asm/string.h
+++ b/arch/sparc/include/asm/string.h
@@ -5,4 +5,38 @@
#else
#include <asm/string_32.h>
#endif
+
+/* First the mem*() things. */
+#define __HAVE_ARCH_MEMMOVE
+void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) __builtin_memset(s, c, count)
+
+#define __HAVE_ARCH_MEMSCAN
+
+#define memscan(__arg0, __char, __arg2) \
+({ \
+ void *__memscan_zero(void *, size_t); \
+ void *__memscan_generic(void *, int, size_t); \
+ void *__retval, *__addr = (__arg0); \
+ size_t __size = (__arg2); \
+ \
+ if(__builtin_constant_p(__char) && !(__char)) \
+ __retval = __memscan_zero(__addr, __size); \
+ else \
+ __retval = __memscan_generic(__addr, (__char), __size); \
+ \
+ __retval; \
+})
+
+#define __HAVE_ARCH_MEMCMP
+int memcmp(const void *,const void *,__kernel_size_t);
+
+#define __HAVE_ARCH_STRNCMP
+int strncmp(const char *, const char *, __kernel_size_t);
+
#endif
diff --git a/arch/sparc/include/asm/string_32.h b/arch/sparc/include/asm/string_32.h
index 69974e924611..649412476a69 100644
--- a/arch/sparc/include/asm/string_32.h
+++ b/arch/sparc/include/asm/string_32.h
@@ -11,60 +11,4 @@
#include <asm/page.h>
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
-void __memmove(void *,const void *,__kernel_size_t);
-
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-#undef memmove
-#define memmove(_to, _from, _n) \
-({ \
- void *_t = (_to); \
- __memmove(_t, (_from), (_n)); \
- _t; \
-})
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2) \
-({ \
- void *__memscan_zero(void *, size_t); \
- void *__memscan_generic(void *, int, size_t); \
- void *__retval, *__addr = (__arg0); \
- size_t __size = (__arg2); \
- \
- if(__builtin_constant_p(__char) && !(__char)) \
- __retval = __memscan_zero(__addr, __size); \
- else \
- __retval = __memscan_generic(__addr, (__char), __size); \
- \
- __retval; \
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
-/* Now the str*() stuff... */
-#define __HAVE_ARCH_STRLEN
-__kernel_size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
#endif /* !(__SPARC_STRING_H__) */
diff --git a/arch/sparc/include/asm/string_64.h b/arch/sparc/include/asm/string_64.h
index 5936b8ff3c05..6b9ccb308605 100644
--- a/arch/sparc/include/asm/string_64.h
+++ b/arch/sparc/include/asm/string_64.h
@@ -9,54 +9,10 @@
#ifndef __SPARC64_STRING_H__
#define __SPARC64_STRING_H__
-/* Really, userland/ksyms should not see any of this stuff. */
-
-#ifdef __KERNEL__
-
#include <asm/asi.h>
-#ifndef EXPORT_SYMTAB_STROPS
-
-/* First the mem*() things. */
-#define __HAVE_ARCH_MEMMOVE
-void *memmove(void *, const void *, __kernel_size_t);
-
-#define __HAVE_ARCH_MEMCPY
-#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-
-#define __HAVE_ARCH_MEMSET
-#define memset(s, c, count) __builtin_memset(s, c, count)
-
-#define __HAVE_ARCH_MEMSCAN
-
-#undef memscan
-#define memscan(__arg0, __char, __arg2) \
-({ \
- void *__memscan_zero(void *, size_t); \
- void *__memscan_generic(void *, int, size_t); \
- void *__retval, *__addr = (__arg0); \
- size_t __size = (__arg2); \
- \
- if(__builtin_constant_p(__char) && !(__char)) \
- __retval = __memscan_zero(__addr, __size); \
- else \
- __retval = __memscan_generic(__addr, (__char), __size); \
- \
- __retval; \
-})
-
-#define __HAVE_ARCH_MEMCMP
-int memcmp(const void *,const void *,__kernel_size_t);
-
/* Now the str*() stuff... */
#define __HAVE_ARCH_STRLEN
__kernel_size_t strlen(const char *);
-#define __HAVE_ARCH_STRNCMP
-int strncmp(const char *, const char *, __kernel_size_t);
-
-#endif /* !EXPORT_SYMTAB_STROPS */
-
-#endif /* __KERNEL__ */
-
#endif /* !(__SPARC64_STRING_H__) */
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index fdb13327fded..fa3c02d41138 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -86,7 +86,7 @@ obj-y += auxio_$(BITS).o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o
-obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o
+obj-$(CONFIG_MODULES) += sparc_ksyms.o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 07918ab3062e..d85bdb999819 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -29,6 +29,7 @@
#include <asm/unistd.h>
#include <asm/asmmacro.h>
+#include <asm/export.h>
#define curptr g6
@@ -1207,6 +1208,8 @@ delay_continue:
ret
restore
+EXPORT_SYMBOL(__udelay)
+EXPORT_SYMBOL(__ndelay)
/* Handle a software breakpoint */
/* We have to inform parent that child has stopped */
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 3d92c0a8f6c4..7bb317b87dde 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -24,6 +24,7 @@
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */
+#include <asm/export.h>
.data
/* The following are used with the prom_vector node-ops to figure out
@@ -60,6 +61,7 @@ sun4e_notsup:
*/
.globl empty_zero_page
empty_zero_page: .skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
.global root_flags
.global ram_flags
@@ -813,3 +815,4 @@ lvl14_save:
__ret_efault:
ret
restore %g0, -EFAULT, %o0
+EXPORT_SYMBOL(__ret_efault)
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index a076b4249e62..beba6c11554c 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -32,7 +32,8 @@
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
-
+#include <asm/export.h>
+
/* This section from _start to sparc64_boot_end should fit into
* 0x0000000000404000 to 0x0000000000408000.
*/
@@ -143,6 +144,7 @@ prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
+EXPORT_SYMBOL(prom_root_node)
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
@@ -158,6 +160,7 @@ is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
+EXPORT_SYMBOL(sun4v_chip_type)
1:
rd %pc, %l0
@@ -920,6 +923,7 @@ swapper_4m_tsb:
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
+EXPORT_SYMBOL(tlb_type)
.section ".fixup",#alloc,#execinstr
.globl __ret_efault, __retl_efault, __ret_one, __retl_one
@@ -927,6 +931,7 @@ ENTRY(__ret_efault)
ret
restore %g0, -EFAULT, %o0
ENDPROC(__ret_efault)
+EXPORT_SYMBOL(__ret_efault)
ENTRY(__retl_efault)
retl
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 314dd0c9fc5b..e4e5b832fcb6 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -15,6 +15,7 @@ __flushw_user:
2: retl
nop
.size __flushw_user,.-__flushw_user
+EXPORT_SYMBOL(__flushw_user)
/* Flush %fp and %i7 to the stack for all register
* windows active inside of the cpu. This allows
@@ -61,3 +62,4 @@ real_hard_smp_processor_id:
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)
diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S
index d127130bf424..4116ee5c7791 100644
--- a/arch/sparc/kernel/hvcalls.S
+++ b/arch/sparc/kernel/hvcalls.S
@@ -343,6 +343,7 @@ ENTRY(sun4v_mach_set_watchdog)
0: retl
nop
ENDPROC(sun4v_mach_set_watchdog)
+EXPORT_SYMBOL(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
@@ -776,6 +777,7 @@ ENTRY(sun4v_niagara_getperf)
retl
nop
ENDPROC(sun4v_niagara_getperf)
+EXPORT_SYMBOL(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
@@ -783,6 +785,7 @@ ENTRY(sun4v_niagara_setperf)
retl
nop
ENDPROC(sun4v_niagara_setperf)
+EXPORT_SYMBOL(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
@@ -792,6 +795,7 @@ ENTRY(sun4v_niagara2_getperf)
retl
nop
ENDPROC(sun4v_niagara2_getperf)
+EXPORT_SYMBOL(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
@@ -799,6 +803,7 @@ ENTRY(sun4v_niagara2_setperf)
retl
nop
ENDPROC(sun4v_niagara2_setperf)
+EXPORT_SYMBOL(sun4v_niagara2_setperf)
ENTRY(sun4v_reboot_data_set)
mov HV_FAST_REBOOT_DATA_SET, %o5
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..ac082dd8c67d 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
if (copy_from_user(kbuf, (void __user *) uaddr, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE);
if (len2 != len)
return -EFAULT;
}
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
if (copy_to_user((void __user *) uaddr, kbuf, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE | FOLL_WRITE);
if (len2 != len)
return -EFAULT;
}
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- k, sizeof(*k), 0)
+ k, sizeof(*k),
+ FOLL_FORCE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- &reg, sizeof(reg), 0)
+ &reg, sizeof(reg),
+ FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long) u,
- &reg, sizeof(reg), 1)
+ &reg, sizeof(reg),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
(unsigned long)
&reg_window[pos],
(void *) k,
- sizeof(*k), 1)
+ sizeof(*k),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
u,
- &reg, sizeof(reg), 0)
+ &reg, sizeof(reg),
+ FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- &reg, sizeof(reg), 1)
+ &reg, sizeof(reg),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 000000000000..09aa69e422e5
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,12 @@
+/*
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+
+/* This is needed only for drivers/sbus/char/openprom.c */
+EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
deleted file mode 100644
index bf4ccb10a78c..000000000000
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
- *
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- */
-
-#include <linux/module.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/delay.h>
-#include <asm/head.h>
-#include <asm/dma.h>
-
-struct poll {
- int fd;
- short events;
- short revents;
-};
-
-/* from entry.S */
-EXPORT_SYMBOL(__udelay);
-EXPORT_SYMBOL(__ndelay);
-
-/* from head_32.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(empty_zero_page);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
deleted file mode 100644
index 9e034f29dcc5..000000000000
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
- *
- * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-
-#include <asm/cpudata.h>
-#include <asm/uaccess.h>
-#include <asm/spitfire.h>
-#include <asm/oplib.h>
-#include <asm/hypervisor.h>
-#include <asm/cacheflush.h>
-
-struct poll {
- int fd;
- short events;
- short revents;
-};
-
-/* from helpers.S */
-EXPORT_SYMBOL(__flushw_user);
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
-
-/* from head_64.S */
-EXPORT_SYMBOL(__ret_efault);
-EXPORT_SYMBOL(tlb_type);
-EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(prom_root_node);
-
-/* from hvcalls.S */
-EXPORT_SYMBOL(sun4v_niagara_getperf);
-EXPORT_SYMBOL(sun4v_niagara_setperf);
-EXPORT_SYMBOL(sun4v_niagara2_getperf);
-EXPORT_SYMBOL(sun4v_niagara2_setperf);
-EXPORT_SYMBOL(sun4v_mach_set_watchdog);
-
-/* from hweight.S */
-EXPORT_SYMBOL(__arch_hweight8);
-EXPORT_SYMBOL(__arch_hweight16);
-EXPORT_SYMBOL(__arch_hweight32);
-EXPORT_SYMBOL(__arch_hweight64);
-
-/* from ffs_ffz.S */
-EXPORT_SYMBOL(ffs);
-EXPORT_SYMBOL(__ffs);
-
-/* Exporting a symbol from /init/main.c */
-EXPORT_SYMBOL(saved_command_line);
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 3269b0234093..885f00e81d1a 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -43,5 +43,4 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
obj-$(CONFIG_SPARC64) += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
-obj-y += ksyms.o
obj-$(CONFIG_SPARC64) += PeeCeeI.o
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index 3e6209ebb7d7..97e1b211090c 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -7,6 +7,7 @@
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
+#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
@@ -567,3 +568,4 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
mov EX_RETVAL(%o4), %o0
.size FUNC_NAME, .-FUNC_NAME
+EXPORT_SYMBOL(FUNC_NAME)
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index 62c2647bd5ce..1c7b6a39b942 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -13,6 +13,7 @@
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
+#include <asm/export.h>
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
@@ -79,3 +80,4 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80: jmpl %g7 + %g0, %g0
nop
ENDPROC(VISenter)
+EXPORT_SYMBOL(VISenter)
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
index 86f60de07b0a..c8b1cf71bc73 100644
--- a/arch/sparc/lib/ashldi3.S
+++ b/arch/sparc/lib/ashldi3.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(__ashldi3)
@@ -33,3 +34,4 @@ ENTRY(__ashldi3)
retl
nop
ENDPROC(__ashldi3)
+EXPORT_SYMBOL(__ashldi3)
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index 6eb8ba2dd50e..4310256e7964 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(__ashrdi3)
@@ -35,3 +36,4 @@ ENTRY(__ashrdi3)
jmpl %o7 + 8, %g0
nop
ENDPROC(__ashrdi3)
+EXPORT_SYMBOL(__ashrdi3)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index a5c5a0279ccc..1c6a1bde5138 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
+#include <asm/export.h>
.text
@@ -29,6 +30,7 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op); \
+EXPORT_SYMBOL(atomic_##op);
#define ATOMIC_OP_RETURN(op) \
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -42,7 +44,8 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return);
+ENDPROC(atomic_##op##_return); \
+EXPORT_SYMBOL(atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -56,7 +59,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op);
+ENDPROC(atomic_fetch_##op); \
+EXPORT_SYMBOL(atomic_fetch_##op);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -88,6 +92,7 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \
+EXPORT_SYMBOL(atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -101,7 +106,8 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return);
+ENDPROC(atomic64_##op##_return); \
+EXPORT_SYMBOL(atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -115,7 +121,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op);
+ENDPROC(atomic64_fetch_##op); \
+EXPORT_SYMBOL(atomic64_fetch_##op);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@@ -147,3 +154,4 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)
+EXPORT_SYMBOL(atomic64_dec_if_positive)
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 36f72cc0e67e..7031bf1587cb 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
+#include <asm/export.h>
.text
@@ -29,6 +30,7 @@ ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_set_bit)
+EXPORT_SYMBOL(test_and_set_bit)
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -50,6 +52,7 @@ ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_clear_bit)
+EXPORT_SYMBOL(test_and_clear_bit)
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -71,6 +74,7 @@ ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(test_and_change_bit)
+EXPORT_SYMBOL(test_and_change_bit)
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -90,6 +94,7 @@ ENTRY(set_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(set_bit)
+EXPORT_SYMBOL(set_bit)
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -109,6 +114,7 @@ ENTRY(clear_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(clear_bit)
+EXPORT_SYMBOL(clear_bit)
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
@@ -128,3 +134,4 @@ ENTRY(change_bit) /* %o0=nr, %o1=addr */
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
ENDPROC(change_bit)
+EXPORT_SYMBOL(change_bit)
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index 3c771011ff4b..1f2692d59d18 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/page.h>
+#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
@@ -64,6 +65,7 @@ ENTRY(bzero_1page)
retl
nop
ENDPROC(bzero_1page)
+EXPORT_SYMBOL(bzero_1page)
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
@@ -87,3 +89,4 @@ ENTRY(__copy_1page)
retl
nop
ENDPROC(__copy_1page)
+EXPORT_SYMBOL(__copy_1page)
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index 8c058114b649..3bb1914c4fa4 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
@@ -78,6 +79,8 @@ __bzero_done:
mov %o3, %o0
ENDPROC(__bzero)
ENDPROC(memset)
+EXPORT_SYMBOL(__bzero)
+EXPORT_SYMBOL(memset)
#define EX_ST(x,y) \
98: x,y; \
@@ -143,3 +146,4 @@ __clear_user_done:
retl
clr %o0
ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 0084c3361e15..c9d8b6232111 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -14,6 +14,7 @@
*/
#include <asm/errno.h>
+#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
@@ -104,6 +105,7 @@ csum_partial_fix_alignment:
* buffer of size 0x20. Follow the code path for that case.
*/
.globl csum_partial
+ EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buf, %o1=len, %o2=sum */
andcc %o0, 0x7, %g0 ! alignment problems?
bne csum_partial_fix_alignment ! yep, handle it
@@ -335,6 +337,7 @@ cc_dword_align:
*/
.align 8
.globl __csum_partial_copy_sparc_generic
+ EXPORT_SYMBOL(__csum_partial_copy_sparc_generic)
__csum_partial_copy_sparc_generic:
/* %o0=src, %o1=dest, %g1=len, %g7=sum */
xor %o0, %o1, %o4 ! get changing bits
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index 1d230f693dc4..f6732174fe6b 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -13,6 +13,7 @@
* BSD4.4 portable checksum routine
*/
+#include <asm/export.h>
.text
csum_partial_fix_alignment:
@@ -37,6 +38,7 @@ csum_partial_fix_alignment:
.align 32
.globl csum_partial
+ EXPORT_SYMBOL(csum_partial)
csum_partial: /* %o0=buff, %o1=len, %o2=sum */
prefetch [%o0 + 0x000], #n_reads
clr %o4
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 46272dfc26e8..f30d6b78afbd 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
+#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
@@ -26,6 +27,7 @@
.text
.globl _clear_page
+ EXPORT_SYMBOL(_clear_page)
_clear_page: /* %o0=dest */
ba,pt %xcc, clear_page_common
clr %o4
@@ -35,6 +37,7 @@ _clear_page: /* %o0=dest */
*/
.align 32
.globl clear_user_page
+ EXPORT_SYMBOL(clear_user_page)
clear_user_page: /* %o0=dest, %o1=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o2
sethi %hi(PAGE_OFFSET), %g2
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 302c0e60dc2c..482de093bdae 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -5,6 +5,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
+#include <asm/export.h>
#define XCC xcc
@@ -90,3 +91,4 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */
retl
clr %o0
ENDPROC(___copy_in_user)
+EXPORT_SYMBOL(___copy_in_user)
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index dd16c61f3263..7197b7250895 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -10,6 +10,7 @@
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
+#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
@@ -44,6 +45,7 @@
.align 32
.globl copy_user_page
.type copy_user_page,#function
+ EXPORT_SYMBOL(copy_user_page)
copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
lduw [%g6 + TI_PRE_COUNT], %o4
sethi %hi(PAGE_OFFSET), %g2
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index ef095b6c43b1..cea644dc67a6 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -15,6 +15,7 @@
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>
+#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
@@ -119,6 +120,7 @@
__copy_user_begin:
.globl __copy_user
+ EXPORT_SYMBOL(__copy_user)
dword_align:
andcc %o1, 1, %g0
be 4f
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index e566c770a0f6..0ecbafc30fd0 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -3,6 +3,8 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
+#include <asm/export.h>
+
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
#else
@@ -63,6 +65,7 @@
add %o5, %o4, %o4
.globl FUNC_NAME
+ EXPORT_SYMBOL(FUNC_NAME)
FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */
LOAD(prefetch, %o0 + 0x000, #n_reads)
xor %o0, %o1, %g1
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index 9614b48b6ef8..a2b5a976be33 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <asm/export.h>
.text
.align 4
.globl __divdi3
@@ -279,3 +280,4 @@ __divdi3:
.LL81:
ret
restore
+EXPORT_SYMBOL(__divdi3)
diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S
index b39389f69899..23aab144d28e 100644
--- a/arch/sparc/lib/ffs.S
+++ b/arch/sparc/lib/ffs.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
.register %g2,#scratch
@@ -65,6 +66,8 @@ ENTRY(__ffs)
add %o2, %g1, %o0
ENDPROC(ffs)
ENDPROC(__ffs)
+EXPORT_SYMBOL(__ffs)
+EXPORT_SYMBOL(ffs)
.section .popc_6insn_patch, "ax"
.word ffs
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index 95414e0a6808..f9985f129fb6 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
.text
.align 32
@@ -7,6 +8,7 @@ ENTRY(__arch_hweight8)
nop
nop
ENDPROC(__arch_hweight8)
+EXPORT_SYMBOL(__arch_hweight8)
.section .popc_3insn_patch, "ax"
.word __arch_hweight8
sllx %o0, 64-8, %g1
@@ -19,6 +21,7 @@ ENTRY(__arch_hweight16)
nop
nop
ENDPROC(__arch_hweight16)
+EXPORT_SYMBOL(__arch_hweight16)
.section .popc_3insn_patch, "ax"
.word __arch_hweight16
sllx %o0, 64-16, %g1
@@ -31,6 +34,7 @@ ENTRY(__arch_hweight32)
nop
nop
ENDPROC(__arch_hweight32)
+EXPORT_SYMBOL(__arch_hweight32)
.section .popc_3insn_patch, "ax"
.word __arch_hweight32
sllx %o0, 64-32, %g1
@@ -43,6 +47,7 @@ ENTRY(__arch_hweight64)
nop
nop
ENDPROC(__arch_hweight64)
+EXPORT_SYMBOL(__arch_hweight64)
.section .popc_3insn_patch, "ax"
.word __arch_hweight64
retl
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
index 4742d59029ee..5d61648b53dd 100644
--- a/arch/sparc/lib/ipcsum.S
+++ b/arch/sparc/lib/ipcsum.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
@@ -31,3 +32,4 @@ ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
retl
and %o2, %o1, %o0
ENDPROC(ip_fast_csum)
+EXPORT_SYMBOL(ip_fast_csum)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
deleted file mode 100644
index de5e97817bdb..000000000000
--- a/arch/sparc/lib/ksyms.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Export of symbols defined in assembler
- */
-
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/checksum.h>
-#include <asm/uaccess.h>
-#include <asm/ftrace.h>
-
-/* string functions */
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
-
-/* mem* functions */
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern void *__bzero(void *, size_t);
-
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__bzero);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial);
-
-#ifdef CONFIG_MCOUNT
-EXPORT_SYMBOL(_mcount);
-#endif
-
-/*
- * sparc
- */
-#ifdef CONFIG_SPARC32
-extern int __ashrdi3(int, int);
-extern int __ashldi3(int, int);
-extern int __lshrdi3(int, int);
-extern int __muldi3(int, int);
-extern int __divdi3(int, int);
-
-extern void (*__copy_1page)(void *, const void *);
-extern void (*bzero_1page)(void *);
-
-extern void ___rw_read_enter(void);
-extern void ___rw_read_try(void);
-extern void ___rw_read_exit(void);
-extern void ___rw_write_enter(void);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memmove);
-EXPORT_SYMBOL(bzero_1page);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(__copy_user);
-
-/* Used by asm/spinlock.h */
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(___rw_read_enter);
-EXPORT_SYMBOL(___rw_read_try);
-EXPORT_SYMBOL(___rw_read_exit);
-EXPORT_SYMBOL(___rw_write_enter);
-#endif
-
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__divdi3);
-#endif
-
-/*
- * sparc64
- */
-#ifdef CONFIG_SPARC64
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_from_user);
-EXPORT_SYMBOL(__csum_partial_copy_to_user);
-EXPORT_SYMBOL(ip_fast_csum);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(___copy_to_user);
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
-EXPORT_SYMBOL(__clear_user);
-
-/* Atomic counter implementation. */
-#define ATOMIC_OP(op) \
-EXPORT_SYMBOL(atomic_##op); \
-EXPORT_SYMBOL(atomic64_##op);
-
-#define ATOMIC_OP_RETURN(op) \
-EXPORT_SYMBOL(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
-
-#define ATOMIC_FETCH_OP(op) \
-EXPORT_SYMBOL(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
-
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
-
-ATOMIC_OPS(add)
-ATOMIC_OPS(sub)
-
-#undef ATOMIC_OPS
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
-
-ATOMIC_OPS(and)
-ATOMIC_OPS(or)
-ATOMIC_OPS(xor)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-EXPORT_SYMBOL(atomic64_dec_if_positive);
-
-/* Atomic bit operations. */
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(_clear_page);
-EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(copy_user_page);
-
-/* RAID code needs this */
-void VISenter(void);
-EXPORT_SYMBOL(VISenter);
-
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
-EXPORT_SYMBOL(xor_vis_2);
-EXPORT_SYMBOL(xor_vis_3);
-EXPORT_SYMBOL(xor_vis_4);
-EXPORT_SYMBOL(xor_vis_5);
-
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
-
-EXPORT_SYMBOL(xor_niagara_2);
-EXPORT_SYMBOL(xor_niagara_3);
-EXPORT_SYMBOL(xor_niagara_4);
-EXPORT_SYMBOL(xor_niagara_5);
-#endif
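[editor's note] The removal of arch/sparc/lib/ksyms.c above, together with the #include <asm/export.h> and EXPORT_SYMBOL() additions in the individual .S files, moves every export next to the routine it names; modules keep resolving these symbols exactly as before. As a minimal sketch (a hypothetical out-of-tree module, not part of this patch), this is the kind of user those exports serve:

/*
 * Hypothetical demo module: it links against csum_partial only because the
 * symbol is exported, which is exactly what the EXPORT_SYMBOL() lines added
 * to the .S files preserve after ksyms.c goes away.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/checksum.h>

static int __init export_demo_init(void)
{
        static const char buf[] = "export-demo";
        __wsum sum = csum_partial(buf, sizeof(buf) - 1, (__force __wsum)0);

        pr_info("export_demo: csum_partial() = %#x\n", (__force u32)sum);
        return 0;
}

static void __exit export_demo_exit(void)
{
}

module_init(export_demo_init);
module_exit(export_demo_exit);
MODULE_LICENSE("GPL");

Without the exports, loading such a module would fail with an unresolved ("Unknown symbol") error.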
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
index 64f53f2b673d..f38c4e59d078 100644
--- a/arch/sparc/lib/locks.S
+++ b/arch/sparc/lib/locks.S
@@ -10,6 +10,7 @@
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
+#include <asm/export.h>
.text
.align 4
@@ -48,6 +49,7 @@ ___rw_write_enter_spin_on_wlock:
ld [%g1], %g2
.globl ___rw_read_enter
+EXPORT_SYMBOL(___rw_read_enter)
___rw_read_enter:
orcc %g2, 0x0, %g0
bne,a ___rw_read_enter_spin_on_wlock
@@ -59,6 +61,7 @@ ___rw_read_enter:
mov %g4, %o7
.globl ___rw_read_exit
+EXPORT_SYMBOL(___rw_read_exit)
___rw_read_exit:
orcc %g2, 0x0, %g0
bne,a ___rw_read_exit_spin_on_wlock
@@ -70,6 +73,7 @@ ___rw_read_exit:
mov %g4, %o7
.globl ___rw_read_try
+EXPORT_SYMBOL(___rw_read_try)
___rw_read_try:
orcc %g2, 0x0, %g0
bne ___rw_read_try_spin_on_wlock
@@ -81,6 +85,7 @@ ___rw_read_try:
mov %g4, %o7
.globl ___rw_write_enter
+EXPORT_SYMBOL(___rw_write_enter)
___rw_write_enter:
orcc %g2, 0x0, %g0
bne ___rw_write_enter_spin_on_wlock
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
index 60ebc7cdbee0..c9b9373f8d81 100644
--- a/arch/sparc/lib/lshrdi3.S
+++ b/arch/sparc/lib/lshrdi3.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
ENTRY(__lshrdi3)
cmp %o2, 0
@@ -25,3 +26,4 @@ ENTRY(__lshrdi3)
retl
nop
ENDPROC(__lshrdi3)
+EXPORT_SYMBOL(__lshrdi3)
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 0b0ed4d34219..194f383611c0 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
/*
* This is the main variant and is called by C code. GCC's -pg option
@@ -16,6 +17,7 @@
.align 32
.globl _mcount
.type _mcount,#function
+ EXPORT_SYMBOL(_mcount)
.globl mcount
.type mcount,#function
_mcount:
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
index efa106c41ed0..cee7f30dbb61 100644
--- a/arch/sparc/lib/memcmp.S
+++ b/arch/sparc/lib/memcmp.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>
.text
ENTRY(memcmp)
@@ -25,3 +26,4 @@ ENTRY(memcmp)
2: retl
mov 0, %o0
ENDPROC(memcmp)
+EXPORT_SYMBOL(memcmp)
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index 4d8c497517bd..8913feaa7ac7 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -7,6 +7,7 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <asm/export.h>
#define FUNC(x) \
.globl x; \
.type x,@function; \
@@ -58,93 +59,11 @@ x:
stb %t0, [%dst - (offset) - 0x02]; \
stb %t1, [%dst - (offset) - 0x01];
-/* Both these macros have to start with exactly the same insn */
-#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- st %t0, [%dst - (offset) - 0x20]; \
- st %t1, [%dst - (offset) - 0x1c]; \
- st %t2, [%dst - (offset) - 0x18]; \
- st %t3, [%dst - (offset) - 0x14]; \
- st %t4, [%dst - (offset) - 0x10]; \
- st %t5, [%dst - (offset) - 0x0c]; \
- st %t6, [%dst - (offset) - 0x08]; \
- st %t7, [%dst - (offset) - 0x04];
-
-#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src - (offset) - 0x20], %t0; \
- ldd [%src - (offset) - 0x18], %t2; \
- ldd [%src - (offset) - 0x10], %t4; \
- ldd [%src - (offset) - 0x08], %t6; \
- std %t0, [%dst - (offset) - 0x20]; \
- std %t2, [%dst - (offset) - 0x18]; \
- std %t4, [%dst - (offset) - 0x10]; \
- std %t6, [%dst - (offset) - 0x08];
-
-#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c];
-
-#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src + (offset) + 0x00], %t0; \
- ldub [%src + (offset) + 0x01], %t1; \
- stb %t0, [%dst + (offset) + 0x00]; \
- stb %t1, [%dst + (offset) + 0x01];
-
-#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t5; \
- srl %t1, shir, %t6; \
- sll %t0, shil, %t0; \
- or %t5, %prev, %t5; \
- sll %t1, shil, %prev; \
- or %t6, %t0, %t0; \
- srl %t2, shir, %t1; \
- srl %t3, shir, %t6; \
- sll %t2, shil, %t2; \
- or %t1, %prev, %t1; \
- std %t4, [%dst + (offset) + (offset2) - 0x04]; \
- std %t0, [%dst + (offset) + (offset2) + 0x04]; \
- sll %t3, shil, %prev; \
- or %t6, %t2, %t4;
-
-#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- srl %t0, shir, %t4; \
- srl %t1, shir, %t5; \
- sll %t0, shil, %t6; \
- or %t4, %prev, %t0; \
- sll %t1, shil, %prev; \
- or %t5, %t6, %t1; \
- srl %t2, shir, %t4; \
- srl %t3, shir, %t5; \
- sll %t2, shil, %t6; \
- or %t4, %prev, %t2; \
- sll %t3, shil, %prev; \
- or %t5, %t6, %t3; \
- std %t0, [%dst + (offset) + (offset2) + 0x00]; \
- std %t2, [%dst + (offset) + (offset2) + 0x08];
-
.text
.align 4
-0:
- retl
- nop ! Only bcopy returns here and it retuns void...
-
-#ifdef __KERNEL__
-FUNC(amemmove)
-FUNC(__memmove)
-#endif
FUNC(memmove)
+EXPORT_SYMBOL(memmove)
cmp %o0, %o1
mov %o0, %g7
bleu 9f
@@ -202,6 +121,7 @@ FUNC(memmove)
add %o0, 2, %o0
FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+EXPORT_SYMBOL(memcpy)
sub %o0, %o1, %o4
mov %o0, %g7
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index 857ad4f8905f..012cdb6ca467 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
@@ -57,3 +58,4 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S
index 4ff1657dfc24..51ce690c42a8 100644
--- a/arch/sparc/lib/memscan_32.S
+++ b/arch/sparc/lib/memscan_32.S
@@ -4,6 +4,8 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <asm/export.h>
+
/* In essence, this is just a fancy strlen. */
#define LO_MAGIC 0x01010101
@@ -13,6 +15,8 @@
.align 4
.globl __memscan_zero, __memscan_generic
.globl memscan
+EXPORT_SYMBOL(__memscan_zero)
+EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = addr, %o1 = size */
cmp %o1, 0
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index 5686dfa5dc15..daa96f4b03e6 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -5,6 +5,8 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
+ #include <asm/export.h>
+
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
#define ASI_PL 0x88
@@ -13,6 +15,8 @@
.align 32
.globl __memscan_zero, __memscan_generic
.globl memscan
+ EXPORT_SYMBOL(__memscan_zero)
+ EXPORT_SYMBOL(__memscan_generic)
__memscan_zero:
/* %o0 = bufp, %o1 = size */
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index f75e6906df14..bb539b42b088 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -9,6 +9,7 @@
*/
#include <asm/ptrace.h>
+#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
@@ -63,6 +64,8 @@ __bzero_begin:
.globl __bzero
.globl memset
+ EXPORT_SYMBOL(__bzero)
+ EXPORT_SYMBOL(memset)
.globl __memset_start, __memset_end
__memset_start:
memset:
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
index 9794939d1c12..17a0f49aef3c 100644
--- a/arch/sparc/lib/muldi3.S
+++ b/arch/sparc/lib/muldi3.S
@@ -17,6 +17,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include <asm/export.h>
.text
.align 4
.globl __muldi3
@@ -74,3 +75,4 @@ __muldi3:
add %l2, %l0, %i0
ret
restore %g0, %l3, %o1
+EXPORT_SYMBOL(__muldi3)
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
index 536f83507fbf..ca0e7077e871 100644
--- a/arch/sparc/lib/strlen.S
+++ b/arch/sparc/lib/strlen.S
@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/export.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
@@ -78,3 +79,4 @@ ENTRY(strlen)
retl
mov 2, %o0
ENDPROC(strlen)
+EXPORT_SYMBOL(strlen)
diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S
index c0d1b568c1c5..e3fe014813af 100644
--- a/arch/sparc/lib/strncmp_32.S
+++ b/arch/sparc/lib/strncmp_32.S
@@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
ENTRY(strncmp)
@@ -116,3 +117,4 @@ ENTRY(strncmp)
retl
sub %o3, %o0, %o0
ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
index 0656627166f3..efb5f884330d 100644
--- a/arch/sparc/lib/strncmp_64.S
+++ b/arch/sparc/lib/strncmp_64.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/asi.h>
+#include <asm/export.h>
.text
ENTRY(strncmp)
@@ -28,3 +29,4 @@ ENTRY(strncmp)
retl
clr %o0
ENDPROC(strncmp)
+EXPORT_SYMBOL(strncmp)
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
index 2c05641c3263..45a49cb618b5 100644
--- a/arch/sparc/lib/xor.S
+++ b/arch/sparc/lib/xor.S
@@ -13,6 +13,7 @@
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
+#include <asm/export.h>
/*
* Requirements:
@@ -90,6 +91,7 @@ ENTRY(xor_vis_2)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_2)
+EXPORT_SYMBOL(xor_vis_2)
ENTRY(xor_vis_3)
rd %fprs, %o5
@@ -156,6 +158,7 @@ ENTRY(xor_vis_3)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_3)
+EXPORT_SYMBOL(xor_vis_3)
ENTRY(xor_vis_4)
rd %fprs, %o5
@@ -241,6 +244,7 @@ ENTRY(xor_vis_4)
retl
wr %g0, 0, %fprs
ENDPROC(xor_vis_4)
+EXPORT_SYMBOL(xor_vis_4)
ENTRY(xor_vis_5)
save %sp, -192, %sp
@@ -347,6 +351,7 @@ ENTRY(xor_vis_5)
ret
restore
ENDPROC(xor_vis_5)
+EXPORT_SYMBOL(xor_vis_5)
/* Niagara versions. */
ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
@@ -393,6 +398,7 @@ ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
ret
restore
ENDPROC(xor_niagara_2)
+EXPORT_SYMBOL(xor_niagara_2)
ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
@@ -454,6 +460,7 @@ ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
ret
restore
ENDPROC(xor_niagara_3)
+EXPORT_SYMBOL(xor_niagara_3)
ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
save %sp, -192, %sp
@@ -536,6 +543,7 @@ ENTRY(xor_niagara_4) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3 */
ret
restore
ENDPROC(xor_niagara_4)
+EXPORT_SYMBOL(xor_niagara_4)
ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=src4 */
save %sp, -192, %sp
@@ -634,3 +642,4 @@ ENTRY(xor_niagara_5) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2, %o4=src3, %o5=s
ret
restore
ENDPROC(xor_niagara_5)
+EXPORT_SYMBOL(xor_niagara_5)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 4e06750a5d29..cd0e32bbcb1d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -238,7 +238,8 @@ slow:
pages += nr;
ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
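[editor's note] The sparc gup.c hunk above tracks an mm API change: get_user_pages_unlocked() now takes the pages array followed by a single FOLL_* flags word, replacing the old separate write/force arguments. A hedged sketch of a caller using the new convention (the helper name is made up; the signature matches the call site above):

/* Hypothetical caller following the new gup_flags convention. */
#include <linux/mm.h>

static long demo_pin_pages(unsigned long start, unsigned long nr_pages,
                           struct page **pages, bool write)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}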
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 77f28ce9c646..9976fcecd17e 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -5,8 +5,8 @@
OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-CFLAGS_syscall_64.o += -Wno-override-init
-CFLAGS_syscall_32.o += -Wno-override-init
+CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index b75a8bcd2d23..21b352a11b49 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -44,6 +44,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
.section .entry.text, "ax"
@@ -991,6 +992,7 @@ trace:
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
+EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c98ec2efd750..ef766a358b37 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
+#include <asm/export.h>
#include <linux/err.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -875,6 +876,7 @@ ENTRY(native_load_gs_index)
popfq
ret
END(native_load_gs_index)
+EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
.section .fixup, "ax"
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index ff6ef7b30822..2b3618542544 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -389,5 +389,3 @@
380 i386 pkey_mprotect sys_pkey_mprotect
381 i386 pkey_alloc sys_pkey_alloc
382 i386 pkey_free sys_pkey_free
-#383 i386 pkey_get sys_pkey_get
-#384 i386 pkey_set sys_pkey_set
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 2f024d02511d..e93ef0b38db8 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -338,8 +338,6 @@
329 common pkey_mprotect sys_pkey_mprotect
330 common pkey_alloc sys_pkey_alloc
331 common pkey_free sys_pkey_free
-#332 common pkey_get sys_pkey_get
-#333 common pkey_set sys_pkey_set
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index e5a17114a8c4..fee6bc79b987 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -6,6 +6,7 @@
*/
#include <linux/linkage.h>
#include <asm/asm.h>
+ #include <asm/export.h>
/* put return address in eax (arg1) */
.macro THUNK name, func, put_ret_addr_in_eax=0
@@ -36,5 +37,7 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+ EXPORT_SYMBOL(___preempt_schedule)
+ EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 627ecbcb2e62..be36bf4e0957 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
+#include <asm/export.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
@@ -49,6 +50,8 @@
#ifdef CONFIG_PREEMPT
THUNK ___preempt_schedule, preempt_schedule
THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+ EXPORT_SYMBOL(___preempt_schedule)
+ EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) \
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3a9eb84b5cf..a74a2dbc0180 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3607,10 +3607,14 @@ __init int intel_pmu_init(void)
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
- * assume at least 3 events:
+ * assume at least 3 events, when not running in a hypervisor:
*/
- if (version > 1)
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+ if (version > 1) {
+ int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);
+
+ x86_pmu.num_counters_fixed =
+ max((int)edx.split.num_counters_fixed, assume);
+ }
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
@@ -3898,6 +3902,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_XEON_PHI_KNL:
+ case INTEL_FAM6_XEON_PHI_KNM:
memcpy(hw_cache_event_ids,
slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs,
@@ -3912,7 +3917,7 @@ __init int intel_pmu_init(void)
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
- pr_cont("Knights Landing events, ");
+ pr_cont("Knights Landing/Mill events, ");
break;
case INTEL_FAM6_SKYLAKE_MOBILE:
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 3ca87b5a8677..4f5ac726335f 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -48,7 +48,8 @@
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -56,15 +57,16 @@
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
- * Available model: SNB,IVB,HSW,BDW,SKL
+ * Available model: SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
- * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
+ * SKL,KNL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -118,6 +120,7 @@ struct cstate_model {
/* Quirk flags */
#define SLM_PKG_C6_USE_C7_MSR (1UL << 0)
+#define KNL_CORE_C6_MSR (1UL << 1)
struct perf_cstate_msr {
u64 msr;
@@ -488,6 +491,18 @@ static const struct cstate_model slm_cstates __initconst = {
.quirks = SLM_PKG_C6_USE_C7_MSR,
};
+
+static const struct cstate_model knl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C6_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES),
+ .quirks = KNL_CORE_C6_MSR,
+};
+
+
+
#define X86_CSTATES_MODEL(model, states) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
@@ -523,6 +538,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE, snb_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
+
+ X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -558,6 +575,11 @@ static int __init cstate_probe(const struct cstate_model *cm)
if (cm->quirks & SLM_PKG_C6_USE_C7_MSR)
pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
+ /* KNL has different MSR for CORE C6 */
+ if (cm->quirks & KNL_CORE_C6_MSR)
+ pkg_msr[PERF_CSTATE_CORE_C6_RES].msr = MSR_KNL_CORE_C6_RESIDENCY;
+
+
has_cstate_core = cstate_probe_msr(cm->core_events,
PERF_CSTATE_CORE_EVENT_MAX,
core_msr, core_events_attrs);
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index fc6cf21c535e..81b321ace8e0 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -458,8 +458,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
if (!x86_pmu.lbr_nr)
return;
- if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
- event->ctx->task_ctx_data) {
+ if (branch_user_callstack(cpuc->br_sel) &&
+ event->ctx->task_ctx_data) {
task_ctx = event->ctx->task_ctx_data;
task_ctx->lbr_callstack_users--;
}
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index b0f0e835a770..0a535cea8ff3 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -763,6 +763,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d9844cc74486..efca2685d876 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1349,6 +1349,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/export.h b/arch/x86/include/asm/export.h
new file mode 100644
index 000000000000..138de56b13eb
--- /dev/null
+++ b/arch/x86/include/asm/export.h
@@ -0,0 +1,4 @@
+#ifdef CONFIG_64BIT
+#define KSYM_ALIGN 16
+#endif
+#include <asm-generic/export.h>
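[editor's note] The new asm/export.h above only raises KSYM_ALIGN on 64-bit and defers to asm-generic/export.h, which is what lets the assembly sources in this series use EXPORT_SYMBOL() directly. What the macro ultimately emits is an entry in the __ksymtab section; a simplified C view of that entry (assumption: the linux/export.h layout of this era, later kernels switched to relative references) looks like:

/*
 * Simplified sketch, not the literal in-tree definition: each exported
 * symbol gets one such record in __ksymtab, plus its name string in
 * __ksymtab_strings, which the module loader resolves against.
 */
struct kernel_symbol {
        unsigned long value;    /* address of the exported symbol */
        const char *name;       /* symbol name */
};

The KSYM_ALIGN override presumably keeps assembly-generated entries aligned the same way the compiler aligns these 16-byte structures on 64-bit.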
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9ae5ab80a497..34a46dc076d3 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -64,5 +64,6 @@
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..d34bd370074b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 56f4c6676b29..78f3760ca1f2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -88,7 +88,6 @@
#define MSR_IA32_RTIT_CTL 0x00000570
#define MSR_IA32_RTIT_STATUS 0x00000571
-#define MSR_IA32_RTIT_STATUS 0x00000571
#define MSR_IA32_RTIT_ADDR0_A 0x00000580
#define MSR_IA32_RTIT_ADDR0_B 0x00000581
#define MSR_IA32_RTIT_ADDR1_A 0x00000582
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e02e3f80d363..84f58de08c2b 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -521,7 +521,8 @@ do { \
static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
const unsigned long __percpu *addr)
{
- unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
+ unsigned long __percpu *a =
+ (unsigned long __percpu *)addr + nr / BITS_PER_LONG;
#ifdef CONFIG_X86_64
return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
@@ -538,7 +539,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr,
asm volatile("bt "__percpu_arg(2)",%1\n\t"
CC_SET(c)
: CC_OUT(c) (oldbit)
- : "m" (*(unsigned long *)addr), "Ir" (nr));
+ : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
return oldbit;
}
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 3d33a719f5c1..a34e0d4b957d 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -103,8 +103,10 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
({ \
long tmp; \
struct rw_semaphore* ret; \
+ register void *__sp asm(_ASM_SP); \
+ \
asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%3)\n\t" \
+ LOCK_PREFIX " xadd %1,(%4)\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
@@ -112,7 +114,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), "=a" (ret) \
+ : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
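[editor's note] The rwsem hunk above adds the stack pointer as an explicit in/out operand of an inline asm that may perform a call; without that, GCC is free to omit a frame and the callee's stack usage can corrupt locals. A minimal, simplified illustration of the idiom (hypothetical helper, x86-64 only, not the kernel macro):

/*
 * Naming %rsp as an operand tells GCC the asm touches the stack, so it sets
 * up a proper frame around the inline-asm call. Real users must also account
 * for registers the callee may clobber; this sketch only shows the %rsp trick.
 */
static inline void call_from_asm(void (*fn)(void))
{
        register void *__sp asm("rsp");

        asm volatile("call *%1"
                     : "+r" (__sp)
                     : "r" (fn)
                     : "memory");
}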
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2aaca53c0974..ad6f5eb07a95 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -52,6 +52,15 @@ struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>
+struct thread_info {
+ unsigned long flags; /* low level flags */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+}
+
#define init_stack (init_thread_union.stack)
#else /* !__ASSEMBLY__ */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 4dd5d500eb60..79076d75bdbf 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -46,9 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
-obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64) += mcount_64.o
+obj-$(CONFIG_X86_64) += sys_x86_64.o mcount_64.o
obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8a5abaa7d453..931ced8ca345 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -454,6 +454,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
* stash over-ride to indicate we've been here
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 620ab06bcf45..017bda12caae 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
* We need the physical address of the container for both bitness since
* boot_params.hdr.ramdisk_image is a physical address.
*/
- cont = __pa(container);
+ cont = __pa_nodebug(container);
cont_va = container;
#endif
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 8cb57df9398d..1db8dc490b66 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -32,6 +32,8 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
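[editor's note] The scattered.c hunk above populates the two new AVX-512 bits (declared in cpufeatures.h earlier in this diff) from CPUID leaf 7 EDX. Once filled in, they are tested like any other cpufeature flag; a small hedged example (the function name is made up):

/* Hypothetical check for the Knights Mill AVX-512 extensions. */
#include <linux/types.h>
#include <asm/cpufeature.h>

static bool have_knm_avx512_extensions(void)
{
        return boot_cpu_has(X86_FEATURE_AVX512_4VNNIW) &&
               boot_cpu_has(X86_FEATURE_AVX512_4FMAPS);
}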
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 81160578b91a..5130985b758b 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
+#include <asm/timer.h>
#include <asm/apic.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
@@ -94,6 +95,10 @@ static void __init vmware_platform_setup(void)
} else {
pr_warn("Failed to get TSC freq from the hypervisor\n");
}
+
+#ifdef CONFIG_X86_IO_APIC
+ no_timer_check = 1;
+#endif
}
/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index b85fe5f91c3f..90e8dde3ec26 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -350,7 +350,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
* continue building up new bios map based on this
* information
*/
- if (current_type != last_type) {
+ if (current_type != last_type || current_type == E820_PRAM) {
if (last_type != 0) {
new_bios[new_bios_entry].size =
change_point[chgidx]->addr - last_addr;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 124aa5c593f8..095ef7ddd6ae 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -74,6 +74,8 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_MPX);
setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
setup_clear_cpu_cap(X86_FEATURE_PKU);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
}
/*
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5f401262f12d..b6b2f0264af3 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -23,6 +23,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
+#include <asm/export.h>
/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
@@ -673,6 +674,7 @@ ENTRY(empty_zero_page)
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.fill 1024,4,0
+EXPORT_SYMBOL(empty_zero_page)
/*
* This starts the data section.
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index c98a559c346e..b4421cc191b0 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -21,6 +21,7 @@
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
+#include <asm/export.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -486,10 +487,12 @@ early_gdt_descr_base:
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
+EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
deleted file mode 100644
index 1f9b878ef5ef..000000000000
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ /dev/null
@@ -1,47 +0,0 @@
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-
-#include <asm/checksum.h>
-#include <asm/pgtable.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount is defined in assembly */
-EXPORT_SYMBOL(mcount);
-#endif
-
-/*
- * Note, this is a prototype to get at the symbol for
- * the export, but dont use it from C code, it is used
- * by assembly code and is not using C calling convention!
- */
-#ifndef CONFIG_X86_CMPXCHG64
-extern void cmpxchg8b_emu(void);
-EXPORT_SYMBOL(cmpxchg8b_emu);
-#endif
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_generic);
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(strstr);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(empty_zero_page);
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
-
-EXPORT_SYMBOL(__sw_hweight32);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee019209c..d9d8d16b69db 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -50,6 +50,7 @@
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
+#include <linux/kasan.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
+ * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+ * raw stack chunk with redzones:
*/
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
- MIN_STACK_SIZE(addr));
+ __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@ void jprobe_return(void)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ /* Unpoison stack redzones in the frames we are going to jump over. */
+ kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
+
asm volatile (
#ifdef CONFIG_X86_64
" xchg %%rbx,%%rsp \n"
@@ -1118,7 +1123,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
/* It's OK to start function graph tracing again */
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+ __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 61924222a9e1..7b0d3da52fb4 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
+#include <asm/export.h>
.code64
@@ -17,8 +18,10 @@
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
#else
# define function_hook mcount
+EXPORT_SYMBOL(mcount)
#endif
/* All cases save the original rbp (8 bytes) */
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 51402a7e4ca6..0bee04d41bed 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -625,8 +625,6 @@ static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
amd_disable_seq_and_redirect_scrub);
-#endif
-
#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>
@@ -657,3 +655,4 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
+#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbfbca5fea0c..9c337b0e8ba7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1221,11 +1221,16 @@ void __init setup_arch(char **cmdline_p)
*/
get_smp_config();
+ /*
+ * Systems w/o ACPI and mptables might not have it mapped the local
+ * APIC yet, but prefill_possible_map() might need to access it.
+ */
+ init_apic_mappings();
+
prefill_possible_map();
init_cpu_to_node();
- init_apic_mappings();
io_apic_init_mappings();
kvm_guest_init();
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 40df33753bae..ec1f756f9dc9 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -105,9 +105,6 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
/* Don't let flags to be set from userspace */
act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
- if (user_64bit_mode(current_pt_regs()))
- return;
-
if (in_ia32_syscall())
act->sa.sa_flags |= SA_IA32_ABI;
if (in_x32_syscall())
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 68f8cc222f25..c00cb64bc0a1 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,8 +261,10 @@ static inline void __smp_reschedule_interrupt(void)
__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
+ irq_enter();
ack_APIC_irq();
__smp_reschedule_interrupt();
+ irq_exit();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 951f093a96fe..42f5eb7b4f6c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1409,15 +1409,17 @@ __init void prefill_possible_map(void)
/* No boot processor was found in mptable or ACPI MADT */
if (!num_processors) {
- int apicid = boot_cpu_physical_apicid;
- int cpu = hard_smp_processor_id();
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ int apicid = boot_cpu_physical_apicid;
+ int cpu = hard_smp_processor_id();
- pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+ pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
- /* Make sure boot cpu is enumerated */
- if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
- apic->apic_id_valid(apicid))
- generic_processor_info(apicid, boot_cpu_apic_version);
+ /* Make sure boot cpu is enumerated */
+ if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+ apic->apic_id_valid(apicid))
+ generic_processor_info(apicid, boot_cpu_apic_version);
+ }
if (!num_processors)
num_processors = 1;
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c9a073866ca7..a23ce84a3f6c 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
- copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+ FOLL_FORCE);
for (i = 0; i < copied; i++) {
switch (opcode[i]) {
/* popf and iret */
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c
index 9298993dc8b7..2d721e533cf4 100644
--- a/arch/x86/kernel/unwind_guess.c
+++ b/arch/x86/kernel/unwind_guess.c
@@ -47,7 +47,14 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
get_stack_info(first_frame, state->task, &state->stack_info,
&state->stack_mask);
- if (!__kernel_text_address(*first_frame))
+ /*
+ * The caller can provide the address of the first frame directly
+ * (first_frame) or indirectly (regs->sp) to indicate which stack frame
+ * to start unwinding at. Skip ahead until we reach it.
+ */
+ if (!unwind_done(state) &&
+ (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
+ !__kernel_text_address(*first_frame)))
unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(__unwind_start);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
deleted file mode 100644
index b2cee3d19477..000000000000
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Exports for assembly files.
- All C exports should go in the respective C files. */
-
-#include <linux/export.h>
-#include <linux/spinlock_types.h>
-#include <linux/smp.h>
-
-#include <net/checksum.h>
-
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-#include <asm/ftrace.h>
-
-#ifdef CONFIG_FUNCTION_TRACER
-/* mcount and __fentry__ are defined in assembly */
-#ifdef CC_USING_FENTRY
-EXPORT_SYMBOL(__fentry__);
-#else
-EXPORT_SYMBOL(mcount);
-#endif
-#endif
-
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-EXPORT_SYMBOL(__get_user_8);
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
-EXPORT_SYMBOL(copy_user_generic_string);
-EXPORT_SYMBOL(copy_user_generic_unrolled);
-EXPORT_SYMBOL(copy_user_enhanced_fast_string);
-EXPORT_SYMBOL(__copy_user_nocache);
-EXPORT_SYMBOL(_copy_from_user);
-EXPORT_SYMBOL(_copy_to_user);
-
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
-
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
-EXPORT_SYMBOL(csum_partial);
-
-EXPORT_SYMBOL(__sw_hweight32);
-EXPORT_SYMBOL(__sw_hweight64);
-
-/*
- * Export string functions. We normally rely on gcc builtin for most of these,
- * but gcc sometimes decides not to inline them.
- */
-#undef memcpy
-#undef memset
-#undef memmove
-
-extern void *__memset(void *, int, __kernel_size_t);
-extern void *__memcpy(void *, const void *, __kernel_size_t);
-extern void *__memmove(void *, const void *, __kernel_size_t);
-extern void *memset(void *, int, __kernel_size_t);
-extern void *memcpy(void *, const void *, __kernel_size_t);
-extern void *memmove(void *, const void *, __kernel_size_t);
-
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memmove);
-
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-
-#ifndef CONFIG_DEBUG_VIRTUAL
-EXPORT_SYMBOL(phys_base);
-#endif
-EXPORT_SYMBOL(empty_zero_page);
-#ifndef CONFIG_PARAVIRT
-EXPORT_SYMBOL(native_load_gs_index);
-#endif
-
-#ifdef CONFIG_PREEMPT
-EXPORT_SYMBOL(___preempt_schedule);
-EXPORT_SYMBOL(___preempt_schedule_notrace);
-#endif
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index c7220ba94aa7..1a22de70f7f7 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->irr = 0;
ioapic->irr_delivered = 0;
ioapic->id = 0;
- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
+ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
rtc_irq_eoi_tracking_reset(ioapic);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c633de84dd7..e375235d81c9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5733,13 +5733,13 @@ static int kvmclock_cpu_online(unsigned int cpu)
static void kvm_timer_init(void)
{
- int cpu;
-
max_tsc_khz = tsc_khz;
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
+ int cpu;
+
memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
cpufreq_get_policy(&policy, cpu);
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index c1e623209853..4d34bb548b41 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -251,6 +252,7 @@ ENTRY(csum_partial)
ENDPROC(csum_partial)
#endif
+EXPORT_SYMBOL(csum_partial)
/*
unsigned int csum_partial_copy_generic (const char *src, char *dst,
@@ -490,3 +492,4 @@ ENDPROC(csum_partial_copy_generic)
#undef ROUND1
#endif
+EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index 65be7cfaf947..5e2af3a88cf5 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,6 +1,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* Most CPUs support enhanced REP MOVSB/STOSB instructions. It is
@@ -23,6 +24,7 @@ ENTRY(clear_page)
rep stosq
ret
ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
ENTRY(clear_page_orig)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index ad5349778490..03a186fc06ea 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -7,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/export.h>
.text
@@ -48,3 +49,4 @@ ENTRY(cmpxchg8b_emu)
ret
ENDPROC(cmpxchg8b_emu)
+EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 24ef1c2104d4..e8508156c99d 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* Some CPUs run faster using the string copy instructions (sane microcode).
@@ -17,6 +18,7 @@ ENTRY(copy_page)
rep movsq
ret
ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
ENTRY(copy_page_regs)
subq $2*8, %rsp
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index bf603ebbfd8e..d376e4b48f88 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -14,6 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
@@ -29,6 +30,7 @@ ENTRY(_copy_to_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_to_user)
+EXPORT_SYMBOL(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
@@ -44,6 +46,8 @@ ENTRY(_copy_from_user)
"jmp copy_user_enhanced_fast_string", \
X86_FEATURE_ERMS
ENDPROC(_copy_from_user)
+EXPORT_SYMBOL(_copy_from_user)
+
.section .fixup,"ax"
/* must zero dest */
@@ -155,6 +159,7 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
ENDPROC(copy_user_generic_unrolled)
+EXPORT_SYMBOL(copy_user_generic_unrolled)
/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
@@ -200,6 +205,7 @@ ENTRY(copy_user_generic_string)
_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
ENDPROC(copy_user_generic_string)
+EXPORT_SYMBOL(copy_user_generic_string)
/*
* Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
@@ -229,6 +235,7 @@ ENTRY(copy_user_enhanced_fast_string)
_ASM_EXTABLE(1b,12b)
ENDPROC(copy_user_enhanced_fast_string)
+EXPORT_SYMBOL(copy_user_enhanced_fast_string)
/*
* copy_user_nocache - Uncached memory copy with exception handling
@@ -379,3 +386,4 @@ ENTRY(__copy_user_nocache)
_ASM_EXTABLE(40b,.L_fixup_1b_copy)
_ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
+EXPORT_SYMBOL(__copy_user_nocache)
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9a7fe6a70491..378e5d5bf9b1 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -135,6 +135,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
return (__force __wsum)add32_with_carry(do_csum(buff, len),
(__force u32)sum);
}
+EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 0ef5128c2de8..37b62d412148 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -32,6 +32,7 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
.text
ENTRY(__get_user_1)
@@ -44,6 +45,7 @@ ENTRY(__get_user_1)
ASM_CLAC
ret
ENDPROC(__get_user_1)
+EXPORT_SYMBOL(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
@@ -57,6 +59,7 @@ ENTRY(__get_user_2)
ASM_CLAC
ret
ENDPROC(__get_user_2)
+EXPORT_SYMBOL(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
@@ -70,6 +73,7 @@ ENTRY(__get_user_4)
ASM_CLAC
ret
ENDPROC(__get_user_4)
+EXPORT_SYMBOL(__get_user_4)
ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
@@ -97,6 +101,7 @@ ENTRY(__get_user_8)
ret
#endif
ENDPROC(__get_user_8)
+EXPORT_SYMBOL(__get_user_8)
bad_get_user:
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index 8a602a1e404a..23d893cbc200 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -1,4 +1,5 @@
#include <linux/linkage.h>
+#include <asm/export.h>
#include <asm/asm.h>
@@ -32,6 +33,7 @@ ENTRY(__sw_hweight32)
__ASM_SIZE(pop,) %__ASM_REG(dx)
ret
ENDPROC(__sw_hweight32)
+EXPORT_SYMBOL(__sw_hweight32)
ENTRY(__sw_hweight64)
#ifdef CONFIG_X86_64
@@ -77,3 +79,4 @@ ENTRY(__sw_hweight64)
ret
#endif
ENDPROC(__sw_hweight64)
+EXPORT_SYMBOL(__sw_hweight64)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 49e6ebac7e73..779782f58324 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,6 +4,7 @@
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
@@ -40,6 +41,8 @@ ENTRY(memcpy)
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
+EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
@@ -274,6 +277,7 @@ ENTRY(memcpy_mcsafe_unrolled)
xorq %rax, %rax
ret
ENDPROC(memcpy_mcsafe_unrolled)
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
.section .fixup, "ax"
/* Return -EFAULT for any failure */
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 90ce01bee00c..15de86cd15b0 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,6 +8,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
#undef memmove
@@ -207,3 +208,5 @@ ENTRY(__memmove)
retq
ENDPROC(__memmove)
ENDPROC(memmove)
+EXPORT_SYMBOL(__memmove)
+EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index e1229ecd2a82..55b95db30a61 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,6 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
+#include <asm/export.h>
.weak memset
@@ -43,6 +44,8 @@ ENTRY(__memset)
ret
ENDPROC(memset)
ENDPROC(__memset)
+EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index c891ece81e5b..cd5d716d2897 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -15,6 +15,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
+#include <asm/export.h>
/*
@@ -43,6 +44,7 @@ ENTRY(__put_user_1)
xor %eax,%eax
EXIT
ENDPROC(__put_user_1)
+EXPORT_SYMBOL(__put_user_1)
ENTRY(__put_user_2)
ENTER
@@ -55,6 +57,7 @@ ENTRY(__put_user_2)
xor %eax,%eax
EXIT
ENDPROC(__put_user_2)
+EXPORT_SYMBOL(__put_user_2)
ENTRY(__put_user_4)
ENTER
@@ -67,6 +70,7 @@ ENTRY(__put_user_4)
xor %eax,%eax
EXIT
ENDPROC(__put_user_4)
+EXPORT_SYMBOL(__put_user_4)
ENTRY(__put_user_8)
ENTER
@@ -82,6 +86,7 @@ ENTRY(__put_user_8)
xor %eax,%eax
EXIT
ENDPROC(__put_user_8)
+EXPORT_SYMBOL(__put_user_8)
bad_put_user:
movl $-EFAULT,%eax
diff --git a/arch/x86/lib/strstr_32.c b/arch/x86/lib/strstr_32.c
index 8e2d55f754bf..a03b1c750bfe 100644
--- a/arch/x86/lib/strstr_32.c
+++ b/arch/x86/lib/strstr_32.c
@@ -1,4 +1,5 @@
#include <linux/string.h>
+#include <linux/export.h>
char *strstr(const char *cs, const char *ct)
{
@@ -28,4 +29,4 @@ __asm__ __volatile__(
: "dx", "di");
return __res;
}
-
+EXPORT_SYMBOL(strstr);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index b8b6a60b32cf..0d4fb3ebbbac 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -435,7 +435,7 @@ slow_irqon:
ret = get_user_pages_unlocked(start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index ddd2661c4502..887e57182716 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -104,10 +104,10 @@ void __init kernel_randomize_memory(void)
* consistent with the vaddr_start/vaddr_end variables.
*/
BUILD_BUG_ON(vaddr_start >= vaddr_end);
- BUILD_BUG_ON(config_enabled(CONFIG_X86_ESPFIX64) &&
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
vaddr_end >= EFI_VA_START);
- BUILD_BUG_ON((config_enabled(CONFIG_X86_ESPFIX64) ||
- config_enabled(CONFIG_EFI)) &&
+ BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
+ IS_ENABLED(CONFIG_EFI)) &&
vaddr_end >= __START_KERNEL_map);
BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 80476878eb4c..e4f800999b32 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
{
long gup_ret;
int nr_pages = 1;
- int force = 0;
- gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
- force, NULL, NULL);
+ gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+ write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 170cc4ff057b..83e701f160a9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -730,6 +730,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+ return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index b4d5e95fe4df..4a6a5a26c582 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
*/
return BIOS_STATUS_UNIMPLEMENTED;
- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+ /*
+ * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
+ * callback method, which uses efi_call() directly, with the kernel page tables:
+ */
+ if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
+ ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
+ else
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+
return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 3ee2bb6b440b..e7e7055a8658 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -8,7 +8,7 @@ else
BITS := 64
endif
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ksyms.o ldt.o \
+obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
stub_$(BITS).o stub_segv.o \
sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
index fa4b8b9841ff..b9933eb9274a 100644
--- a/arch/x86/um/checksum_32.S
+++ b/arch/x86/um/checksum_32.S
@@ -27,6 +27,7 @@
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/export.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
@@ -214,3 +215,4 @@ csum_partial:
ret
#endif
+ EXPORT_SYMBOL(csum_partial)
diff --git a/arch/x86/um/ksyms.c b/arch/x86/um/ksyms.c
deleted file mode 100644
index 2e8f43ec6214..000000000000
--- a/arch/x86/um/ksyms.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#include <linux/module.h>
-#include <asm/string.h>
-#include <asm/checksum.h>
-
-#ifndef CONFIG_X86_32
-/*XXX: we need them because they would be exported by x86_64 */
-#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
-EXPORT_SYMBOL(memcpy);
-#else
-EXPORT_SYMBOL(__memcpy);
-#endif
-#endif
-EXPORT_SYMBOL(csum_partial);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 5766ead6fdb9..60a5a5a85505 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
- n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+ n = access_process_vm(current, addr, &instr, sizeof(instr),
+ FOLL_FORCE);
if (n != sizeof(instr)) {
printk(KERN_ERR "is_syscall : failed to read "
"instruction from 0x%lx\n", addr);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 0b5c184dd5b3..e30202b1716e 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
- n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+ n = access_process_vm(current, addr, &instr, sizeof(instr),
+ FOLL_FORCE);
if (n != sizeof(instr)) {
printk("is_syscall : failed to read instruction from "
"0x%lx\n", addr);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0fdd57da7aa..bdd855685403 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1837,6 +1837,7 @@ static void __init init_hvm_pv_info(void)
xen_domain_type = XEN_HVM_DOMAIN;
}
+#endif
static int xen_cpu_up_prepare(unsigned int cpu)
{
@@ -1887,6 +1888,7 @@ static int xen_cpu_up_online(unsigned int cpu)
return 0;
}
+#ifdef CONFIG_XEN_PVHVM
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
diff --git a/block/badblocks.c b/block/badblocks.c
index 7be53cb1cc3c..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
}
EXPORT_SYMBOL_GPL(badblocks_check);
+static void badblocks_update_acked(struct badblocks *bb)
+{
+ u64 *p = bb->page;
+ int i;
+ bool unacked = false;
+
+ if (!bb->unacked_exist)
+ return;
+
+ for (i = 0; i < bb->count ; i++) {
+ if (!BB_ACK(p[i])) {
+ unacked = true;
+ break;
+ }
+ }
+
+ if (!unacked)
+ bb->unacked_exist = 0;
+}
+
/**
* badblocks_set() - Add a range of bad blocks to the table.
* @bb: the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
bb->changed = 1;
if (!acknowledged)
bb->unacked_exist = 1;
+ else
+ badblocks_update_acked(bb);
write_sequnlock_irqrestore(&bb->lock, flags);
return rv;
@@ -354,7 +376,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
- if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +400,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
lo--;
}
while (lo >= 0 &&
- BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
@@ -399,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
}
}
+ badblocks_update_acked(bb);
bb->changed = 1;
out:
write_sequnlock_irq(&bb->lock);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index dd38e5ced4a3..b08ccbb9393a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1340,10 +1340,8 @@ int blkcg_policy_register(struct blkcg_policy *pol)
struct blkcg_policy_data *cpd;
cpd = pol->cpd_alloc_fn(GFP_KERNEL);
- if (!cpd) {
- mutex_unlock(&blkcg_pol_mutex);
+ if (!cpd)
goto err_free_cpds;
- }
blkcg->cpd[pol->plid] = cpd;
cpd->blkcg = blkcg;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6a14b68b9135..3c882cbc7541 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -343,6 +343,34 @@ static void flush_data_end_io(struct request *rq, int error)
struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
/*
+	 * Update q->in_flight[] here to make this tag usable
+	 * early, because blk_queue_start_tag() uses
+	 * q->in_flight[BLK_RW_ASYNC] to limit async I/O and to
+	 * reserve tags for sync I/O.
+ *
+ * More importantly this way can avoid the following I/O
+ * deadlock:
+ *
+	 *   - suppose there are 40 FUA requests coming to the flush queue
+	 *     and the queue depth is 31
+	 *   - 30 rqs are scheduled, then blk_queue_start_tag() can't allocate
+	 *     a tag for async I/O any more
+	 *   - all 30 rqs are completed before FLUSH_PENDING_TIMEOUT
+	 *     and flush_data_end_io() is called
+	 *   - the other rqs still can't go ahead unless
+	 *     q->in_flight[BLK_RW_ASYNC] is updated here; meanwhile these rqs
+	 *     are held in the flush data queue and make no progress
+	 *     handling the post-flush rq
+	 *   - only after the post-flush rq is handled can all these rqs
+	 *     be completed
+ */
+
+ elv_completed_request(q, rq);
+
+ /* for avoiding double accounting */
+ rq->cmd_flags &= ~REQ_STARTED;
+
+ /*
* After populating an empty queue, kick it to avoid stall. Read
* the comment in flush_end_io().
*/
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ddc2eed64771..f3d27a6dee09 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1217,9 +1217,9 @@ static struct request *blk_mq_map_request(struct request_queue *q,
blk_mq_set_alloc_data(&alloc_data, q, 0, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
- hctx->queued++;
- data->hctx = hctx;
- data->ctx = ctx;
+ data->hctx = alloc_data.hctx;
+ data->ctx = alloc_data.ctx;
+ data->hctx->queued++;
return rq;
}
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 96631e6a22b9..06cf9807f49a 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
* Softirq action handler - move entries to local list and loop over them
* while passing them to the queue registered handler.
*/
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
{
struct list_head *cpu_list, local_list;
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
obj-y += idle/
# IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
+obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 8ea8211b2d58..eb76a4c10dbf 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
+#include <xen/xen.h>
#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
@@ -477,6 +478,10 @@ static struct acpi_driver acpi_pad_driver = {
static int __init acpi_pad_init(void)
{
+ /* Xen ACPI PAD is used when running as Xen Dom0. */
+ if (xen_initial_domain())
+ return -ENODEV;
+
power_saving_mwait_init();
if (power_saving_mwait_eax == 0)
return -EINVAL;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index f1e6dcc7a827..54d48b90de2c 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -46,6 +46,7 @@
#include "acdispat.h"
#include "acnamesp.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsinit")
@@ -214,23 +215,17 @@ acpi_ds_initialize_objects(u32 table_index,
/* Walk entire namespace from the supplied root */
- status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
/*
* We don't use acpi_walk_namespace since we do not want to acquire
* the namespace reader lock.
*/
status =
acpi_ns_walk_namespace(ACPI_TYPE_ANY, start_node, ACPI_UINT32_MAX,
- ACPI_NS_WALK_UNLOCK, acpi_ds_init_one_object,
- NULL, &info, NULL);
+ 0, acpi_ds_init_one_object, NULL, &info,
+ NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
}
- (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 32e9ddc0cf2b..2b3210f42a46 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -99,14 +99,11 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
"Method auto-serialization parse [%4.4s] %p\n",
acpi_ut_get_node_name(node), node));
- acpi_ex_enter_interpreter();
-
/* Create/Init a root op for the method parse tree */
op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
if (!op) {
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
acpi_ps_set_name(op, node->name.integer);
@@ -118,8 +115,7 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
if (!walk_state) {
acpi_ps_free_op(op);
- status = AE_NO_MEMORY;
- goto unlock;
+ return_ACPI_STATUS(AE_NO_MEMORY);
}
status = acpi_ds_init_aml_walk(walk_state, op, node,
@@ -138,8 +134,6 @@ acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
status = acpi_ps_parse_aml(walk_state);
acpi_ps_delete_parse_tree(op);
-unlock:
- acpi_ex_exit_interpreter();
return_ACPI_STATUS(status);
}
@@ -731,26 +725,6 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_ds_method_data_delete_all(walk_state);
/*
- * If method is serialized, release the mutex and restore the
- * current sync level for this thread
- */
- if (method_desc->method.mutex) {
-
- /* Acquisition Depth handles recursive calls */
-
- method_desc->method.mutex->mutex.acquisition_depth--;
- if (!method_desc->method.mutex->mutex.acquisition_depth) {
- walk_state->thread->current_sync_level =
- method_desc->method.mutex->mutex.
- original_sync_level;
-
- acpi_os_release_mutex(method_desc->method.
- mutex->mutex.os_mutex);
- method_desc->method.mutex->mutex.thread_id = 0;
- }
- }
-
- /*
* Delete any namespace objects created anywhere within the
* namespace by the execution of this method. Unless:
* 1) This method is a module-level executable code method, in which
@@ -786,6 +760,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
~ACPI_METHOD_MODIFIED_NAMESPACE;
}
}
+
+ /*
+ * If method is serialized, release the mutex and restore the
+ * current sync level for this thread
+ */
+ if (method_desc->method.mutex) {
+
+ /* Acquisition Depth handles recursive calls */
+
+ method_desc->method.mutex->mutex.acquisition_depth--;
+ if (!method_desc->method.mutex->mutex.acquisition_depth) {
+ walk_state->thread->current_sync_level =
+ method_desc->method.mutex->mutex.
+ original_sync_level;
+
+ acpi_os_release_mutex(method_desc->method.
+ mutex->mutex.os_mutex);
+ method_desc->method.mutex->mutex.thread_id = 0;
+ }
+ }
}
/* Decrement the thread count on the method */
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 028b22a3154e..e36218206bb0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -607,11 +607,9 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
}
- acpi_ex_exit_interpreter();
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
- acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
/*
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 3843f1fc5dbb..75ddd160a716 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -45,6 +45,7 @@
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
@@ -597,9 +598,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
}
}
+ acpi_ex_exit_interpreter();
status =
acpi_ev_execute_reg_method(region_obj,
ACPI_REG_CONNECT);
+ acpi_ex_enter_interpreter();
if (acpi_ns_locked) {
status =
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 334d3c5ba617..d1f20143bb11 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -137,7 +137,9 @@ unlock:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Begin Table Object Initialization\n"));
+ acpi_ex_enter_interpreter();
status = acpi_ds_initialize_objects(table_index, node);
+ acpi_ex_exit_interpreter();
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"**** Completed Table Object Initialization\n"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0a029e68d3e..0d099a24f776 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -662,7 +662,7 @@ static int ghes_proc(struct ghes *ghes)
ghes_do_proc(ghes, ghes->estatus);
out:
ghes_clear_estatus(ghes);
- return 0;
+ return rc;
}
static void ghes_add_timer(struct ghes *ghes)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 680531062160..48e19d013170 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -526,6 +526,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
acpi_ec_clear(ec);
}
+#ifdef CONFIG_PM_SLEEP
static bool acpi_ec_query_flushed(struct acpi_ec *ec)
{
bool flushed;
@@ -557,6 +558,7 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
spin_unlock_irqrestore(&ec->lock, flags);
__acpi_ec_flush_event(ec);
}
+#endif /* CONFIG_PM_SLEEP */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 384cfc3083e1..6cf4988206f2 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -129,8 +129,18 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
control = obj->package.elements[1].integer.value;
for (i = 0; i < fan->fps_count; i++) {
- if (control == fan->fps[i].control)
+ /*
+		 * When Fine Grain Control is set, return the state
+		 * with the highest fan->fps[i].control value that does
+		 * not exceed the current speed. Here fan->fps[] is a
+		 * sorted array with increasing speed.
+ */
+ if (fan->fif.fine_grain_ctrl && control < fan->fps[i].control) {
+ i = (i > 0) ? i - 1 : 0;
break;
+ } else if (control == fan->fps[i].control) {
+ break;
+ }
}
if (i == fan->fps_count) {
dev_dbg(&device->dev, "Invalid control value returned\n");
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 4305ee9db4b2..416953a42510 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -162,11 +162,18 @@ void acpi_os_vprintf(const char *fmt, va_list args)
if (acpi_in_debugger) {
kdb_printf("%s", buffer);
} else {
- printk(KERN_CONT "%s", buffer);
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
}
#else
- if (acpi_debugger_write_log(buffer) < 0)
- printk(KERN_CONT "%s", buffer);
+ if (acpi_debugger_write_log(buffer) < 0) {
+ if (printk_get_level(buffer))
+ printk("%s", buffer);
+ else
+ printk(KERN_CONT "%s", buffer);
+ }
#endif
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index c983bf733ad3..bc3d914dfc3e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -87,6 +87,7 @@ struct acpi_pci_link {
static LIST_HEAD(acpi_link_list);
static DEFINE_MUTEX(acpi_link_lock);
+static int sci_irq = -1, sci_penalty;
/* --------------------------------------------------------------------------
PCI Link Device Management
@@ -496,25 +497,13 @@ static int acpi_irq_get_penalty(int irq)
{
int penalty = 0;
- /*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
- * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
- * use for PCI IRQs.
- */
- if (irq == acpi_gbl_FADT.sci_interrupt) {
- u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
-
- if (type != IRQ_TYPE_LEVEL_LOW)
- penalty += PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty += PIRQ_PENALTY_PCI_USING;
- }
+ if (irq == sci_irq)
+ penalty += sci_penalty;
if (irq < ACPI_MAX_ISA_IRQS)
return penalty + acpi_isa_irq_penalty[irq];
- penalty += acpi_irq_pci_sharing_penalty(irq);
- return penalty;
+ return penalty + acpi_irq_pci_sharing_penalty(irq);
}
int __init acpi_irq_penalty_init(void)
@@ -619,6 +608,10 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
+ if (link->irq.active < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_USING;
+
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -849,7 +842,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
continue;
if (used)
- new_penalty = acpi_irq_get_penalty(irq) +
+ new_penalty = acpi_isa_irq_penalty[irq] +
PIRQ_PENALTY_ISA_USED;
else
new_penalty = 0;
@@ -871,7 +864,7 @@ static int __init acpi_irq_penalty_update(char *str, int used)
void acpi_penalize_isa_irq(int irq, int active)
{
if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
- acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ acpi_isa_irq_penalty[irq] +=
(active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
@@ -881,6 +874,17 @@ bool acpi_isa_irq_available(int irq)
acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+ sci_irq = irq;
+
+ if (trigger == ACPI_MADT_TRIGGER_LEVEL &&
+ polarity == ACPI_MADT_POLARITY_ACTIVE_LOW)
+ sci_penalty = PIRQ_PENALTY_PCI_USING;
+ else
+ sci_penalty = PIRQ_PENALTY_ISA_ALWAYS;
+}
+
/*
* Over-ride default table to reserve additional IRQs for use by ISA
* e.g. acpi_irq_isa=5
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index f2fd3fee588a..03f5ec11ab31 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -468,10 +468,11 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
}
/**
- * acpi_data_get_property_reference - returns handle to the referenced object
- * @data: ACPI device data object containing the property
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
* @propname: Name of the property
* @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
* @args: Location to store the returned reference with optional arguments
*
 * Find the property with @propname, verify that it is a package containing at least
@@ -482,17 +483,40 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
* If there's more than one reference in the property value package, @index is
* used to select the one to return.
*
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 returns %-ENOENT and with index %3
+ * returns the last entry. If the property does not contain any more values,
+ * %-ENODATA is returned. The NULL entry must be a single integer and
+ * preferably contain the value %0.
+ *
* Return: %0 on success, negative error code on failure.
*/
-static int acpi_data_get_property_reference(struct acpi_device_data *data,
- const char *propname, size_t index,
- struct acpi_reference_args *args)
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *propname, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
+ struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return -EINVAL;
+
ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj);
if (ret)
return ret;
@@ -532,59 +556,54 @@ static int acpi_data_get_property_reference(struct acpi_device_data *data,
while (element < end) {
u32 nargs, i;
- if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
- return -EPROTO;
-
- ret = acpi_bus_get_device(element->reference.handle, &device);
- if (ret)
- return -ENODEV;
-
- element++;
- nargs = 0;
-
- /* assume following integer elements are all args */
- for (i = 0; element + i < end; i++) {
- int type = element[i].type;
+ if (element->type == ACPI_TYPE_LOCAL_REFERENCE) {
+ ret = acpi_bus_get_device(element->reference.handle,
+ &device);
+ if (ret)
+ return -ENODEV;
+
+ nargs = 0;
+ element++;
+
+ /* assume following integer elements are all args */
+ for (i = 0; element + i < end && i < num_args; i++) {
+ int type = element[i].type;
+
+ if (type == ACPI_TYPE_INTEGER)
+ nargs++;
+ else if (type == ACPI_TYPE_LOCAL_REFERENCE)
+ break;
+ else
+ return -EPROTO;
+ }
- if (type == ACPI_TYPE_INTEGER)
- nargs++;
- else if (type == ACPI_TYPE_LOCAL_REFERENCE)
- break;
- else
+ if (nargs > MAX_ACPI_REFERENCE_ARGS)
return -EPROTO;
- }
- if (idx++ == index) {
- args->adev = device;
- args->nargs = nargs;
- for (i = 0; i < nargs; i++)
- args->args[i] = element[i].integer.value;
+ if (idx == index) {
+ args->adev = device;
+ args->nargs = nargs;
+ for (i = 0; i < nargs; i++)
+ args->args[i] = element[i].integer.value;
- return 0;
+ return 0;
+ }
+
+ element += nargs;
+ } else if (element->type == ACPI_TYPE_INTEGER) {
+ if (idx == index)
+ return -ENOENT;
+ element++;
+ } else {
+ return -EPROTO;
}
- element += nargs;
+ idx++;
}
- return -EPROTO;
-}
-
-/**
- * acpi_node_get_property_reference - get a handle to the referenced object.
- * @fwnode: Firmware node to get the property from.
- * @propname: Name of the property.
- * @index: Index of the reference to return.
- * @args: Location to store the returned reference with optional arguments.
- */
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args)
-{
- struct acpi_device_data *data = acpi_device_data_of_node(fwnode);
-
- return data ? acpi_data_get_property_reference(data, name, index, args) : -EINVAL;
+ return -ENODATA;
}
-EXPORT_SYMBOL_GPL(acpi_node_get_property_reference);
+EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(struct acpi_device_data *data,
const char *propname,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 562af94bec35..3c71b982bf2a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1002,7 +1002,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1010,12 +1010,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1285,7 +1289,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1577,7 +1584,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1589,7 +1598,10 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1624,7 +1636,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1678,6 +1692,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1800,7 +1815,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -1996,7 +2013,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 90eabaf81215..9669fc7c19df 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1400,142 +1400,59 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
-/*
- * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
- * to single msi.
- */
-static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv, unsigned long flags)
+static int ahci_get_irq_vector(struct ata_host *host, int port)
{
- int nvec, i, rc;
-
- /* Do not init MSI-X if MSI is disabled for the device */
- if (hpriv->flags & AHCI_HFLAG_NO_MSI)
- return -ENODEV;
-
- nvec = pci_msix_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
- /*
- * Proper MSI-X implementations will have a vector per-port.
- * Barring that, we prefer single-MSI over single-MSIX. If this
- * check fails (not enough MSI-X vectors for all ports) we will
- * be called again with the flag clear iff ahci_init_msi()
- * fails.
- */
- if (flags & AHCI_HFLAG_MULTI_MSIX) {
- if (nvec < n_ports)
- return -ENODEV;
- nvec = n_ports;
- } else if (nvec) {
- nvec = 1;
- } else {
- /*
- * Emit dev_err() since this was the non-legacy irq
- * method of last resort.
- */
- rc = -ENODEV;
- goto fail;
- }
-
- for (i = 0; i < nvec; i++)
- hpriv->msix[i].entry = i;
- rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
- if (rc < 0)
- goto fail;
-
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
- hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
-
- return nvec;
-fail:
- dev_err(&pdev->dev,
- "failed to enable MSI-X with error %d, # of vectors: %d\n",
- rc, nvec);
-
- return rc;
+ return pci_irq_vector(to_pci_dev(host->dev), port);
}
static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
- int rc, nvec;
+ int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
- nvec = pci_msi_vec_count(pdev);
- if (nvec < 0)
- return nvec;
-
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
 * of multiple MSIs is negated and use single MSI mode instead.
*/
- if (nvec < n_ports)
- goto single_msi;
+ if (n_ports > 1) {
+ nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
+ PCI_IRQ_MSIX | PCI_IRQ_MSI);
+ if (nvec > 0) {
+ if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
+ hpriv->get_irq_vector = ahci_get_irq_vector;
+ hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
+ return nvec;
+ }
- rc = pci_enable_msi_exact(pdev, nvec);
- if (rc == -ENOSPC)
- goto single_msi;
- if (rc < 0)
- return rc;
+ /*
+			 * Fall back to single MSI mode if the controller
+ * enforced MRSM mode.
+ */
+ printk(KERN_INFO
+ "ahci: MRSM is on, fallback to single MSI\n");
+ pci_free_irq_vectors(pdev);
+ }
- /* fallback to single MSI mode if the controller enforced MRSM mode */
- if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
- pci_disable_msi(pdev);
- printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
- goto single_msi;
+ /*
+		 * -ENOSPC indicates we don't have enough vectors. Don't bother
+		 * trying a single vector for any other error:
+ */
+ if (nvec < 0 && nvec != -ENOSPC)
+ return nvec;
}
- if (nvec > 1)
- hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
-
- goto out;
-
-single_msi:
- nvec = 1;
-
- rc = pci_enable_msi(pdev);
- if (rc < 0)
- return rc;
-out:
- hpriv->irq = pdev->irq;
-
- return nvec;
-}
-
-static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
- struct ahci_host_priv *hpriv)
-{
- int nvec;
-
/*
- * Try to enable per-port MSI-X. If the host is not capable
- * fall back to single MSI before finally attempting single
- * MSI-X.
+ * If the host is not capable of supporting per-port vectors, fall
+ * back to single MSI before finally attempting single MSI-X.
*/
- nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
- if (nvec >= 0)
- return nvec;
-
- nvec = ahci_init_msi(pdev, n_ports, hpriv);
- if (nvec >= 0)
- return nvec;
-
- /* try single-msix */
- nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
- if (nvec >= 0)
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec == 1)
return nvec;
-
- /* legacy intx interrupts */
- pci_intx(pdev, 1);
- hpriv->irq = pdev->irq;
-
- return 0;
+ return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1698,11 +1615,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->msix = devm_kzalloc(&pdev->dev,
- sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
- if (!hpriv->msix)
- return -ENOMEM;
- ahci_init_interrupts(pdev, n_ports, hpriv);
+
+ if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
+ /* legacy intx interrupts */
+ pci_intx(pdev, 1);
+ }
+ hpriv->irq = pci_irq_vector(pdev, 0);
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
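
For readers unfamiliar with the PCI IRQ helpers this conversion relies on, here is a rough sketch of the pci_alloc_irq_vectors()/pci_irq_vector() pattern now used by the driver. This is a generic illustration, not AHCI code; pdev, n_ports, handler and data are placeholders:

	int i, nvec;

	/* Request up to n_ports vectors; prefer MSI-X, fall back to MSI. */
	nvec = pci_alloc_irq_vectors(pdev, 1, n_ports,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;	/* no message-signalled interrupts available */

	for (i = 0; i < nvec; i++)
		request_irq(pci_irq_vector(pdev, i), handler, 0, "example", data);

	/* ... and pci_free_irq_vectors(pdev) on teardown. */
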
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 70b06bcfb7e3..0cc08f892fea 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -242,12 +242,10 @@ enum {
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
#ifdef CONFIG_PCI_MSI
- AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
- AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
+ AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
- AHCI_HFLAG_MULTI_MSIX = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
@@ -351,7 +349,6 @@ struct ahci_host_priv {
* the PHY position in this array.
*/
struct phy **phys;
- struct msix_entry *msix; /* Optional MSI-X support */
unsigned nports; /* Number of ports */
void *plat_data; /* Other platform data */
unsigned int irq; /* interrupt line */
@@ -362,22 +359,11 @@ struct ahci_host_priv {
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
-};
-#ifdef CONFIG_PCI_MSI
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
- return hpriv->msix[port].vector;
- else
- return hpriv->irq + port;
-}
-#else
-static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
-{
- return hpriv->irq;
-}
-#endif
+ /* only required for per-port MSI(-X) support */
+ int (*get_irq_vector)(struct ata_host *host,
+ int port);
+};
extern int ahci_ignore_sss;
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 7bdee9bd8786..1eba8dff875e 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -30,24 +30,23 @@
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
+#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8
/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_TRANS_CFG 0x08000029
+#define AHCI_PORT_AXICC_CFG 0x3fffffff
/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
+#define LS1021A_AXICC_ADDR 0xC0
#define SATA_ECC_DISABLE 0x00020000
-/* for ls1043a */
-#define LS1043A_PORT_PHY2 0x28184d1f
-#define LS1043A_PORT_PHY3 0x0e081509
-
enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,
@@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = {
.hardreset = ahci_qoriq_hardreset,
};
-static struct ata_port_info ahci_qoriq_port_info = {
+static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
@@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
break;
case AHCI_LS1043A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
- writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
- writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
+ writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}
@@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;
- /* Workaround for ls2080a */
- if (qoriq_priv->type == AHCI_LS2080A) {
- hpriv->flags |= AHCI_HFLAG_NO_NCQ;
- ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
- }
-
rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 8ff428fe8e0f..bc345f249555 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = {
static int st_ahci_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;
@@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);
+ of_property_read_u32(dev->of_node,
+ "ports-implemented", &hpriv->force_port_map);
+
err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index dcf2c724fd06..0d028ead99e8 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
- int irq = ahci_irq_vector(hpriv, i);
+ int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {
@@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;
- if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+ if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+ if (!hpriv->get_irq_vector) {
+ dev_err(host->dev,
+ "AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
+ return -EIO;
+ }
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e207b33e4ce9..9cceb4a875a5 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
- sdev->no_report_opcodes = 1;
- sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to
@@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}
+/**
+ * ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
+ * @cmd: SCSI command being translated
+ * @trmax: Maximum number of entries that will fit in sector_size bytes.
+ * @sector: Starting sector
+ * @count: Total Range of request in logical sectors
+ *
+ * Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
+ * descriptor.
+ *
+ * Up to 64 entries of the format:
+ * 63:48 Range Length
+ * 47:0 LBA
+ *
+ * Range Length of 0 is ignored.
+ * LBAs should be in sorted order and must not overlap.
+ *
+ * NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
+ u64 sector, u32 count)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ __le64 *buf;
+ u32 i = 0;
+ unsigned long flags;
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+ memset(buf, 0, len);
+ while (i < trmax) {
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buf[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_format_sct_write_same() - SATL Write Same to ATA SCT Write Same
+ * @cmd: SCSI command being translated
+ * @lba: Starting sector
+ * @num: Number of sectors to be zeroed.
+ *
+ * Rewrite the WRITE SAME payload to be an SCT Write Same formatted
+ * descriptor.
+ * NOTE: Writes a pattern (0's) in the foreground.
+ *
+ * Return: Number of bytes copied into sglist.
+ */
+static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
+{
+ struct scsi_device *sdp = cmd->device;
+ size_t len = sdp->sector_size;
+ size_t r;
+ u16 *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
+ buf = ((void *)ata_scsi_rbuf);
+
+ put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */
+ put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */
+ put_unaligned_le64(lba, &buf[2]);
+ put_unaligned_le64(num, &buf[6]);
+ put_unaligned_le32(0u, &buf[10]); /* pattern */
+
+ WARN_ON(len > ATA_SCSI_RBUF_SIZE);
+
+ if (len > ATA_SCSI_RBUF_SIZE)
+ len = ATA_SCSI_RBUF_SIZE;
+
+ r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
+ spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
+
+ return r;
+}
+
+/**
+ * ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
+ * @qc: Command to be translated
+ *
+ * Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
+ * an SCT Write Same command.
+ * Based on whether the WRITE SAME command carries the UNMAP flag:
+ * when set, translate to DSM TRIM;
+ * when clear, translate to SCT Write Same.
+ */
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
+ struct scsi_device *sdp = scmd->device;
+ size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
+ const u32 trmax = len >> 3;
u32 size;
- void *buf;
u16 fp;
u8 bp = 0xff;
+ u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
@@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
- /* for now we only support WRITE SAME with the unmap bit set */
- if (unlikely(!(cdb[1] & 0x8))) {
- fp = 1;
- bp = 3;
- goto invalid_fld;
+ if (unmap) {
+ /* If trim is not enabled the cmd is invalid. */
+ if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
+ !ata_id_has_trim(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
+ /* If the request is too large the cmd is invalid */
+ if (n_block > 0xffff * trmax) {
+ fp = 2;
+ goto invalid_fld;
+ }
+ } else {
+ /* If write same is not available the cmd is invalid */
+ if (!ata_id_sct_write_same(dev->id)) {
+ fp = 1;
+ bp = 3;
+ goto invalid_fld;
+ }
}
/*
@@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd))
goto invalid_param_len;
- buf = page_address(sg_page(scsi_sglist(scmd)));
-
- if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
- size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
- } else {
- fp = 2;
- goto invalid_fld;
- }
+ /*
+	 * size must match the sector size in bytes.
+	 * For DATA SET MANAGEMENT TRIM in ACS-2, nsect (aka count)
+	 * is defined as the number of 512-byte blocks to be transferred.
+ */
+ if (unmap) {
+ size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
- if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
- /* Newer devices support queued TRIM commands */
- tf->protocol = ATA_PROT_NCQ;
- tf->command = ATA_CMD_FPDMA_SEND;
- tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
- tf->nsect = qc->tag << 3;
- tf->hob_feature = (size / 512) >> 8;
- tf->feature = size / 512;
+ if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+ /* Newer devices support queued TRIM commands */
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_SEND;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->hob_feature = (size / 512) >> 8;
+ tf->feature = size / 512;
- tf->auxiliary = 1;
+ tf->auxiliary = 1;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = 0;
+ tf->feature = ATA_DSM_TRIM;
+ tf->hob_nsect = (size / 512) >> 8;
+ tf->nsect = size / 512;
+ tf->command = ATA_CMD_DSM;
+ }
} else {
- tf->protocol = ATA_PROT_DMA;
+ size = ata_format_sct_write_same(scmd, block, n_block);
+ if (size != len)
+ goto invalid_param_len;
+
tf->hob_feature = 0;
- tf->feature = ATA_DSM_TRIM;
- tf->hob_nsect = (size / 512) >> 8;
- tf->nsect = size / 512;
- tf->command = ATA_CMD_DSM;
+ tf->feature = 0;
+ tf->hob_nsect = 0;
+ tf->nsect = 1;
+ tf->lbah = 0;
+ tf->lbam = 0;
+ tf->lbal = ATA_CMD_STANDBYNOW1;
+ tf->hob_lbah = 0;
+ tf->hob_lbam = 0;
+ tf->hob_lbal = 0;
+ tf->device = ATA_CMD_STANDBYNOW1;
+ tf->protocol = ATA_PROT_DMA;
+ tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
+ if (unlikely(dev->flags & ATA_DFLAG_PIO))
+ tf->command = ATA_CMD_WRITE_LOG_EXT;
}
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
@@ -3368,6 +3510,76 @@ invalid_opcode:
}
/**
+ * ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
+ * @args: device MAINTENANCE_IN data / SCSI command of interest.
+ * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
+ *
+ * Yields a subset to satisfy scsi_report_opcode()
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_device *dev = args->dev;
+ u8 *cdb = args->cmd->cmnd;
+ u8 supported = 0;
+ unsigned int err = 0;
+
+ if (cdb[2] != 1) {
+ ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
+ err = 2;
+ goto out;
+ }
+ switch (cdb[3]) {
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case READ_CAPACITY:
+ case SERVICE_ACTION_IN_16:
+ case REPORT_LUNS:
+ case REQUEST_SENSE:
+ case SYNCHRONIZE_CACHE:
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ case TEST_UNIT_READY:
+ case SEND_DIAGNOSTIC:
+ case MAINTENANCE_IN:
+ case READ_6:
+ case READ_10:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_16:
+ case ATA_12:
+ case ATA_16:
+ case VERIFY:
+ case VERIFY_16:
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ case START_STOP:
+ supported = 3;
+ break;
+ case WRITE_SAME_16:
+ if (!ata_id_sct_write_same(dev->id))
+ break;
+ /* fallthrough: if SCT ... only enable for ZBC */
+ case ZBC_IN:
+ case ZBC_OUT:
+ if (ata_id_zoned_cap(dev->id) ||
+ dev->class == ATA_DEV_ZAC)
+ supported = 3;
+ break;
+ default:
+ break;
+ }
+out:
+ rbuf[1] = supported; /* supported */
+ return err;
+}
+
+/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data
*
@@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
- char mpage[CACHE_MPAGE_LEN];
+ u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;
@@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
- char mpage[CONTROL_MPAGE_LEN];
+ u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;
@@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
- qc->scsicmd->result = SAM_STAT_GOOD;
- qc->scsicmd->scsi_done(qc->scsicmd);
return 0;
}
@@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
+ } else {
+ goto skip; /* No ATA command to send */
}
break;
default: /* invalid page code */
@@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_invalid_field(dev, cmd, 1);
break;
+ case MAINTENANCE_IN:
+ if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
+ else
+ ata_scsi_invalid_field(dev, cmd, 1);
+ break;
+
/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
@@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
- shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
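[Editor's note, not part of the patch: the TRIM hunks above carry the payload length in 512-byte blocks split across the low and high sector-count bytes of the LBA48 taskfile. A minimal sketch of that packing, with a hypothetical helper name:

	/* Illustration of the nsect/hob_nsect packing used above (hypothetical helper). */
	#include <stdint.h>

	struct trim_count { uint8_t nsect; uint8_t hob_nsect; };

	static struct trim_count pack_trim_blocks(uint32_t payload_bytes)
	{
		uint32_t blocks = payload_bytes / 512;	/* ACS-2: count is in 512-byte blocks */
		struct trim_count tc = {
			.nsect     = blocks & 0xff,		/* low byte, as in tf->nsect */
			.hob_nsect = (blocks >> 8) & 0xff,	/* high byte, as in tf->hob_nsect */
		};
		return tc;
	}
]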
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 9f27b14009f9..1611e0e8d767 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields.mode = devm_regmap_field_alloc(dev, smc, field);
- if (IS_ERR(fields.mode))
- return PTR_ERR(fields.mode);
- return 0;
+ return PTR_ERR_OR_ZERO(fields.mode);
}
static int pata_at91_probe(struct platform_device *pdev)
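[Editor's note, not part of the patch: the hunk above collapses the IS_ERR/PTR_ERR/return-0 pattern into PTR_ERR_OR_ZERO(). A simplified sketch of what that helper amounts to, for illustration only:

	/* Simplified sketch of the PTR_ERR_OR_ZERO() idiom from include/linux/err.h. */
	#include <linux/err.h>

	static inline long example_ptr_err_or_zero(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);	/* propagate the encoded errno */
		return 0;			/* valid pointer: success */
	}
]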
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 27245957eee3..475a00669427 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
- if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
- BUG();
+ BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t1 = timing.setup;
if (t1)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 745489a1c86a..efc48bf89d51 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap)
return -ENOMEM;
ap->private_data = pp;
- pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
- memset(pp->crqb, 0, MV_CRQB_Q_SZ);
- pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
- memset(pp->crpb, 0, MV_CRPB_Q_SZ);
/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index c07e725ea93d..10e1b9eee10e 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -119,4 +119,13 @@ config CFAG12864B_RATE
If you compile this as a module, you can still override this
value using the module parameters.
+config IMG_ASCII_LCD
+ tristate "Imagination Technologies ASCII LCD Display"
+ default y if MIPS_MALTA || MIPS_SEAD3
+ select SYSCON
+ help
+ Enable this to support the simple ASCII LCD displays found on
+ development boards such as the MIPS Boston, MIPS Malta & MIPS SEAD3
+ from Imagination Technologies.
+
endif # AUXDISPLAY
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 8a8936a468b9..3127175c89df 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -4,3 +4,4 @@
obj-$(CONFIG_KS0108) += ks0108.o
obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
+obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
new file mode 100644
index 000000000000..bf43b5d2aafc
--- /dev/null
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+struct img_ascii_lcd_ctx;
+
+/**
+ * struct img_ascii_lcd_config - Configuration information about an LCD model
+ * @num_chars: the number of characters the LCD can display
+ * @external_regmap: true if registers are in a system controller, else false
+ * @update: function called to update the LCD
+ */
+struct img_ascii_lcd_config {
+ unsigned int num_chars;
+ bool external_regmap;
+ void (*update)(struct img_ascii_lcd_ctx *ctx);
+};
+
+/**
+ * struct img_ascii_lcd_ctx - Private data structure
+ * @pdev: the ASCII LCD platform device
+ * @base: the base address of the LCD registers
+ * @regmap: the regmap through which LCD registers are accessed
+ * @offset: the offset within regmap to the start of the LCD registers
+ * @cfg: pointer to the LCD model configuration
+ * @message: the full message to display or scroll on the LCD
+ * @message_len: the length of the @message string
+ * @scroll_pos: index of the first character of @message currently displayed
+ * @scroll_rate: scroll interval in jiffies
+ * @timer: timer used to implement scrolling
+ * @curr: the string currently displayed on the LCD
+ */
+struct img_ascii_lcd_ctx {
+ struct platform_device *pdev;
+ union {
+ void __iomem *base;
+ struct regmap *regmap;
+ };
+ u32 offset;
+ const struct img_ascii_lcd_config *cfg;
+ char *message;
+ unsigned int message_len;
+ unsigned int scroll_pos;
+ unsigned int scroll_rate;
+ struct timer_list timer;
+ char curr[] __aligned(8);
+};
+
+/*
+ * MIPS Boston development board
+ */
+
+static void boston_update(struct img_ascii_lcd_ctx *ctx)
+{
+ ulong val;
+
+#if BITS_PER_LONG == 64
+ val = *((u64 *)&ctx->curr[0]);
+ __raw_writeq(val, ctx->base);
+#elif BITS_PER_LONG == 32
+ val = *((u32 *)&ctx->curr[0]);
+ __raw_writel(val, ctx->base);
+ val = *((u32 *)&ctx->curr[4]);
+ __raw_writel(val, ctx->base + 4);
+#else
+# error Not 32 or 64 bit
+#endif
+}
+
+static struct img_ascii_lcd_config boston_config = {
+ .num_chars = 8,
+ .update = boston_update,
+};
+
+/*
+ * MIPS Malta development board
+ */
+
+static void malta_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = regmap_write(ctx->regmap,
+ ctx->offset + (i * 8), ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config malta_config = {
+ .num_chars = 8,
+ .external_regmap = true,
+ .update = malta_update,
+};
+
+/*
+ * MIPS SEAD3 development board
+ */
+
+enum {
+ SEAD3_REG_LCD_CTRL = 0x00,
+#define SEAD3_REG_LCD_CTRL_SETDRAM BIT(7)
+ SEAD3_REG_LCD_DATA = 0x08,
+ SEAD3_REG_CPLD_STATUS = 0x10,
+#define SEAD3_REG_CPLD_STATUS_BUSY BIT(0)
+ SEAD3_REG_CPLD_DATA = 0x18,
+#define SEAD3_REG_CPLD_DATA_BUSY BIT(7)
+};
+
+static int sead3_wait_sm_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int status;
+ int err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_STATUS,
+ &status);
+ if (err)
+ return err;
+ } while (status & SEAD3_REG_CPLD_STATUS_BUSY);
+
+ return 0;
+
+}
+
+static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int cpld_data;
+ int err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ do {
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ &cpld_data);
+ if (err)
+ return err;
+
+ err = sead3_wait_sm_idle(ctx);
+ if (err)
+ return err;
+
+ err = regmap_read(ctx->regmap,
+ ctx->offset + SEAD3_REG_CPLD_DATA,
+ &cpld_data);
+ if (err)
+ return err;
+ } while (cpld_data & SEAD3_REG_CPLD_DATA_BUSY);
+
+ return 0;
+}
+
+static void sead3_update(struct img_ascii_lcd_ctx *ctx)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < ctx->cfg->num_chars; i++) {
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_CTRL,
+ SEAD3_REG_LCD_CTRL_SETDRAM | i);
+ if (err)
+ break;
+
+ err = sead3_wait_lcd_idle(ctx);
+ if (err)
+ break;
+
+ err = regmap_write(ctx->regmap,
+ ctx->offset + SEAD3_REG_LCD_DATA,
+ ctx->curr[i]);
+ if (err)
+ break;
+ }
+
+ if (unlikely(err))
+ pr_err_ratelimited("Failed to update LCD display: %d\n", err);
+}
+
+static struct img_ascii_lcd_config sead3_config = {
+ .num_chars = 16,
+ .external_regmap = true,
+ .update = sead3_update,
+};
+
+static const struct of_device_id img_ascii_lcd_matches[] = {
+ { .compatible = "img,boston-lcd", .data = &boston_config },
+ { .compatible = "mti,malta-lcd", .data = &malta_config },
+ { .compatible = "mti,sead3-lcd", .data = &sead3_config },
+};
+
+/**
+ * img_ascii_lcd_scroll() - scroll the display by a character
+ * @arg: really a pointer to the private data structure
+ *
+ * Scroll the current message along the LCD by one character, rearming the
+ * timer if required.
+ */
+static void img_ascii_lcd_scroll(unsigned long arg)
+{
+ struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg;
+ unsigned int i, ch = ctx->scroll_pos;
+ unsigned int num_chars = ctx->cfg->num_chars;
+
+ /* update the current message string */
+ for (i = 0; i < num_chars;) {
+ /* copy as many characters from the string as possible */
+ for (; i < num_chars && ch < ctx->message_len; i++, ch++)
+ ctx->curr[i] = ctx->message[ch];
+
+ /* wrap around to the start of the string */
+ ch = 0;
+ }
+
+ /* update the LCD */
+ ctx->cfg->update(ctx);
+
+ /* move on to the next character */
+ ctx->scroll_pos++;
+ ctx->scroll_pos %= ctx->message_len;
+
+ /* rearm the timer */
+ if (ctx->message_len > ctx->cfg->num_chars)
+ mod_timer(&ctx->timer, jiffies + ctx->scroll_rate);
+}
+
+/**
+ * img_ascii_lcd_display() - set the message to be displayed
+ * @ctx: pointer to the private data structure
+ * @msg: the message to display
+ * @count: length of msg, or -1
+ *
+ * Display a new message @msg on the LCD. @msg can be longer than the number of
+ * characters the LCD can display, in which case it will begin scrolling across
+ * the LCD display.
+ *
+ * Return: 0 on success, -ENOMEM on memory allocation failure
+ */
+static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx,
+ const char *msg, ssize_t count)
+{
+ char *new_msg;
+
+ /* stop the scroll timer */
+ del_timer_sync(&ctx->timer);
+
+ if (count == -1)
+ count = strlen(msg);
+
+ /* if the string ends with a newline, trim it */
+ if (msg[count - 1] == '\n')
+ count--;
+
+ new_msg = devm_kmalloc(&ctx->pdev->dev, count + 1, GFP_KERNEL);
+ if (!new_msg)
+ return -ENOMEM;
+
+ memcpy(new_msg, msg, count);
+ new_msg[count] = 0;
+
+ if (ctx->message)
+ devm_kfree(&ctx->pdev->dev, ctx->message);
+
+ ctx->message = new_msg;
+ ctx->message_len = count;
+ ctx->scroll_pos = 0;
+
+ /* update the LCD */
+ img_ascii_lcd_scroll((unsigned long)ctx);
+
+ return 0;
+}
+
+/**
+ * message_show() - read message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer to read the message into
+ *
+ * Read the current message being displayed or scrolled across the LCD display
+ * into @buf, for reads from sysfs.
+ *
+ * Return: the number of characters written to @buf
+ */
+static ssize_t message_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", ctx->message);
+}
+
+/**
+ * message_store() - write a new message via sysfs
+ * @dev: the LCD device
+ * @attr: the LCD message attribute
+ * @buf: the buffer containing the new message
+ * @count: the size of the message in @buf
+ *
+ * Write a new message to display or scroll across the LCD display from sysfs.
+ *
+ * Return: the size of the message on success, else -ERRNO
+ */
+static ssize_t message_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct img_ascii_lcd_ctx *ctx = dev_get_drvdata(dev);
+ int err;
+
+ err = img_ascii_lcd_display(ctx, buf, count);
+ return err ?: count;
+}
+
+static DEVICE_ATTR_RW(message);
+
+/**
+ * img_ascii_lcd_probe() - probe an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Probe an LCD display device, ensuring that we have the required resources in
+ * order to access the LCD & setting up private data as well as sysfs files.
+ *
+ * Return: 0 on success, else -ERRNO
+ */
+static int img_ascii_lcd_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const struct img_ascii_lcd_config *cfg;
+ struct img_ascii_lcd_ctx *ctx;
+ struct resource *res;
+ int err;
+
+ match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ cfg = match->data;
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx) + cfg->num_chars,
+ GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (cfg->external_regmap) {
+ ctx->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(ctx->regmap))
+ return PTR_ERR(ctx->regmap);
+
+ if (of_property_read_u32(pdev->dev.of_node, "offset",
+ &ctx->offset))
+ return -EINVAL;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+ }
+
+ ctx->pdev = pdev;
+ ctx->cfg = cfg;
+ ctx->message = NULL;
+ ctx->scroll_pos = 0;
+ ctx->scroll_rate = HZ / 2;
+
+ /* initialise a timer for scrolling the message */
+ init_timer(&ctx->timer);
+ ctx->timer.function = img_ascii_lcd_scroll;
+ ctx->timer.data = (unsigned long)ctx;
+
+ platform_set_drvdata(pdev, ctx);
+
+ /* display a default message */
+ err = img_ascii_lcd_display(ctx, "Linux " UTS_RELEASE " ", -1);
+ if (err)
+ goto out_del_timer;
+
+ err = device_create_file(&pdev->dev, &dev_attr_message);
+ if (err)
+ goto out_del_timer;
+
+ return 0;
+out_del_timer:
+ del_timer_sync(&ctx->timer);
+ return err;
+}
+
+/**
+ * img_ascii_lcd_remove() - remove an LCD display device
+ * @pdev: the LCD platform device
+ *
+ * Remove an LCD display device, freeing private resources & ensuring that the
+ * driver stops using the LCD display registers.
+ *
+ * Return: 0
+ */
+static int img_ascii_lcd_remove(struct platform_device *pdev)
+{
+ struct img_ascii_lcd_ctx *ctx = platform_get_drvdata(pdev);
+
+ device_remove_file(&pdev->dev, &dev_attr_message);
+ del_timer_sync(&ctx->timer);
+ return 0;
+}
+
+static struct platform_driver img_ascii_lcd_driver = {
+ .driver = {
+ .name = "img-ascii-lcd",
+ .of_match_table = img_ascii_lcd_matches,
+ },
+ .probe = img_ascii_lcd_probe,
+ .remove = img_ascii_lcd_remove,
+};
+module_platform_driver(img_ascii_lcd_driver);
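[Editor's note, not part of the patch: the new driver exposes the display text through the sysfs "message" attribute created in img_ascii_lcd_probe(). A hedged userspace sketch follows; the sysfs path is an assumption and depends on how the platform device is named on a given board.

	/* Hypothetical userspace example: set the LCD text via the "message" attribute.
	 * The path below is an assumption; it varies with the platform device name. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/platform/img-ascii-lcd/message", "w");

		if (!f) {
			perror("open message attribute");
			return 1;
		}
		/* Strings longer than the panel width scroll, see img_ascii_lcd_scroll(). */
		fputs("Hello from userspace\n", f);
		fclose(f);
		return 0;
	}
]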
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fdf44cac08e6..d02e7c0f5bfd 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -213,14 +213,16 @@ config DEBUG_DEVRES
If you are unsure about this, Say N here.
config DEBUG_TEST_DRIVER_REMOVE
- bool "Test driver remove calls during probe"
+ bool "Test driver remove calls during probe (UNSTABLE)"
depends on DEBUG_KERNEL
help
Say Y here if you want the Driver core to test driver remove functions
by calling probe, remove, probe. This tests the remove path without
having to unbind the driver or unload the driver module.
- If you are unsure about this, say N here.
+ This option is expected to find errors and may render your system
+ unusable. You should say N here unless you are explicitly looking to
+ test this functionality.
config SYS_HYPERVISOR
bool
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 811e11c82f32..0809cda93cc0 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2954,7 +2954,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_PD_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)) {
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
@@ -2990,7 +2990,7 @@ DAC960_DetectController(struct pci_dev *PCI_Device,
case DAC960_P_Controller:
if (!request_region(Controller->IO_Address, 0x80,
Controller->FullModelName)){
- DAC960_Error("IO port 0x%d busy for Controller at\n",
+ DAC960_Error("IO port 0x%lx busy for Controller at\n",
Controller, Controller->IO_Address);
goto Failure;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index ba405b55329f..19a16b2dbb91 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -164,7 +164,7 @@ static void sock_shutdown(struct nbd_device *nbd)
spin_lock(&nbd->sock_lock);
if (!nbd->sock) {
- spin_unlock_irq(&nbd->sock_lock);
+ spin_unlock(&nbd->sock_lock);
return;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
bool was_lock_owner = false;
+ bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
was_lock_owner = rbd_release_lock(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
- if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
- goto fail_unlock;
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED)
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
- goto fail_unlock;
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
}
+ need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
ret);
}
+out:
up_write(&rbd_dev->lock_rwsem);
- wake_requests(rbd_dev, true);
- return;
-
-fail_unlock:
- mutex_unlock(&rbd_dev->watch_mutex);
- up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
}
/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
up_read(&rbd_dev->lock_rwsem);
schedule();
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
finish_wait(&rbd_dev->lock_waitq, &wait);
}
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
}
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index ef51c9c864c5..b6bb58c41df5 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -310,7 +310,7 @@ static int bt_ti_probe(struct platform_device *pdev)
BT_DBG("HCI device registered (hdev %p)", hdev);
dev_set_drvdata(&pdev->dev, hst);
- return err;
+ return 0;
}
static int bt_ti_remove(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 5ccb90ef0146..8f6c23c20c52 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,6 +643,14 @@ static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = {
},
.driver_data = &acpi_active_low,
},
+ { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
+ .ident = "Lenovo ThinkPad 8",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = &acpi_active_low,
+ },
{ }
};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 7010dcac9328..78751057164a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -111,6 +111,7 @@ config OMAP_OCP2SCP
config QCOM_EBI2
bool "Qualcomm External Bus Interface 2 (EBI2)"
depends on HAS_IOMEM
+ depends on ARCH_QCOM || COMPILE_TEST
help
Say y here to enable support for the Qualcomm External Bus
Interface 2, which can be used to connect things like NAND Flash,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 482794526e8c..d2d2c89de5b4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -84,14 +84,14 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
- unsigned char bytes[16];
int bytes_read;
+ size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ bytes_read = rng_get_data(rng, rng_buffer, size, 1);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(bytes, bytes_read);
+ add_device_randomness(rng_buffer, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
the IPMI management controller is capable of this.
endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..b49e61320952
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate with the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole message before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+/*
+ * BT Message response format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+	 * require at least the minimum 5-byte response size
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ unsigned int mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= POLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
+
+static void poll_timer(unsigned long data)
+{
+ struct bt_bmc *bt_bmc = (void *)data;
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = ioread32(bt_bmc->base + BT_CR2);
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ iowrite32(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 reg;
+ int rc;
+
+ bt_bmc->irq = platform_get_irq(pdev, 0);
+ if (!bt_bmc->irq)
+ return -ENODEV;
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = 0;
+ return rc;
+ }
+
+ /*
+	 * Configure IRQs on the bmc, clearing the H2B and HBUSY bits;
+ * H2B will be asserted when the bmc has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = ioread32(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ iowrite32(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR,
+ bt_bmc->miscdev.name = DEVICE_NAME,
+ bt_bmc->miscdev.fops = &bt_bmc_fops,
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ setup_timer(&bt_bmc->poll_timer, poll_timer,
+ (unsigned long)bt_bmc);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (!bt_bmc->irq)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-bt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the BT interface");
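[Editor's note, not part of the patch: as the comments in bt_bmc_read() describe, each BT request read from the miscdevice starts with a length byte followed by NetFn/LUN, Seq, Cmd and data. A hedged userspace sketch of reading one message on the BMC side; the /dev node name follows DEVICE_NAME, and error handling is trimmed.

	/* Hypothetical BMC-side reader for /dev/ipmi-bt-host.
	 * msg[0] is the BT length field, then NetFn/LUN, Seq, Cmd, data. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char msg[256];
		int fd = open("/dev/ipmi-bt-host", O_RDWR);
		ssize_t n;

		if (fd < 0)
			return 1;
		n = read(fd, msg, sizeof(msg));	/* blocks until the host raises H2B_ATN */
		if (n > 4)
			printf("len=%d netfn/lun=0x%02x seq=%d cmd=0x%02x (%zd bytes)\n",
			       msg[0], msg[1], msg[2], msg[3], n);
		close(fd);
		return 0;
	}
]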
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->curr_channel = IPMI_MAX_CHANNELS;
}
+ rv = ipmi_bmc_register(intf, i);
+
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i);
-
out:
if (rv) {
if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
int intf_num = intf->intf_num;
ipmi_user_t user;
- ipmi_bmc_unregister(intf);
-
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
mutex_unlock(&ipmi_interfaces_mutex);
remove_proc_entries(intf);
+ ipmi_bmc_unregister(intf);
/*
* Call all the watcher interfaces to tell them that
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d131e152c8ce..d6876d506220 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -479,8 +479,8 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 190122e64a3a..85a449cf61e3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -203,7 +203,7 @@ at91_clk_register_programmable(struct regmap *regmap,
ret = clk_hw_register(NULL, &prog->hw);
if (ret) {
kfree(prog);
- hw = &prog->hw;
+ hw = ERR_PTR(ret);
}
return hw;
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index b68bf573dcfb..8c7763fd9efc 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -502,8 +502,12 @@ static long bcm2835_pll_rate_from_divisors(unsigned long parent_rate,
static long bcm2835_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ const struct bcm2835_pll_data *data = pll->data;
u32 ndiv, fdiv;
+ rate = clamp(rate, data->min_rate, data->max_rate);
+
bcm2835_pll_choose_ndiv_and_fdiv(rate, *parent_rate, &ndiv, &fdiv);
return bcm2835_pll_rate_from_divisors(*parent_rate, ndiv, fdiv, 1);
@@ -608,13 +612,6 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
u32 ana[4];
int i;
- if (rate < data->min_rate || rate > data->max_rate) {
- dev_err(cprman->dev, "%s: rate out of spec: %lu vs (%lu, %lu)\n",
- clk_hw_get_name(hw), rate,
- data->min_rate, data->max_rate);
- return -EINVAL;
- }
-
if (rate > data->max_fb_rate) {
use_fb_prediv = true;
rate /= 2;
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index b637f5979023..eb953d3b0b69 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -216,6 +216,7 @@ static int max77686_clk_probe(struct platform_device *pdev)
return -EINVAL;
}
+ drv_data->num_clks = num_clks;
drv_data->max_clk_data = devm_kcalloc(dev, num_clks,
sizeof(*drv_data->max_clk_data),
GFP_KERNEL);
diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c
index fe364e63f8de..c0e8e1f196aa 100644
--- a/drivers/clk/hisilicon/clk-hi6220.c
+++ b/drivers/clk/hisilicon/clk-hi6220.c
@@ -195,7 +195,7 @@ static void __init hi6220_clk_sys_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_sys,
ARRAY_SIZE(hi6220_div_clks_sys), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init);
/* clocks in media controller */
@@ -252,7 +252,7 @@ static void __init hi6220_clk_media_init(struct device_node *np)
hi6220_clk_register_divider(hi6220_div_clks_media,
ARRAY_SIZE(hi6220_div_clks_media), clk_data);
}
-CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
+CLK_OF_DECLARE_DRIVER(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init);
/* clocks in pmctrl */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 380c372d528e..f042bd2a6a99 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -8,6 +8,7 @@ config COMMON_CLK_MEDIATEK
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
@@ -15,6 +16,7 @@ config COMMON_CLK_MT8135
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 45905fc0d75b..cecb0fdfaef6 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -305,7 +305,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
- struct device *dev, struct clk_hw *hw)
+ struct device *dev, struct clk_hw **hw)
{
const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
*rate_ops = NULL;
@@ -329,6 +329,7 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
gate->lock = lock;
gate_ops = gate_hw->init->ops;
gate->reg = reg + (u64)gate->reg;
+ gate->flags = CLK_GATE_SET_TO_DISABLE;
}
if (data->rate_hw) {
@@ -353,13 +354,13 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
- hw = clk_hw_register_composite(dev, data->name, data->parent_names,
+ *hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ if (IS_ERR(*hw))
+ return PTR_ERR(*hw);
return 0;
}
@@ -400,7 +401,7 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
spin_lock_init(&driver_data->lock);
for (i = 0; i < num_periph; i++) {
- struct clk_hw *hw = driver_data->hw_data->hws[i];
+ struct clk_hw **hw = &driver_data->hw_data->hws[i];
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
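[Editor's note, not part of the patch: the armada-37xx fix turns hw into an out parameter. With the old signature the registered clk_hw was assigned to a local copy of the pointer, so the caller's hws[] slot was never filled in. A reduced illustration with generic names:

	/* Why the callee needs a pointer-to-pointer to hand an object back
	 * into the caller's array slot (illustration only). */
	#include <stdlib.h>

	struct obj { int id; };

	/* Broken: assigns to a local copy; the caller's slot is untouched. */
	static int make_obj_broken(struct obj *slot)
	{
		slot = malloc(sizeof(*slot));
		return slot ? 0 : -1;
	}

	/* Fixed: writes through the extra level of indirection. */
	static int make_obj(struct obj **slot)
	{
		*slot = malloc(sizeof(**slot));
		return *slot ? 0 : -1;
	}
]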
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 51d152f735cc..17e68a724945 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -106,6 +106,7 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
},
{ },
};
+MODULE_DEVICE_TABLE(of, exynos_audss_clk_of_match);
static void exynos_audss_clk_teardown(void)
{
diff --git a/drivers/clk/uniphier/clk-uniphier-core.c b/drivers/clk/uniphier/clk-uniphier-core.c
index 5ffb898d0839..26c53f7963a4 100644
--- a/drivers/clk/uniphier/clk-uniphier-core.c
+++ b/drivers/clk/uniphier/clk-uniphier-core.c
@@ -79,7 +79,7 @@ static int uniphier_clk_probe(struct platform_device *pdev)
hw_data->num = clk_num;
/* avoid returning NULL for unused idx */
- for (; clk_num >= 0; clk_num--)
+ while (--clk_num >= 0)
hw_data->hws[clk_num] = ERR_PTR(-EINVAL);
for (p = data; p->name; p++) {
@@ -111,6 +111,10 @@ static int uniphier_clk_remove(struct platform_device *pdev)
static const struct of_device_id uniphier_clk_match[] = {
/* System clock */
{
+ .compatible = "socionext,uniphier-sld3-clock",
+ .data = uniphier_sld3_sys_clk_data,
+ },
+ {
.compatible = "socionext,uniphier-ld4-clock",
.data = uniphier_ld4_sys_clk_data,
},
@@ -138,7 +142,7 @@ static const struct of_device_id uniphier_clk_match[] = {
.compatible = "socionext,uniphier-ld20-clock",
.data = uniphier_ld20_sys_clk_data,
},
- /* Media I/O clock */
+ /* Media I/O clock, SD clock */
{
.compatible = "socionext,uniphier-sld3-mio-clock",
.data = uniphier_sld3_mio_clk_data,
@@ -156,20 +160,20 @@ static const struct of_device_id uniphier_clk_match[] = {
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pro5-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-pxs2-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-clock",
.data = uniphier_sld3_mio_clk_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-clock",
- .data = uniphier_pro5_mio_clk_data,
+ .compatible = "socionext,uniphier-ld20-sd-clock",
+ .data = uniphier_pro5_sd_clk_data,
},
/* Peripheral clock */
{
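[Editor's note, not part of the patch: the loop rewrite in uniphier_clk_probe() above fixes an off-by-one. On entry clk_num equals the array length, so the old post-test form wrote one slot past the end of hws[] before counting down; the pre-decrement form only touches valid indices. A reduced sketch with generic names:

	/* Reduced illustration of the off-by-one fixed above. */
	#define NUM 4
	void *hws[NUM];

	void init_broken(int n)		/* called with n == NUM */
	{
		for (; n >= 0; n--)
			hws[n] = 0;	/* first iteration writes hws[NUM]: out of bounds */
	}

	void init_fixed(int n)		/* called with n == NUM */
	{
		while (--n >= 0)
			hws[n] = 0;	/* only indices NUM-1 .. 0 are touched */
	}
]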
diff --git a/drivers/clk/uniphier/clk-uniphier-mio.c b/drivers/clk/uniphier/clk-uniphier-mio.c
index 6aa7ec768d0b..218d20f099ce 100644
--- a/drivers/clk/uniphier/clk-uniphier-mio.c
+++ b/drivers/clk/uniphier/clk-uniphier-mio.c
@@ -93,7 +93,7 @@ const struct uniphier_clk_data uniphier_sld3_mio_clk_data[] = {
{ /* sentinel */ }
};
-const struct uniphier_clk_data uniphier_pro5_mio_clk_data[] = {
+const struct uniphier_clk_data uniphier_pro5_sd_clk_data[] = {
UNIPHIER_MIO_CLK_SD_FIXED,
UNIPHIER_MIO_CLK_SD(0, 0),
UNIPHIER_MIO_CLK_SD(1, 1),
diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
index 15a2f2cbe0d9..2c243a894f3b 100644
--- a/drivers/clk/uniphier/clk-uniphier-mux.c
+++ b/drivers/clk/uniphier/clk-uniphier-mux.c
@@ -42,7 +42,7 @@ static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
int ret;
- u32 val;
+ unsigned int val;
u8 i;
ret = regmap_read(mux->regmap, mux->reg, &val);
diff --git a/drivers/clk/uniphier/clk-uniphier.h b/drivers/clk/uniphier/clk-uniphier.h
index 3ae184062388..0244dba1f4cf 100644
--- a/drivers/clk/uniphier/clk-uniphier.h
+++ b/drivers/clk/uniphier/clk-uniphier.h
@@ -115,7 +115,7 @@ extern const struct uniphier_clk_data uniphier_pxs2_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld11_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_ld20_sys_clk_data[];
extern const struct uniphier_clk_data uniphier_sld3_mio_clk_data[];
-extern const struct uniphier_clk_data uniphier_pro5_mio_clk_data[];
+extern const struct uniphier_clk_data uniphier_pro5_sd_clk_data[];
extern const struct uniphier_clk_data uniphier_ld4_peri_clk_data[];
extern const struct uniphier_clk_data uniphier_pro4_peri_clk_data[];
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 245190839359..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
config SYS_SUPPORTS_EM_STI
bool
+config CLKSRC_JCORE_PIT
+ bool "J-Core PIT timer driver" if COMPILE_TEST
+ depends on OF
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+	  This enables building the clocksource and clockevent driver for
+ the integrated PIT in the J-Core synthesizable, open source SoC.
+
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT 12
+#define PIT_PRIO_SHIFT 20
+#define PIT_ENABLE_SHIFT 26
+#define PIT_PRIO_MASK 0xf
+
+#define REG_PITEN 0x00
+#define REG_THROT 0x10
+#define REG_COUNT 0x14
+#define REG_BUSPD 0x18
+#define REG_SECHI 0x20
+#define REG_SECLO 0x24
+#define REG_NSEC 0x28
+
+struct jcore_pit {
+ struct clock_event_device ced;
+ void __iomem *base;
+ unsigned long periodic_delta;
+ u32 enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+ u32 seclo, nsec, seclo0;
+ __iomem void *base = jcore_pit_base;
+
+ seclo = readl(base + REG_SECLO);
+ do {
+ seclo0 = seclo;
+ nsec = readl(base + REG_NSEC);
+ seclo = readl(base + REG_SECLO);
+ } while (seclo0 != seclo);
+
+ return seclo * NSEC_PER_SEC + nsec;
+}
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+ return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+ writel(0, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+ jcore_pit_disable(pit);
+ writel(delta, pit->base + REG_THROT);
+ writel(pit->enable_val, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+ unsigned buspd, freq;
+
+ pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
+ buspd = readl(pit->base + REG_BUSPD);
+ freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+ pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+ clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+ struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+ if (clockevent_state_oneshot(&pit->ced))
+ jcore_pit_disable(pit);
+
+ pit->ced.event_handler(&pit->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+ int err;
+ unsigned pit_irq, cpu;
+ unsigned long hwirq;
+ u32 irqprio, enable_val;
+
+ jcore_pit_base = of_iomap(node, 0);
+ if (!jcore_pit_base) {
+ pr_err("Error: Cannot map base address for J-Core PIT\n");
+ return -ENXIO;
+ }
+
+ pit_irq = irq_of_parse_and_map(node, 0);
+ if (!pit_irq) {
+ pr_err("Error: J-Core PIT has no IRQ\n");
+ return -ENXIO;
+ }
+
+ pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+ jcore_pit_base, pit_irq);
+
+ err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+ NSEC_PER_SEC, 400, 32,
+ jcore_clocksource_read);
+ if (err) {
+ pr_err("Error registering clocksource device: %d\n", err);
+ return err;
+ }
+
+ sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+ jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+ if (!jcore_pit_percpu) {
+ pr_err("Failed to allocate memory for clock event device\n");
+ return -ENOMEM;
+ }
+
+ err = request_irq(pit_irq, jcore_timer_interrupt,
+ IRQF_TIMER | IRQF_PERCPU,
+ "jcore_pit", jcore_pit_percpu);
+ if (err) {
+ pr_err("pit irq request failed: %d\n", err);
+ free_percpu(jcore_pit_percpu);
+ return err;
+ }
+
+ /*
+ * The J-Core PIT is not hard-wired to a particular IRQ, but
+ * integrated with the interrupt controller such that the IRQ it
+ * generates is programmable, as follows:
+ *
+ * The bit layout of the PIT enable register is:
+ *
+ * .....e..ppppiiiiiiii............
+ *
+ * where the .'s indicate unrelated/unused bits, e is enable,
+ * p is priority, and i is hard irq number.
+ *
+ * For the PIT included in AIC1 (obsolete but still in use),
+ * any hard irq (trap number) can be programmed via the 8
+ * iiiiiiii bits, and a priority (0-15) is programmable
+ * separately in the pppp bits.
+ *
+ * For the PIT included in AIC2 (current), the programming
+ * interface is equivalent modulo interrupt mapping. This is
+ * why a different compatible tag was not used. However only
+ * traps 64-127 (the ones actually intended to be used for
+ * interrupts, rather than syscalls/exceptions/etc.) can be
+ * programmed (the high 2 bits of i are ignored) and the
+ * priority pppp is <<2'd and or'd onto the irq number. This
+ * choice seems to have been made on the hardware engineering
+ * side under an assumption that preserving old AIC1 priority
+ * mappings was important. Future models will likely ignore
+ * the pppp field.
+ */
+ hwirq = irq_get_irq_data(pit_irq)->hwirq;
+ irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+ enable_val = (1U << PIT_ENABLE_SHIFT)
+ | (hwirq << PIT_IRQ_SHIFT)
+ | (irqprio << PIT_PRIO_SHIFT);
+
+ for_each_present_cpu(cpu) {
+ struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+ pit->base = of_iomap(node, cpu);
+ if (!pit->base) {
+ pr_err("Unable to map PIT for cpu %u\n", cpu);
+ continue;
+ }
+
+ pit->ced.name = "jcore_pit";
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_PERCPU;
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.rating = 400;
+ pit->ced.irq = pit_irq;
+ pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+ pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+ pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+ pit->ced.set_next_event = jcore_pit_set_next_event;
+
+ pit->enable_val = enable_val;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ "AP_JCORE_TIMER_STARTING",
+ jcore_pit_local_init, NULL);
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
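[Editor's note, not part of the patch: given the register layout described in the comment above (enable at bit 26, 4-bit priority at bit 20, 8-bit hard irq at bit 12), the PITEN value computed in jcore_pit_init() can be sketched in isolation as:

	/* Sketch of the enable_val computation used above. */
	#include <stdint.h>

	#define PIT_IRQ_SHIFT		12
	#define PIT_PRIO_SHIFT		20
	#define PIT_ENABLE_SHIFT	26
	#define PIT_PRIO_MASK		0xf

	static uint32_t pit_enable_val(unsigned long hwirq)
	{
		/* AIC2 derives the priority from the trap number */
		uint32_t prio = (hwirq >> 2) & PIT_PRIO_MASK;

		return (1U << PIT_ENABLE_SHIFT)
		     | ((uint32_t)hwirq << PIT_IRQ_SHIFT)
		     | (prio << PIT_PRIO_SHIFT);
	}
]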
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+ struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
+ return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
+
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
- rate, 340, 32, clocksource_mmio_readl_down);
+ cs->clksrc.name = node->name;
+ cs->clksrc.rating = 340;
+ cs->clksrc.read = sun5i_clksrc_read;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;
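[Editor's note, not part of the patch: the new sun5i_clksrc_read() callback inverts the raw register value because the hardware timer counts down while the clocksource core expects a monotonically increasing count. The equivalent transformation in isolation:

	/* Turning a 32-bit down-counter reading into a count-up value
	 * (what the ~readl() above does). */
	#include <stdint.h>

	static uint32_t downcount_to_cycles(uint32_t raw)
	{
		return ~raw;	/* same as 0xffffffff - raw for a 32-bit counter */
	}
]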
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 1b2f28f69a81..4852d9efe74e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct cppc_cpudata *cpu;
struct cpufreq_freqs freqs;
+ u32 desired_perf;
int ret = 0;
cpu = all_cpu_data[policy->cpu];
- cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+ desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu->perf_ctrls.desired_perf)
+ return ret;
+
+ cpu->perf_ctrls.desired_perf = desired_perf;
freqs.old = policy->cur;
freqs.new = target_freq;
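The hunk above converts a kHz target into an abstract CPPC performance value and skips the update when the result is unchanged. A minimal userspace sketch of the same scaling; the cppc_dmi_max_khz and highest_perf numbers below are made up, since on real hardware they come from the CPPC/DMI tables.

#include <stdint.h>
#include <stdio.h>

static const uint64_t cppc_dmi_max_khz = 2800000;	/* assumed: 2.8 GHz */
static const uint64_t highest_perf = 280;		/* assumed capability */

static unsigned int khz_to_desired_perf(unsigned int target_khz)
{
	/* Widen before multiplying, as the driver does, so the
	 * intermediate product cannot overflow 32 bits. */
	return (uint64_t)target_khz * highest_perf / cppc_dmi_max_khz;
}

int main(void)
{
	unsigned int prev = 0, target = 1400000;
	unsigned int desired = khz_to_desired_perf(target);

	if (desired == prev)		/* same perf: nothing to write */
		return 0;
	printf("%u kHz -> desired_perf %u\n", target, desired);
	return 0;
}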
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 18da4f8051d3..13475890d792 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -17,6 +17,7 @@
struct cs_policy_dbs_info {
struct policy_dbs_info policy_dbs;
unsigned int down_skip;
+ unsigned int requested_freq;
};
static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+ unsigned int requested_freq = dbs_info->requested_freq;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
if (cs_tuners->freq_step == 0)
goto out;
+ /*
+ * If requested_freq is out of range, it is likely that the limits
+ * changed in the meantime, so fall back to current frequency in that
+ * case.
+ */
+ if (requested_freq > policy->max || requested_freq < policy->min)
+ requested_freq = policy->cur;
+
/* Check for frequency increase */
if (load > dbs_data->up_threshold) {
- unsigned int requested_freq = policy->cur;
-
dbs_info->down_skip = 0;
/* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
goto out;
requested_freq += get_freq_target(cs_tuners, policy);
+ if (requested_freq > policy->max)
+ requested_freq = policy->max;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
/* Check for frequency decrease */
if (load < cs_tuners->down_threshold) {
- unsigned int freq_target, requested_freq = policy->cur;
+ unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
requested_freq = policy->min;
__cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ dbs_info->requested_freq = requested_freq;
}
out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
dbs_info->down_skip = 0;
+ dbs_info->requested_freq = policy->cur;
}
static struct dbs_governor cs_governor = {
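For reference, a toy model of the requested_freq bookkeeping introduced above: the cached request is re-clamped against the current policy limits before stepping up or down, so a value computed under old limits cannot escape the valid range. All frequencies, thresholds and the step size below are invented numbers, not the governor's tunables.

#include <stdio.h>

struct policy { unsigned int min, max, cur; };

static unsigned int cs_next_freq(const struct policy *p,
				 unsigned int requested,
				 unsigned int load, unsigned int step)
{
	/* Limits may have changed since the last sample; fall back to
	 * the current frequency if the cached request is out of range. */
	if (requested > p->max || requested < p->min)
		requested = p->cur;

	if (load > 80) {				/* up threshold */
		requested += step;
		if (requested > p->max)
			requested = p->max;
	} else if (load < 20) {				/* down threshold */
		if (requested > p->min + step)
			requested -= step;
		else
			requested = p->min;
	}
	return requested;
}

int main(void)
{
	struct policy p = { .min = 800000, .max = 2400000, .cur = 1600000 };

	/* A stale 3 GHz request is replaced by p.cur before stepping. */
	printf("next: %u kHz\n", cs_next_freq(&p, 3000000, 90, 100000));
	return 0;
}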
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 806f2039571e..4737520ec823 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -179,6 +179,7 @@ struct _pid {
/**
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
+ * @policy: CPUFreq policy value
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
* @iowait_boost: iowait-related boost fraction
@@ -201,6 +202,7 @@ struct _pid {
struct cpudata {
int cpu;
+ unsigned int policy;
struct update_util_data update_util;
bool update_util_set;
@@ -225,7 +227,7 @@ struct cpudata {
static struct cpudata **all_cpu_data;
/**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
* @sample_rate_ms: PID calculation sample rate in ms
* @sample_rate_ns: Sample rate calculation in ns
* @deadband: PID deadband
@@ -562,12 +564,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
int min, hw_min, max, hw_max, cpu, range, adj_range;
u64 value, cap;
- rdmsrl(MSR_HWP_CAPABILITIES, cap);
- hw_min = HWP_LOWEST_PERF(cap);
- hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
-
for_each_cpu(cpu, cpumask) {
+ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+ hw_min = HWP_LOWEST_PERF(cap);
+ hw_max = HWP_HIGHEST_PERF(cap);
+ range = hw_max - hw_min;
+
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
adj_range = limits->min_perf_pct * range / 100;
min = hw_min + adj_range;
@@ -1142,10 +1144,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
-static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
- int pstate = cpu->pstate.min_pstate;
-
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
cpu->pstate.current_pstate = pstate;
/*
@@ -1157,6 +1157,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
pstate_funcs.get_val(cpu, pstate));
}
+static void intel_pstate_set_min_pstate(struct cpudata *cpu)
+{
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+}
+
+static void intel_pstate_max_within_limits(struct cpudata *cpu)
+{
+ int min_pstate, max_pstate;
+
+ update_turbo_state();
+ intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
+ intel_pstate_set_pstate(cpu, max_pstate);
+}
+
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -1232,6 +1246,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int32_t busy_frac, boost;
+ int target, avg_pstate;
busy_frac = div_fp(sample->mperf, sample->tsc);
@@ -1242,7 +1257,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
busy_frac = boost;
sample->busy_scaled = busy_frac * 100;
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+ target = limits->no_turbo || limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ target += target >> 2;
+ target = mul_fp(target, busy_frac);
+ if (target < cpu->pstate.min_pstate)
+ target = cpu->pstate.min_pstate;
+
+ /*
+ * If the average P-state during the previous cycle was higher than the
+ * current target, add 50% of the difference to the target to reduce
+ * possible performance oscillations and offset possible performance
+ * loss related to moving the workload from one CPU to another within
+ * a package/module.
+ */
+ avg_pstate = get_avg_pstate(cpu);
+ if (avg_pstate > target)
+ target += (avg_pstate - target) >> 1;
+
+ return target;
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
@@ -1251,10 +1285,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
u64 duration_ns;
/*
- * perf_scaled is the average performance during the last sampling
- * period scaled by the ratio of the maximum P-state to the P-state
- * requested last time (in percent). That measures the system's
- * response to the previous P-state selection.
+ * perf_scaled is the ratio of the average P-state during the last
+ * sampling period to the P-state requested last time (in percent).
+ *
+ * That measures the system's response to the previous P-state
+ * selection.
*/
max_pstate = cpu->pstate.max_pstate_physical;
current_pstate = cpu->pstate.current_pstate;
@@ -1304,7 +1339,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
from = cpu->pstate.current_pstate;
- target_pstate = pstate_funcs.get_target_pstate(cpu);
+ target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
+ cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);
intel_pstate_update_pstate(cpu, target_pstate);
@@ -1470,7 +1506,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
policy->cpuinfo.max_freq, policy->max);
- cpu = all_cpu_data[0];
+ cpu = all_cpu_data[policy->cpu];
+ cpu->policy = policy->policy;
+
if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
policy->max < policy->cpuinfo.max_freq &&
policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
@@ -1478,7 +1516,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
policy->max = policy->cpuinfo.max_freq;
}
- if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
pr_debug("set performance\n");
@@ -1514,6 +1552,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
+ if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ /*
+ * NOHZ_FULL CPUs need this as the governor callback may not
+ * be invoked on them.
+ */
+ intel_pstate_clear_update_util_hook(policy->cpu);
+ intel_pstate_max_within_limits(cpu);
+ }
+
intel_pstate_set_update_util_hook(policy->cpu);
intel_pstate_hwp_set_policy(policy);
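The CPU-load path above now derives the target directly from the busy fraction: start from the max (or turbo) P-state plus 25% headroom, scale by the busy fraction, clamp to the minimum, then pull the result halfway toward the recent average when the average was higher. A sketch with the driver's fixed-point busy fraction replaced by a plain percentage; the P-state numbers are invented.

#include <stdio.h>

static int pick_target_pstate(int max_or_turbo_pstate, int min_pstate,
			      int busy_pct, int avg_pstate)
{
	/* 25% headroom so a fully busy CPU can still reach the top state. */
	int target = max_or_turbo_pstate + (max_or_turbo_pstate >> 2);

	target = target * busy_pct / 100;
	if (target < min_pstate)
		target = min_pstate;

	/* Damp oscillations: move halfway toward the recent average
	 * if it was higher than the freshly computed target. */
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

int main(void)
{
	printf("target pstate: %d\n", pick_target_pstate(32, 8, 40, 24));
	return 0;
}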
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 4102be01d06a..512ee37b374b 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -5,7 +5,7 @@ config MIPS_CPS_CPUIDLE
bool "CPU Idle driver for MIPS CPS platforms"
depends on CPU_IDLE && MIPS_CPS
depends on SYS_SUPPORTS_MIPS_CPS
- select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
+ select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT || CPU_MIPSR6
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select MIPS_CPS_PM
default y
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c
index 1adb6980b707..926ba9871c62 100644
--- a/drivers/cpuidle/cpuidle-cps.c
+++ b/drivers/cpuidle/cpuidle-cps.c
@@ -163,7 +163,7 @@ static int __init cps_cpuidle_init(void)
core = cpu_data[cpu].core;
device = &per_cpu(cpuidle_dev, cpu);
device->cpu = cpu;
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]);
#endif
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index daadd20aa936..3e2ab3b14eea 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -14,7 +14,7 @@ if DEV_DAX
config DEV_DAX_PMEM
tristate "PMEM DAX: direct access to persistent memory"
- depends on NVDIMM_DAX
+ depends on LIBNVDIMM && NVDIMM_DAX
default DEV_DAX
help
Support raw access to persistent memory. Note that this
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9630d8837ba9..4a15fa5df98b 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_exit(ref);
- wait_for_completion(&dax_pmem->cmp);
}
static void dax_pmem_percpu_kill(void *data)
@@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
dev_dbg(dax_pmem->dev, "%s\n", __func__);
percpu_ref_kill(ref);
+ wait_for_completion(&dax_pmem->cmp);
}
static int dax_pmem_probe(struct device *dev)
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 478006b7764a..bf3ea7603a58 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
cur_time = jiffies;
+ /* Immediately exit if previous_freq is not initialized yet. */
+ if (!devfreq->previous_freq)
+ goto out;
+
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
if (prev_lev < 0) {
ret = prev_lev;
@@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
- mutex_unlock(&devfreq_list_lock);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
goto err_init;
}
+ mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
+
device_unregister(&devfreq->dev);
err_out:
return ERR_PTR(err);
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index 0fdae8608961..cd949800eed9 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
depends on ARCH_EXYNOS || COMPILE_TEST
select PM_OPP
+ select REGMAP_MMIO
help
This add the devfreq-event driver for Exynos SoC. It provides NoC
(Network on Chip) Probe counters to measure the bandwidth of AXI bus.
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
index a5841403bde8..49e712aca0c1 100644
--- a/drivers/devfreq/event/exynos-nocp.c
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
return 0;
out:
- edata->load_count = 0;
- edata->total_count = 0;
-
dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
return ret;
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index ca957a5f4291..b8cde096a808 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -51,7 +51,7 @@ static void qcom_usb_extcon_detect_cable(struct work_struct *work)
if (ret)
return;
- extcon_set_state(info->edev, EXTCON_USB_HOST, !id);
+ extcon_set_state_sync(info->edev, EXTCON_USB_HOST, !id);
}
static irqreturn_t qcom_usb_irq_handler(int irq, void *dev_id)
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
+ if (lynx->registers == NULL) {
+ dev_err(&dev->dev, "Failed to map registers\n");
+ ret = -ENOMEM;
+ goto fail_deallocate_lynx;
+ }
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
-fail_deallocate:
+fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
+
+fail_deallocate_lynx:
kfree(lynx);
fail_disable:
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 26ee00f6bd58..d011cb89d25e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -284,7 +284,7 @@ config GPIO_MM_LANTIQ
config GPIO_MOCKUP
tristate "GPIO Testing Driver"
- depends on GPIOLIB
+ depends on GPIOLIB && SYSFS
select GPIO_SYSFS
help
This enables GPIO Testing driver, which provides a way to test GPIO
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index 9457e2022bf6..dc37dbe4b46d 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -219,6 +219,7 @@ static const struct of_device_id ath79_gpio_of_match[] = {
{ .compatible = "qca,ar9340-gpio" },
{},
};
+MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 425501c39527..793518a30afe 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -239,7 +239,7 @@ static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(irq, h->host_data);
- irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b9daa0bf32a4..ee1724806f46 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -308,8 +308,10 @@ static int mxs_gpio_probe(struct platform_device *pdev)
writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
- if (irq_base < 0)
- return irq_base;
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_iounmap;
+ }
port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -349,6 +351,8 @@ out_irqdomain_remove:
irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
+out_iounmap:
+ iounmap(port->base);
return err;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 45c8817d068c..e422568e14ad 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -794,6 +794,22 @@ static int pca953x_probe(struct i2c_client *client,
}
mutex_init(&chip->i2c_lock);
+ /*
+ * In case we have an i2c-mux controlled by a GPIO provided by an
+ * expander using the same driver higher on the device tree, read the
+ * i2c adapter nesting depth and use the retrieved value as lockdep
+ * subclass for chip->i2c_lock.
+ *
+ * REVISIT: This solution is not complete. It protects us from lockdep
+ * false positives when the expander controlling the i2c-mux is on
+ * a different level on the device tree, but not when it's on the same
+ * level on a different branch (in which case the subclass number
+ * would be the same).
+ *
+ * TODO: Once a correct solution is developed, a similar fix should be
+ * applied to all other i2c-controlled GPIO expanders (and potentially
+ * regmap-i2c).
+ */
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
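The lockdep subclass above is taken from how deeply the chip's I2C adapter is nested behind muxes. A rough standalone illustration of that depth computation, using a hypothetical adapter structure with only a parent pointer; the real i2c_adapter_depth() helper lives in the I2C core and this is not its implementation.

#include <stdio.h>

/* Hypothetical stand-in for struct i2c_adapter. */
struct adapter {
	const char *name;
	struct adapter *parent;		/* NULL for a root adapter */
};

static unsigned int adapter_depth(const struct adapter *adap)
{
	unsigned int depth = 0;

	while ((adap = adap->parent))
		depth++;
	return depth;
}

int main(void)
{
	struct adapter root = { "i2c-0", NULL };
	struct adapter chan = { "i2c-0-mux-chan2", &root };	/* behind a mux */

	/* An expander on the mux channel gets subclass 1, distinct from
	 * subclass 0 used by the expander on the root bus that drives
	 * the mux, which is what avoids the lockdep false positive
	 * described in the comment above. */
	printf("subclass for %s: %u\n", chan.name, adapter_depth(&chan));
	return 0;
}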
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index e7d422a6b90b..5b0042776ec7 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -409,7 +409,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
* 801/1801/1600, bits are cleared when read.
* Edge detect register is not present on 801/1600/1801
*/
- if (stmpe->partnum != STMPE801 || stmpe->partnum != STMPE1600 ||
+ if (stmpe->partnum != STMPE801 && stmpe->partnum != STMPE1600 &&
stmpe->partnum != STMPE1801) {
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
stmpe_reg_write(stmpe,
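The one-character fix above replaces || with && in the part-number test: with ||, "partnum != A || partnum != B || partnum != C" is true for every part (no value can equal all three at once), so the acknowledge writes were issued even on parts whose status bits are cleared on read. A tiny check of the two forms, with placeholder part-number values.

#include <stdio.h>

enum { STMPE801 = 801, STMPE1600 = 1600, STMPE1801 = 1801 };	/* placeholders */

int main(void)
{
	int partnum = STMPE1600;

	/* Old condition: always true, regardless of partnum. */
	int old = (partnum != STMPE801 || partnum != STMPE1600 ||
		   partnum != STMPE1801);

	/* Fixed condition: true only for parts that are none of the
	 * three, i.e. the ones that need an explicit ack write. */
	int new = (partnum != STMPE801 && partnum != STMPE1600 &&
		   partnum != STMPE1801);

	printf("old=%d new=%d for part %d\n", old, new, partnum);
	return 0;
}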
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 99256115bea5..c2a80b4cbf32 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -66,6 +66,7 @@ static const struct of_device_id ts4800_gpio_of_match[] = {
{ .compatible = "technologic,ts4800-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, ts4800_gpio_of_match);
static struct platform_driver ts4800_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 58ece201b8e6..72a4b326fd0d 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -653,14 +653,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
int idx, i;
unsigned int irq_flags;
+ int ret = -ENOENT;
for (i = 0, idx = 0; idx <= index; i++) {
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
- if (IS_ERR(desc))
+ if (IS_ERR(desc)) {
+ ret = PTR_ERR(desc);
break;
+ }
if (info.gpioint && idx++ == index) {
int irq = gpiod_to_irq(desc);
@@ -679,7 +682,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
}
- return -ENOENT;
+ return ret;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f0fc3a0d37c8..20e09b7c2de3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -333,6 +333,13 @@ struct linehandle_state {
u32 numdescs;
};
+#define GPIOHANDLE_REQUEST_VALID_FLAGS \
+ (GPIOHANDLE_REQUEST_INPUT | \
+ GPIOHANDLE_REQUEST_OUTPUT | \
+ GPIOHANDLE_REQUEST_ACTIVE_LOW | \
+ GPIOHANDLE_REQUEST_OPEN_DRAIN | \
+ GPIOHANDLE_REQUEST_OPEN_SOURCE)
+
static long linehandle_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
@@ -344,6 +351,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
/* TODO: check if descriptors are really input */
for (i = 0; i < lh->numdescs; i++) {
val = gpiod_get_value_cansleep(lh->descs[i]);
@@ -444,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 lflags = handlereq.flags;
struct gpio_desc *desc;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
+ /* Return an error if a unknown flag is set */
+ if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
+ ret = -EINVAL;
+ goto out_free_descs;
+ }
+
desc = &gdev->descs[offset];
ret = gpiod_request(desc, lh->label);
if (ret)
@@ -536,6 +556,10 @@ struct lineevent_state {
struct mutex read_lock;
};
+#define GPIOEVENT_REQUEST_VALID_FLAGS \
+ (GPIOEVENT_REQUEST_RISING_EDGE | \
+ GPIOEVENT_REQUEST_FALLING_EDGE)
+
static unsigned int lineevent_poll(struct file *filep,
struct poll_table_struct *wait)
{
@@ -623,6 +647,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
int val;
+ memset(&ghd, 0, sizeof(ghd));
+
val = gpiod_get_value_cansleep(le->desc);
if (val < 0)
return val;
@@ -726,6 +752,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
lflags = eventreq.handleflags;
eflags = eventreq.eventflags;
+ if (offset >= gdev->ngpio) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
+ /* Return an error if a unknown flag is set */
+ if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
+ (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
+ ret = -EINVAL;
+ goto out_free_label;
+ }
+
/* This is just wrong: we don't look for events on output lines */
if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
ret = -EINVAL;
@@ -823,6 +861,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;
+ memset(&chipinfo, 0, sizeof(chipinfo));
+
strncpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
@@ -839,7 +879,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;
- if (lineinfo.line_offset > gdev->ngpio)
+ if (lineinfo.line_offset >= gdev->ngpio)
return -EINVAL;
desc = &gdev->descs[lineinfo.line_offset];
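The new checks above bound the requested line offset against the chip's line count and reject requests carrying flag bits outside a whitelist built from the known request flags. A small sketch of that validation pattern; the flag bits below are invented stand-ins, not the real GPIOHANDLE_REQUEST_* values.

#include <stdint.h>
#include <stdio.h>

/* Invented flag bits standing in for the GPIOHANDLE_REQUEST_* values. */
#define REQ_INPUT	(1u << 0)
#define REQ_OUTPUT	(1u << 1)
#define REQ_ACTIVE_LOW	(1u << 2)
#define REQ_OPEN_DRAIN	(1u << 3)
#define REQ_OPEN_SOURCE	(1u << 4)

#define REQ_VALID_FLAGS \
	(REQ_INPUT | REQ_OUTPUT | REQ_ACTIVE_LOW | \
	 REQ_OPEN_DRAIN | REQ_OPEN_SOURCE)

static int validate_request(uint32_t offset, uint32_t flags, uint32_t ngpio)
{
	if (offset >= ngpio)
		return -1;		/* line does not exist */
	if (flags & ~REQ_VALID_FLAGS)
		return -1;		/* unknown flag bits set */
	return 0;
}

int main(void)
{
	printf("ok=%d\n", validate_request(5, REQ_OUTPUT, 32));
	printf("unknown flag=%d\n",
	       validate_request(5, REQ_OUTPUT | (1u << 16), 32));
	return 0;
}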
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
- adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
- continue;
- /* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
- }
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
adev->ip_block_status[i].late_initialized = true;
}
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ }
}
return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
{
int i, r;
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].hw)
+ continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* ungate blocks before hw fini so that we can shutdown the blocks safely */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ }
+ adev->ip_block_status[i].hw = false;
+ break;
+ }
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ adev->ip_block_status[i].hang =
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
if (adev->ip_block_status[i].hang) {
DRM_INFO("IP block:%d is hang!\n", i);
asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
- if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
- DRM_INFO("Some block need full reset!\n");
- return true;
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("Some block need full reset!\n");
+ return true;
+ }
+ }
}
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
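The reworked vblank-time math above multiplies the horizontal total by the number of blanking lines first, then divides by the pixel clock once, instead of rounding a per-line time and multiplying it back up. A standalone version of that calculation; the mode timings below are made-up 1080p-style numbers.

#include <stdint.h>
#include <stdio.h>

static uint32_t vblank_time_us(uint32_t htotal, uint32_t vblank_end,
			       uint32_t vdisplay, uint32_t v_border,
			       uint32_t clock_khz)
{
	uint32_t vblank_in_pixels =
		htotal * (vblank_end - vdisplay + v_border * 2);

	/* clock is in kHz, so pixels * 1000 / kHz yields microseconds. */
	return vblank_in_pixels * 1000 / clock_khz;
}

int main(void)
{
	printf("vblank = %u us\n",
	       (unsigned)vblank_time_us(2200, 1125, 1080, 0, 148500));
	return 0;
}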
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index aa074fac0c7f..f3efb1c5dae9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
unsigned pinned = 0;
int r;
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ flags |= FOLL_WRITE;
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
list_add(&guptask.list, &gtt->guptasks);
spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+ r = get_user_pages(userptr, num_pages, flags, p, NULL);
spin_lock(&gtt->guptasklock);
list_del(&guptask.list);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
return 0;
}
-/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
int actual_temp = 0;
- uint32_t temp = RREG32_SMC(0xC0300E0C);
+ uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+ uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
- if (temp)
+ if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = 1000 * ((temp / 8) - 49);
+ else
+ actual_temp = 1000 * (temp / 8);
return actual_temp;
}
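The rewritten temperature read above decodes THM_TCON_CUR_TMP through named fields instead of a magic register offset: the counter ticks in 1/8-degree steps and, when the range-select bit is set, the zero point sits 49 degrees lower. A standalone sketch of the decode; the field positions below are assumptions for illustration, not the real register layout.

#include <stdint.h>
#include <stdio.h>

#define CUR_TEMP_MASK		0x7ffu		/* assumed field width */
#define CUR_TEMP_RANGE_SEL	(1u << 11)	/* assumed bit position */

static int decode_temp_millicelsius(uint32_t thm_tcon_cur_tmp)
{
	int temp = thm_tcon_cur_tmp & CUR_TEMP_MASK;

	/* Counter ticks are 1/8 degree; range select shifts the
	 * zero point down by 49 degrees. */
	if (thm_tcon_cur_tmp & CUR_TEMP_RANGE_SEL)
		return 1000 * (temp / 8 - 49);

	return 1000 * (temp / 8);
}

int main(void)
{
	printf("%d millidegrees C\n",
	       decode_temp_millicelsius(CUR_TEMP_RANGE_SEL | 0x2c8));
	return 0;
}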
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (dce_v10_0_is_display_hung(adev))
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
- else
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
- return 0;
+ return dce_v10_0_is_display_hung(adev);
}
static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
- return 0;
-
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
gfx_v8_0_rlc_stop(adev);
/* disable CG */
- WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+ adev->asic_type == CHIP_POLARIS10) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+ }
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
adev->gfx.grbm_soft_reset = grbm_soft_reset;
adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
adev->gfx.grbm_soft_reset = 0;
adev->gfx.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
adev->mc.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
adev->mc.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
static int gmc_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->mc.srbm_soft_reset;
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
adev->sdma.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ /* Limit clocks for some HD8600 parts */
+ if (adev->pdev->device == 0x6660 &&
+ adev->pdev->revision == 0x83) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
adev->irq.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
adev->irq.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int tonga_ih_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->irq.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
adev->uvd.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
+
static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset;
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
adev->vce.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
adev->vce.srbm_soft_reset = 0;
+ return false;
}
- mutex_unlock(&adev->grbm_idx_mutex);
- return 0;
}
static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
/* poll for idle */
int (*wait_for_idle)(void *handle);
/* check soft reset the IP block */
- int (*check_soft_reset)(void *handle);
+ bool (*check_soft_reset)(void *handle);
/* pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
+ uninitialize_thermal_controller_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int actual_temp = 0;
+ uint32_t val = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+ uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+ if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+ actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return actual_temp;
+}
+
static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
case AMDGPU_PP_SENSOR_VCE_POWER:
*value = cz_hwmgr->vce_power_gated ? 0 : 1;
return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = cz_thermal_get_temperature(hwmgr);
+ return 0;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
/* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -EINVAL);
+ if (!data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable SCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ }
/* disable MCLK dpm */
if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -EINVAL);
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable MCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
return -EINVAL);
}
- if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -EINVAL;
- }
+ smu7_disable_sclk_mclk_dpm(hwmgr);
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable voltage DPM when DPM is disabled",
+ return 0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable thermal auto throttle!", result = tmp_result);
+ if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
tmp_result = smu7_stop_dpm(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info != NULL)
- sclk_table = table_info->vdd_dep_on_sclk;
+ if (table_info == NULL)
+ return -EINVAL;
+
+ sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
- const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
- const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+ const struct smu7_power_state *psa;
+ const struct smu7_power_state *psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
+ psa = cast_const_phw_smu7_power_state(pstate1);
+ psb = cast_const_phw_smu7_power_state(pstate2);
/* If the two states don't even have the same number of performance levels they cannot be the same state. */
if (psa->performance_level_count != psb->performance_level_count) {
*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_mclk_od = smu7_set_mclk_od,
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
+ .dynamic_state_management_disable = smu7_disable_dpm_tasks,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- armada_drm_crtc_update(dcrtc);
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
- clk_disable_unprepare(dcrtc->clk);
+ if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
if (dpms_blanked(dpms))
armada_drm_vblank_off(dcrtc);
- else
+ else if (!IS_ERR(dcrtc->clk))
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (!dpms_blanked(dpms))
drm_crtc_vblank_on(&dcrtc->crtc);
+ else if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+ } else if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
}
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 608df4c90520..0743e65cb240 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bb2438dd8733..5e7e63ce7bce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
mutex_lock(&dev->master_mutex);
master = dev->master;
- if (!master)
- goto out_unlock;
-
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
-out_unlock:
mutex_unlock(&dev->master_mutex);
return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
- * We need three dwords: event, wait, link.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
- return_dwords = 3;
+ return_dwords = 7;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
- * Append event, wait and link pointing back to the wait
- * command to the ring buffer.
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
*/
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, return_target + 8);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
+ unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec)
return ERR_PTR(-ENOMEM);
+ if (!etnaviv_obj->userptr.ro)
+ flags |= FOLL_WRITE;
+
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ flags, pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
return (u32)buf->vram_node.start;
mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+ ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+ buf->size + SZ_64K);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+ clk_disable_unprepare(fsl_dev->pix_clk);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ clk_prepare_enable(fsl_dev->pix_clk);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto disable_dcu_clk;
- }
-
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
enable_irq(fsl_dev->irq);
return 0;
-
-disable_dcu_clk:
- clk_disable_unprepare(fsl_dev->clk);
- return ret;
}
#endif
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto unregister_pix_clk;
- }
-
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
if (IS_ERR(drm)) {
ret = PTR_ERR(drm);
- goto disable_pix_clk;
+ goto unregister_pix_clk;
}
fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
-disable_pix_clk:
- clk_disable_unprepare(fsl_dev->pix_clk);
unregister_pix_clk:
clk_unregister(fsl_dev->pix_clk);
disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
clk_disable_unprepare(fsl_dev->clk);
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
}
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = fsl_dcu_drm_encoder_atomic_check,
- .disable = fsl_dcu_drm_encoder_disable,
- .enable = fsl_dcu_drm_encoder_enable,
-};
-
static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
int ret;
encoder->possible_crtcs = 1;
+
+ /* Use bypass mode for parallel RGB/LVDS encoder */
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 919b35f2ad24..dcf7d11ac380 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1825dbc33192..a6dbe8258040 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = false;
+ }
+}
+
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_lvds_set_property,
};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
- radeon_fbdev_fini(rdev);
- kfree(rdev->mode_info.bios_hardcoded_edid);
-
- /* free i2c buses */
- radeon_i2c_fini(rdev);
-
if (rdev->mode_info.mode_config_initialized) {
- radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_crtc_force_disable_all(rdev->ddev);
+ radeon_fbdev_fini(rdev);
+ radeon_afmt_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 47
+#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
- if (i2c->has_aux)
- drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index be30861afae9..41b72ce6613f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+ pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
+ case TA_CS_BC_BASE_ADDR:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
+#define TA_CS_BC_BASE_ADDR 0x950C
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
down_read(&current->mm->mmap_sem);
ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
#define VMW_RES_HT_ORDER 12
/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
+/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+ return (unsigned long) b - (unsigned long) a;
+}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
+ u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
+ *addr = rel->res->id;
+ break;
+ case vmw_res_rel_nop:
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start, id_loc),
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_cond_nop);
}
/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
cmd->body.shaderResourceViewId);
}
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTransferFromBuffer body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.destSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_buffer_copy_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
&vmw_cmd_pred_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+ &vmw_cmd_dx_transfer_from_buffer,
+ true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
-
out_unlock:
return;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- if (nonblock)
- return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
- BUG_ON(val_buf->bo == NULL);
-
+ BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res = &srf->res;
- BUG_ON(res_free == NULL);
+ BUG_ON(!res_free);
if (!dev_priv->has_mob)
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
+ srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*srf->sizes) * srf->num_sizes);
+ if (IS_ERR(srf->sizes)) {
+ ret = PTR_ERR(srf->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->offsets == NULL)) {
+ srf->offsets = kmalloc_array(srf->num_sizes,
+ sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
- if (unlikely(base == NULL)) {
+ if (unlikely(!base)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd1 == NULL)) {
+ if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (srf->res.backup == NULL) {
+ if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 pid0006_rdesc_fixed[] = {
- 0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x04, /* Usage (Joystick) */
- 0xA1, 0x01, /* Collection (Application) */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x05, /* Report Count (5) */
- 0x15, 0x00, /* Logical Minimum (0) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x35, 0x00, /* Physical Minimum (0) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x09, 0x30, /* Usage (X) */
- 0x09, 0x33, /* Usage (Ry) */
- 0x09, 0x32, /* Usage (Z) */
- 0x09, 0x31, /* Usage (Y) */
- 0x09, 0x34, /* Usage (Ry) */
- 0x81, 0x02, /* Input (Variable) */
- 0x75, 0x04, /* Report Size (4) */
- 0x95, 0x01, /* Report Count (1) */
- 0x25, 0x07, /* Logical Maximum (7) */
- 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
- 0x65, 0x14, /* Unit (Centimeter) */
- 0x09, 0x39, /* Usage (Hat switch) */
- 0x81, 0x42, /* Input (Variable) */
- 0x65, 0x00, /* Unit (None) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x0C, /* Report Count (12) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x05, 0x09, /* Usage Page (Button) */
- 0x19, 0x01, /* Usage Minimum (0x01) */
- 0x29, 0x0C, /* Usage Maximum (0x0C) */
- 0x81, 0x02, /* Input (Variable) */
- 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x08, /* Report Count (8) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x09, 0x01, /* Usage (0x01) */
- 0x81, 0x02, /* Input (Variable) */
- 0xC0, /* End Collection */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x07, /* Report Count (7) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x09, 0x02, /* Usage (0x02) */
- 0x91, 0x02, /* Output (Variable) */
- 0xC0, /* End Collection */
- 0xC0 /* End Collection */
-};
-
static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pid0011_rdesc_fixed);
}
break;
- case 0x0006:
- if (*rsize == sizeof(pid0006_rdesc_fixed)) {
- rdesc = pid0006_rdesc_fixed;
- *rsize = sizeof(pid0006_rdesc_fixed);
- }
- break;
}
return rdesc;
}
+#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid) {
+ /*
+ * revert to the old hid-input behavior where axes
+ * can be randomly assigned when hid->usage is reused.
+ */
+ case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs(usage->hid & 0xf);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
.id_table = dr_devices,
.report_fixup = dr_report_fixup,
.probe = dr_probe,
+ .input_mapping = dr_input_mapping,
};
module_hid_driver(dr_driver);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79eebdd..6cfb5cacc253 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
const struct hidled_config *config;
struct hid_device *hdev;
struct hidled_rgb *rgb;
+ u8 *buf;
struct mutex lock;
};
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
+ /*
+ * buffer provided to hid_hw_raw_request must not be on the stack
+ * and must not be part of a data structure
+ */
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
if (ldev->config->report_type == RAW_REQUEST)
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
else if (ldev->config->report_type == OUTPUT_REPORT)
- ret = hid_hw_output_report(ldev->hdev, buf,
+ ret = hid_hw_output_report(ldev->hdev, ldev->buf,
ldev->config->report_size);
else
ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
+
+ memcpy(buf, ldev->buf, ldev->config->report_size);
err:
mutex_unlock(&ldev->lock);
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ldev)
return -ENOMEM;
+ ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+ if (!ldev->buf)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret)
return ret;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..354d49ea36dd 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 4aa3cb63fd41..bcd06306f3e8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -314,10 +314,14 @@ static void heartbeat_onchannelcallback(void *context)
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
struct icmsg_negotiate *negop = NULL;
- vmbus_recvpacket(channel, hbeat_txf_buf,
- PAGE_SIZE, &recvlen, &requestid);
+ while (1) {
+
+ vmbus_recvpacket(channel, hbeat_txf_buf,
+ PAGE_SIZE, &recvlen, &requestid);
+
+ if (!recvlen)
+ break;
- if (recvlen > 0) {
icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
sizeof(struct vmbuspipe_hdr)];
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
* 0.5'C per two measurement cycles thus ignore possible
* but unlikely aliasing error on lsb reading. --Grant
*/
- data->temp = ((i2c_smbus_read_byte_data(client,
+ data->temp = (i2c_smbus_read_byte_data(client,
ADM9240_REG_TEMP) << 8) |
i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF)) / 128;
+ ADM9240_REG_TEMP_CONF);
for (i = 0; i < 2; i++) { /* read fans */
data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
- return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+ return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
long *val)
{
struct max31790_data *data = max31790_update_device(dev);
- u8 fan_config = data->fan_config[channel];
+ u8 fan_config;
if (IS_ERR(data))
return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
+
switch (attr) {
case hwmon_pwm_input:
*val = data->pwm[channel] >> 8;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6d94e2ec5b4f..d252276feadf 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -79,12 +79,12 @@ config I2C_AMD8111
config I2C_HIX5HD2
tristate "Hix5hd2 high-speed I2C driver"
- depends on ARCH_HIX5HD2 || COMPILE_TEST
+ depends on ARCH_HISI || ARCH_HIX5HD2 || COMPILE_TEST
help
- Say Y here to include support for high-speed I2C controller in the
- Hisilicon based hix5hd2 SoCs.
+ Say Y here to include support for the high-speed I2C controller
+ used in HiSilicon hix5hd2 SoCs.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called i2c-hix5hd2.
config I2C_I801
@@ -589,10 +589,10 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
help
Say Y here if you want to use the IIC bus controller on
- the Freescale i.MX/MXC or Layerscape processors.
+ the Freescale i.MX/MXC, Layerscape or ColdFire processors.
This driver can also be built as a module. If so, the module
will be called i2c-imx.
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 1fe93c43215c..11e866d05368 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -95,6 +95,9 @@
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
+
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
@@ -420,12 +423,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* Configure SDA Hold Time if required */
reg = dw_readl(dev, DW_IC_COMP_VERSION);
if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
- if (dev->sda_hold_time) {
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- } else {
+ if (!dev->sda_hold_time) {
/* Keep previous hold time setting if no one set it */
dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
}
+ /*
+ * Workaround to avoid losing TX arbitration when an I2C
+ * slave pulls SDA down "too quickly" after the falling edge of
+ * SCL, by enabling a non-zero SDA RX hold. The specification says
+ * it extends the incoming SDA low-to-high transition while SCL is
+ * high, but it appears to also help with the above issue.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
} else {
dev_warn(dev->dev,
"Hardware too old to adjust SDA hold time.\n");
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 9604024e0eb0..49f2084f7bb5 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -368,6 +368,7 @@ static const struct of_device_id dc_i2c_match[] = {
{ .compatible = "cnxt,cx92755-i2c" },
{ },
};
+MODULE_DEVICE_TABLE(of, dc_i2c_match);
static struct platform_driver dc_i2c_driver = {
.probe = dc_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 08847e8b8998..eb3627f35d12 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -146,6 +146,7 @@
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
#define SMBHSTCFG_I2C_EN 4
+#define SMBHSTCFG_SPD_WD 0x10
/* TCO configuration bits for TCOCTL */
#define TCOCTL_EN 0x0100
@@ -865,9 +866,16 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
block = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
- /* NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ outb_p(((addr & 0x7f) << 1) |
+ ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
+ (read_write & 0x01) : 0),
+ SMBHSTADD(priv));
if (read_write == I2C_SMBUS_READ) {
/* NB: page 240 of ICH5 datasheet also shows
* that DATA1 is the cmd field when reading */
@@ -1573,6 +1581,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* Disable SMBus interrupt feature if SMBus using SMI# */
priv->features &= ~FEATURE_IRQ;
}
+ if (temp & SMBHSTCFG_SPD_WD)
+ dev_info(&dev->dev, "SPD Write Disable is set\n");
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 592a8f26a708..47fc1f1acff7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1009,10 +1009,13 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
rinfo->sda_gpio = of_get_named_gpio(pdev->dev.of_node, "sda-gpios", 0);
rinfo->scl_gpio = of_get_named_gpio(pdev->dev.of_node, "scl-gpios", 0);
- if (!gpio_is_valid(rinfo->sda_gpio) ||
- !gpio_is_valid(rinfo->scl_gpio) ||
- IS_ERR(i2c_imx->pinctrl_pins_default) ||
- IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
+ if (rinfo->sda_gpio == -EPROBE_DEFER ||
+ rinfo->scl_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (!gpio_is_valid(rinfo->sda_gpio) ||
+ !gpio_is_valid(rinfo->scl_gpio) ||
+ IS_ERR(i2c_imx->pinctrl_pins_default) ||
+ IS_ERR(i2c_imx->pinctrl_pins_gpio)) {
dev_dbg(&pdev->dev, "recovery information incomplete\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index b8ea62105f42..30132c3957cd 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -729,6 +729,7 @@ static const struct of_device_id jz4780_i2c_of_matches[] = {
{ .compatible = "ingenic,jz4780-i2c", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, jz4780_i2c_of_matches);
static int jz4780_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 50702c7bb244..df220666d627 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
t_calc->div_low--;
t_calc->div_high--;
+ /* Set the tuning value to 0 so that it does not update the con register */
+ t_calc->tuning = 0;
/* Maximum divider supported by hw is 0xffff */
if (t_calc->div_low > 0xffff) {
t_calc->div_low = 0xffff;
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index 263685c7a512..05cf192ef1ac 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
struct mbox_chan *mbox_chan;
struct mbox_client mbox_client;
struct completion rd_complete;
- u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+ u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
u32 *resp_msg;
};
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 2a972ed7aa0d..e29ff37a43bd 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -426,6 +426,7 @@ static const struct of_device_id xlp9xx_i2c_of_match[] = {
{ .compatible = "netlogic,xlp980-i2c", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
index 0968f59b6df5..ad17d88d8573 100644
--- a/drivers/i2c/busses/i2c-xlr.c
+++ b/drivers/i2c/busses/i2c-xlr.c
@@ -358,6 +358,7 @@ static const struct of_device_id xlr_i2c_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
static int xlr_i2c_probe(struct platform_device *pdev)
{
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 98fffa3a09f7..1704fc84d647 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1680,7 +1680,8 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
static void of_i2c_register_devices(struct i2c_adapter *adap)
{
- struct device_node *node;
+ struct device_node *bus, *node;
+ struct i2c_client *client;
/* Only register child devices if the adapter has a node pointer set */
if (!adap->dev.of_node)
@@ -1688,11 +1689,24 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
- for_each_available_child_of_node(adap->dev.of_node, node) {
+ bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+ if (!bus)
+ bus = of_node_get(adap->dev.of_node);
+
+ for_each_available_child_of_node(bus, node) {
if (of_node_test_and_set_flag(node, OF_POPULATED))
continue;
- of_i2c_register_device(adap, node);
+
+ client = of_i2c_register_device(adap, node);
+ if (IS_ERR(client)) {
+ dev_warn(&adap->dev,
+ "Failed to create I2C device for %s\n",
+ node->full_name);
+ of_node_clear_flag(node, OF_POPULATED);
+ }
}
+
+ of_node_put(bus);
}
static int of_dev_node_match(struct device *dev, void *data)
@@ -2293,6 +2307,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
if (IS_ERR(client)) {
dev_err(&adap->dev, "failed to create client for '%s'\n",
rd->dn->full_name);
+ of_node_clear_flag(rd->dn, OF_POPULATED);
return notifier_from_errno(PTR_ERR(client));
}
break;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 7edcf3238620..99c051490eff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -437,6 +437,8 @@ config STX104
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for Texas Instruments ADC081C,
ADC101C and ADC121C ADC chips.
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index bd321b305a0a..ef761a508630 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -213,13 +213,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
struct device *dev = &data->client->dev;
int ret;
unsigned int val;
+ __be16 rval;
- ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+ ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
if (ret)
return ret;
- dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
- be16_to_cpu(val) % 100);
+ val = be16_to_cpu(rval);
+ dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
if (ret)
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 39dd2026ccc9..066161a4bccd 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -123,22 +123,24 @@ static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,
{
unsigned int storage_bytes = data->chip->read_size;
unsigned int shift = chan->scan_type.shift + (chan->address * 8);
- unsigned int buf;
+ __be16 buf16;
+ __be32 buf32;
int ret;
- ret = spi_read(data->spi, (void *) &buf, storage_bytes);
- if (ret)
- return ret;
-
switch (storage_bytes) {
case 2:
- *val = be16_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf16, storage_bytes);
+ *val = be16_to_cpu(buf16);
break;
case 4:
- *val = be32_to_cpu(buf);
+ ret = spi_read(data->spi, (void *)&buf32, storage_bytes);
+ *val = be32_to_cpu(buf32);
break;
}
+ if (ret)
+ return ret;
+
/* check to be sure this is a valid reading */
if (*val & data->chip->status_bit)
return -EINVAL;
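
The atlas-ph-sensor and maxim_thermocouple hunks above fix the same class of endianness bug: raw big-endian register data has to be read into a __be16/__be32 and converted with be16_to_cpu()/be32_to_cpu() before use, rather than being interpreted through a plain unsigned int. A minimal sketch of the pattern for a 16-bit value, assuming an 8-bit regmap (so a count of 2 reads two consecutive byte registers); the function name is illustrative:

#include <linux/regmap.h>

/* Illustrative helper, not driver code. */
static int read_be16_reg(struct regmap *regmap, unsigned int reg,
			 unsigned int *out)
{
	__be16 raw;
	int ret;

	ret = regmap_bulk_read(regmap, reg, &raw, 2);
	if (ret)
		return ret;

	*out = be16_to_cpu(raw);	/* convert before doing arithmetic */
	return 0;
}
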
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 19a418a1b631..fb3fb89640e5 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -89,4 +89,6 @@ source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
+source "drivers/infiniband/hw/qedr/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT,
- 0, local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 21fe401ff178..e7a5ed9f6f3f 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS) += hns/
+obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 875597b0e69c..097365932b09 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_mtt *hr_mtt,
struct hns_roce_uar *hr_uar,
- struct hns_roce_cq *hr_cq, int vector,
- int collapsed)
+ struct hns_roce_cq *hr_cq, int vector)
{
struct hns_roce_cmd_mailbox *mailbox = NULL;
struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
hr_cq->cons_index = 0;
hr_cq->uar = hr_uar;
+ atomic_set(&hr_cq->refcount, 1);
+ init_completion(&hr_cq->free);
+
return 0;
err_radix:
@@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+ /* wait for all interrupt processed */
+ if (atomic_dec_and_test(&hr_cq->refcount))
+ complete(&hr_cq->free);
+ wait_for_completion(&hr_cq->free);
+
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1;
- mutex_init(&hr_cq->resize_mutex);
spin_lock_init(&hr_cq->lock);
- hr_cq->hr_resize_buf = NULL;
- hr_cq->resize_umem = NULL;
if (context) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
}
/* Allocate cq index, fill cq_context */
- ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
- uar, hr_cq, vector, 0);
+ ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+ hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
goto err_mtt;
@@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
if (context) {
if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
ret = -EFAULT;
- goto err_mtt;
+ goto err_cqc;
}
}
return &hr_cq->ib_cq;
+err_cqc:
+ hns_roce_free_cq(hr_dev, hr_cq);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
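
The hns_roce_cq hunks above add the usual reference-count plus completion teardown: the CQ starts with one reference held by its creator, event and interrupt paths take and drop references, and destroy only frees the object after the last reference signals the waiter. A generic sketch of that pattern with illustrative names, not the driver's own structures:

#include <linux/atomic.h>
#include <linux/completion.h>

struct foo_obj {
	atomic_t refcount;
	struct completion free;
};

static void foo_obj_init(struct foo_obj *obj)
{
	atomic_set(&obj->refcount, 1);	/* creator's reference */
	init_completion(&obj->free);
}

/* Called by event/IRQ paths when they are done with a reference. */
static void foo_obj_put(struct foo_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->free);
}

/* Teardown: drop the creator's reference, wait until all users are gone. */
static void foo_obj_destroy(struct foo_obj *obj)
{
	foo_obj_put(obj);
	wait_for_completion(&obj->free);
	/* now safe to free the object's resources */
}
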
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index ea735800eb18..341731553a60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -62,7 +62,7 @@
#define HNS_ROCE_AEQE_OF_VEC_NUM 1
/* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT 29
+#define HNS_ROCE_SL_SHIFT 28
#define HNS_ROCE_TCLASS_SHIFT 20
#define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
@@ -74,7 +74,9 @@
#define MR_TYPE_DMA 0x03
#define PKEY_ID 0xffff
+#define GUID_LEN 8
#define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
#define SERV_TYPE_RC 0
#define SERV_TYPE_RD 1
@@ -282,20 +284,11 @@ struct hns_roce_cq_buf {
struct hns_roce_mtt hr_mtt;
};
-struct hns_roce_cq_resize {
- struct hns_roce_cq_buf hr_buf;
- int cqe;
-};
-
struct hns_roce_cq {
struct ib_cq ib_cq;
struct hns_roce_cq_buf hr_buf;
- /* pointer to store information after resize*/
- struct hns_roce_cq_resize *hr_resize_buf;
spinlock_t lock;
- struct mutex resize_mutex;
struct ib_umem *umem;
- struct ib_umem *resize_umem;
void (*comp)(struct hns_roce_cq *);
void (*event)(struct hns_roce_cq *, enum hns_roce_event);
@@ -408,6 +401,7 @@ struct hns_roce_qp {
u32 buff_size;
struct mutex mutex;
u8 port;
+ u8 phy_port;
u8 sl;
u8 resp_depth;
u8 state;
@@ -471,7 +465,6 @@ struct hns_roce_caps {
u32 max_rq_desc_sz; /* 64 */
int max_qp_init_rdma;
int max_qp_dest_rdma;
- int sqp_start;
int num_cqs;
int max_cqes;
int reserved_cqs;
@@ -512,6 +505,8 @@ struct hns_roce_hw {
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
+ int (*clear_hem)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@ struct hns_roce_dev {
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t cq_db_lock;
spinlock_t bt_cmd_lock;
struct hns_roce_ib_iboe iboe;
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.c b/drivers/infiniband/hw/hns/hns_roce_eq.c
index 98af7fecf2f1..21e21b03cfb5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.c
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
-
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}
static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
{
struct device *dev = &hr_dev->pdev->dev;
- qpn = roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
dev_warn(dev, "Local Access Violation Work Queue Error.\n");
switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
default:
break;
}
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ int phy_port;
+ int qpn;
+
+ qpn = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+ phy_port = roce_get_field(aeqe->event.qp_event.qp,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+ HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+ if (qpn <= 1)
+ qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+ "QP %d, phy_port %d.\n", qpn, phy_port);
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ break;
+ default:
+ break;
+ }
+
+ hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+ struct hns_roce_aeqe *aeqe,
+ int event_type)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ u32 cqn;
+
+ cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+ HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+ switch (event_type) {
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+ dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+ break;
+ default:
+ break;
+ }
- hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_event(hr_dev, cqn, event_type);
}
static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_aeqe *aeqe;
int aeqes_found = 0;
- int qpn = 0;
+ int event_type;
while ((aeqe = next_aeqe_sw(eq))) {
dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
/* Memory barrier */
rmb();
- switch (roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+ event_type = roce_get_field(aeqe->asyn,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+ HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+ switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_warn(dev, "PATH MIG not supported\n");
break;
@@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "PATH MIG failed\n");
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "qpn = 0x%lx\n",
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
- hns_roce_qp_event(hr_dev,
- roce_get_field(aeqe->event.qp_event.qp,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+ hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
dev_warn(dev, "SRQ not support!\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%lx access err.\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%lx overflow\n",
- roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- break;
case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ ID invalid.\n");
- hns_roce_cq_event(hr_dev,
- le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+ hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
break;
case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
break;
default:
- dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
- eq->eqn, eq->cons_index);
+ dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+ event_type, eq->eqn, eq->cons_index);
break;
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_eq.h b/drivers/infiniband/hw/hns/hns_roce_eq.h
index fe4388191a3c..c6d212d12e03 100644
--- a/drivers/infiniband/hw/hns/hns_roce_eq.h
+++ b/drivers/infiniband/hw/hns/hns_roce_eq.h
@@ -107,6 +107,10 @@ struct hns_roce_aeqe {
#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
(((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
+ (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
+
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
(((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index d53d64362389..250d8f280390 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -36,14 +36,10 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
-#define HW_SYNC_TIMEOUT_MSECS 500
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-
#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
#define DMA_ADDR_T_SHIFT 12
-#define BT_CMD_SYNC_SHIFT 31
#define BT_BA_SHIFT 32
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
return ret;
}
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table,
- unsigned long obj)
-{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long end = 0;
- unsigned long flags;
- void __iomem *bt_cmd;
- uint32_t bt_cmd_val[2];
- u32 bt_cmd_h_val = 0;
- int ret = 0;
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
- break;
- case HEM_TYPE_MTPT:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_MTPT);
- break;
- case HEM_TYPE_CQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
- break;
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
- HEM_TYPE_SRQC);
- break;
- default:
- return ret;
- }
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
- roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!(time_before(jiffies, end))) {
- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
- }
-
- bt_cmd_val[0] = 0;
- bt_cmd_val[1] = bt_cmd_h_val;
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return ret;
-}
-
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj)
{
@@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
if (--table->hem[i]->refcount == 0) {
/* Clear HEM base address */
- if (hns_roce_clear_hem(hr_dev, table, obj))
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj))
dev_warn(dev, "Clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
for (i = 0; i < table->num_hem; ++i)
if (table->hem[i]) {
- if (hns_roce_clear_hem(hr_dev, table,
+ if (hr_dev->hw->clear_hem(hr_dev, table,
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
dev_err(dev, "Clear HEM base address failed.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index ad6617588fba..435748858252 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -34,6 +34,10 @@
#ifndef _HNS_ROCE_HEM_H
#define _HNS_ROCE_HEM_H
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define BT_CMD_SYNC_SHIFT 31
+
enum {
/* MAP HEM(Hardware Entry Memory) */
HEM_TYPE_QPC = 0,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 399f5dedaf2d..71232e5fabf6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
u32 ind = 0;
int ret = 0;
- spin_lock_irqsave(&qp->sq.lock, flags);
+ if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+ ibqp->qp_type != IB_QPT_RC)) {
+ dev_err(dev, "un-supported QP type\n");
+ *bad_wr = NULL;
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&qp->sq.lock, flags);
ind = qp->sq_next_wqe;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->u32_36,
UD_SEND_WQE_U32_36_SGID_INDEX_M,
UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->port,
+ hns_get_gid_index(hr_dev, qp->phy_port,
ah->av.gid_index));
roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
(wr->send_flags & IB_SEND_FENCE ?
(cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
- wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
- sizeof(struct hns_roce_wqe_ctrl_seg);
+ wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
break;
}
ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
- sizeof(struct hns_roce_wqe_raddr_seg);
+ wqe += sizeof(struct hns_roce_wqe_raddr_seg);
dseg = wqe;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(wqe, ((void *) (uintptr_t)
wr->sg_list[i].addr),
wr->sg_list[i].length);
- wqe = (struct hns_roce_wqe_raddr_seg *)
- wqe + wr->sg_list[i].length;
+ wqe += wr->sg_list[i].length;
}
ctrl->flag |= HNS_ROCE_WQE_INLINE;
} else {
@@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
HNS_ROCE_WQE_SGE_NUM_BIT);
}
ind++;
- } else {
- dev_dbg(dev, "unSupported QP type\n");
- break;
}
}
@@ -285,7 +285,7 @@ out:
SQ_DOORBELL_U32_4_SQ_HEAD_S,
(qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->port);
+ SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@ out:
/* SW update GSI rq header */
reg_val = roce_read(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port);
roce_set_field(reg_val,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
hr_qp->rq.head);
roce_write(to_hr_dev(ibqp->device),
ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+ QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
} else {
rq_db.u32_4 = 0;
rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
}
}
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ int ret;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.qpc_buf.buf)
+ return -ENOMEM;
+
+ priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.mtpt_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_mtpt_buf;
+ }
+
+ priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+ HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+ GFP_KERNEL);
+ if (!priv->bt_table.cqc_buf.buf) {
+ ret = -ENOMEM;
+ goto err_failed_alloc_cqc_buf;
+ }
+
+ return 0;
+
+err_failed_alloc_cqc_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+ return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+ dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+ priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
/**
* hns_roce_v1_reset - reset RoCE
* @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->sqp_start = 0;
caps->reserved_lkey = 0;
caps->reserved_pds = 0;
caps->reserved_mrws = 1;
@@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+ ret = hns_roce_bt_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "bt init failed!\n");
+ goto error_failed_bt_init;
+ }
+
return 0;
+error_failed_bt_init:
+ hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+ hns_roce_raq_free(hr_dev);
+
error_failed_raq_init:
hns_roce_db_free(hr_dev);
return ret;
@@ -953,6 +1022,7 @@ error_failed_raq_init:
void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
+ hns_roce_bt_free(hr_dev);
hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
hns_roce_raq_free(hr_dev);
hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
return get_sw_cqe(hr_cq, hr_cq->cons_index);
}
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
- spinlock_t *doorbell_lock)
-
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
u32 doorbell[2];
@@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
*/
wmb();
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
}
}
@@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
/* SQ conrespond to CQE */
sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S));
+ CQE_BYTE_4_WQE_INDEX_S)&
+ ((*cur_qp)->sq.wqe_cnt-1));
switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
break;
}
- if (npolled) {
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
- &to_hr_dev(ibcq->device)->cq_db_lock);
- }
+ if (npolled)
+ hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return ret;
}
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table, int obj)
+{
+ struct device *dev = &hr_dev->pdev->dev;
+ struct hns_roce_v1_priv *priv;
+ unsigned long end = 0, flags = 0;
+ uint32_t bt_cmd_val[2] = {0};
+ void __iomem *bt_cmd;
+ u64 bt_ba = 0;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+ switch (table->type) {
+ case HEM_TYPE_QPC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+ bt_ba = priv->bt_table.qpc_buf.map >> 12;
+ break;
+ case HEM_TYPE_MTPT:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+ bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+ break;
+ case HEM_TYPE_CQC:
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+ bt_ba = priv->bt_table.cqc_buf.map >> 12;
+ break;
+ case HEM_TYPE_SRQC:
+ dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+ return -EINVAL;
+ default:
+ return 0;
+ }
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+ roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+ spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+ end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+ if (!(time_before(jiffies, end))) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+ return -EBUSY;
+ }
+ } else {
+ break;
+ }
+ msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+ }
+
+ bt_cmd_val[0] = (uint32_t)bt_ba;
+ roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+ ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+ hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+ return 0;
+}
+
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt,
enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+ QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
roce_set_bit(context->qp1c_bytes_16,
QP1C_BYTES_16_SIGNALING_TYPE_S,
hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
- hr_qp->sq_signal_bits);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
1);
roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
/* Copy context to QP1C register */
addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
writel(context->qp1c_bytes_4, addr);
writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
writel(context->qp1c_bytes_28, addr + 6);
writel(context->qp1c_bytes_32, addr + 7);
writel(context->cur_sq_wqe_ba_l, addr + 8);
+ writel(context->qp1c_bytes_40, addr + 9);
}
/* Modify QP1C status */
reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context));
+ hr_qp->phy_port * sizeof(*context));
roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(*context), reg_val);
+ hr_qp->phy_port * sizeof(*context), reg_val);
hr_qp->state = new_state;
if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_qp_context *context;
- struct hns_roce_rq_db rq_db;
dma_addr_t dma_handle_2 = 0;
dma_addr_t dma_handle = 0;
uint32_t doorbell[2] = {0};
int rq_pa_start = 0;
- u32 reg_val = 0;
u64 *mtts_2 = NULL;
int ret = -EINVAL;
u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+ QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+ hr_qp->rq.head);
roce_set_field(context->qpc_bytes_68,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_bit(context->qpc_bytes_140,
QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+ attr->retry_cnt);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+ QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+ attr->rnr_retry);
roce_set_field(context->qpc_bytes_148,
QP_CONTEXT_QPC_BYTES_148_LSN_M,
QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
attr->retry_cnt);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
+ if (attr->timeout < 0x12) {
+ dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+ attr->timeout);
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ 0x12);
+ } else {
+ roce_set_field(context->qpc_bytes_156,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+ QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+ attr->timeout);
+ }
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->port);
+ hr_qp->phy_port);
roce_set_field(context->qpc_bytes_156,
QP_CONTEXT_QPC_BYTES_156_SL_M,
QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
0);
- } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+ } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
(cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
(cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
- (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
- attr->qp_state);
-
- } else {
- dev_err(dev, "not support this modify\n");
+ (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+ dev_err(dev, "not support this status migration\n");
goto out;
}
@@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
/* Memory barrier */
wmb();
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* SW update GSI rq header */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port);
- roce_set_field(reg_val,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
- } else {
- rq_db.u32_4 = 0;
- rq_db.u32_8 = 0;
-
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+ roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+ RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+ RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+ roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+ RQ_DOORBELL_U32_8_CMD_S, 1);
+ roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+ if (ibqp->uobject) {
+ hr_qp->rq.db_reg_l = hr_dev->reg_base +
+ ROCEE_DB_OTHERS_L_0_REG +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
}
+
+ hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
}
hr_qp->state = new_state;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT)
- hr_qp->port = (attr->port_num - 1);
+ if (attr_mask & IB_QP_PORT) {
+ hr_qp->port = attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
if (new_state == IB_QPS_RESET && !ibqp->uobject) {
hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
.set_mtu = hns_roce_v1_set_mtu,
.write_mtpt = hns_roce_v1_write_mtpt,
.write_cqc = hns_roce_v1_write_cqc,
+ .clear_hem = hns_roce_v1_clear_hem,
.modify_qp = hns_roce_v1_modify_qp,
.query_qp = hns_roce_v1_query_qp,
.destroy_qp = hns_roce_v1_destroy_qp,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
index 316b592b1636..539b0a3b92b0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
@@ -102,6 +102,8 @@
#define HNS_ROCE_V1_EXT_ODB_ALFUL \
(HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
+
#define HNS_ROCE_ODB_POLL_MODE 0
#define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -971,9 +973,16 @@ struct hns_roce_db_table {
struct hns_roce_ext_db *ext_db;
};
+struct hns_roce_bt_table {
+ struct hns_roce_buf_list qpc_buf;
+ struct hns_roce_buf_list mtpt_buf;
+ struct hns_roce_buf_list cqc_buf;
+};
+
struct hns_roce_v1_priv {
struct hns_roce_db_table db_table;
struct hns_roce_raq_table raq_table;
+ struct hns_roce_bt_table bt_table;
};
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index f64f0dde9a88..764e35a54457 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_qp = hr_dev->caps.num_qps;
props->max_qp_wr = hr_dev->caps.max_wqes;
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_RC_RNR_NAK_GEN;
props->max_sge = hr_dev->caps.max_sq_sg;
props->max_sge_rd = 1;
props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
return 0;
}
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+ u8 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+ struct net_device *ndev;
+
+ if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+ return NULL;
+
+ rcu_read_lock();
+
+ ndev = hr_dev->iboe.netdevs[port_num - 1];
+ if (ndev)
+ dev_hold(ndev);
+
+ rcu_read_unlock();
+ return ndev;
+}
+
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
struct ib_port_attr *props)
{
@@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
iboe = &hr_dev->iboe;
+ spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->query_port = hns_roce_query_port;
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
+ ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->query_gid = hns_roce_query_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
goto error_failed_setup_mtu_gids;
}
- spin_lock_init(&iboe->lock);
-
iboe->nb.notifier_call = hns_roce_netdev_event;
ret = register_netdevice_notifier(&iboe->nb);
if (ret) {
@@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
if (IS_ERR(hr_dev->reg_base))
return PTR_ERR(hr_dev->reg_base);
+ /* read the node_guid of IB device from the DT or ACPI */
+ ret = device_property_read_u8_array(dev, "node-guid",
+ (u8 *)&hr_dev->ib_dev.node_guid,
+ GUID_LEN);
+ if (ret) {
+ dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+ return ret;
+ }
+
/* get the RoCE associated ethernet ports or netdevices */
for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
struct device *dev = &hr_dev->pdev->dev;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->cq_db_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
ret = hns_roce_init_uar_table(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 59f5e2be046b..fb87883ead34 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
mr->umem->page_size);
+ ret = -EINVAL;
+ goto err_umem;
}
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
length);
+ ret = -EINVAL;
goto err_umem;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 16271b5bd170..05db7d59812a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -35,19 +35,7 @@
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
{
- struct device *dev = &hr_dev->pdev->dev;
- unsigned long pd_number;
- int ret = 0;
-
- ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
- if (ret == -1) {
- dev_err(dev, "alloc pdn from pdbitmap failed\n");
- return -ENOMEM;
- }
-
- *pdn = pd_number;
-
- return 0;
+ return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
}
static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
if (ret == -1)
return -ENOMEM;
- uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+ if (uar->index > 0)
+ uar->index = (uar->index - 1) %
+ (hr_dev->caps.phy_num_uars - 1) + 1;
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+ return -EINVAL;
+ }
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 645c18d809a5..e86dd8d06777 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,14 +32,14 @@
*/
#include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_user.h"
-#define DB_REG_OFFSET 0x1000
-#define SQP_NUM 12
+#define SQP_NUM (2 * HNS_ROCE_MAX_PORTS)
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
@@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
int align, unsigned long *base)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- int ret = 0;
- unsigned long qpn;
-
- ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
- if (ret == -1)
- return -ENOMEM;
-
- *base = qpn;
- return 0;
+ return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+ if (base_qpn < SQP_NUM)
return;
hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
struct ib_qp_cap *cap,
- enum ib_qp_type type,
struct hns_roce_qp *hr_qp)
{
struct device *dev = &hr_dev->pdev->dev;
u32 max_cnt;
- (void)type;
if (cap->max_send_wr > hr_dev->caps.max_wqes ||
cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
/* Set SQ size */
ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
- init_attr->qp_type, hr_qp);
+ hr_qp);
if (ret) {
dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
goto err_out;
@@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
return ERR_PTR(-ENOMEM);
hr_qp = &hr_sqp->hr_qp;
+ hr_qp->port = init_attr->port_num - 1;
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+ hr_dev->iboe.phy_port[hr_qp->port];
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
- hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1, hr_qp);
+ hr_qp->ibqp.qp_num, hr_qp);
if (ret) {
dev_err(dev, "Create GSI QP failed!\n");
kfree(hr_sqp);
return ERR_PTR(ret);
}
- hr_qp->port = (init_attr->port_num - 1);
- hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
- hr_dev->caps.num_ports +
- init_attr->port_num - 1;
break;
}
default:{
@@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct device *dev = &hr_dev->pdev->dev;
int ret = -EINVAL;
int p;
+ enum ib_mtu active_mtu;
mutex_lock(&hr_qp->mutex);
@@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
+ if (attr_mask & IB_QP_PATH_MTU) {
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+ if (attr->path_mtu > IB_MTU_2048 ||
+ attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > active_mtu) {
+ dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+ attr->path_mtu);
+ goto out;
+ }
+ }
+
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
- n, hr_qp->rq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
- struct ib_qp *ibqp = &hr_qp->ibqp;
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
- dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
- n, hr_qp->sq.wqe_cnt);
- return NULL;
- }
-
return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}
@@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
/* A port include two SQP, six port total 12 */
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1,
- hr_dev->caps.sqp_start + SQP_NUM,
+ hr_dev->caps.num_qps - 1, SQP_NUM,
reserved_from_top);
if (ret) {
dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 22174774dbb8..63036c731626 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1019,7 +1019,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
- resp.cache_line_size = L1_CACHE_BYTES;
+ resp.cache_line_size = cache_line_size();
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 41f4c2afbcdd..7ce97daf26c6 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -52,7 +52,6 @@ enum {
enum {
MLX5_IB_SQ_STRIDE = 6,
- MLX5_IB_CACHE_LINE_SIZE = 64,
};
static const u32 mlx5_ib_opcode[] = {
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig
new file mode 100644
index 000000000000..6c9f3923e838
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_QEDR
+ tristate "QLogic RoCE driver"
+ depends on 64BIT && QEDE
+ select QED_LL2
+ select QED_RDMA
+ ---help---
+ This driver provides low-level InfiniBand over Ethernet
+ support for QLogic QED host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/qedr/Makefile b/drivers/infiniband/hw/qedr/Makefile
new file mode 100644
index 000000000000..ba7067c77f2f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_QEDR) := qedr.o
+
+qedr-y := main.o verbs.o qedr_cm.o
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
new file mode 100644
index 000000000000..7b74d09a8217
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -0,0 +1,914 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/addrconf.h>
+#include <linux/qed/qede_roce.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+
+MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(QEDR_MODULE_VERSION);
+
+#define QEDR_WQ_MULTIPLIER_DFT (3)
+
+void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event ibev;
+
+ ibev.device = &dev->ibdev;
+ ibev.element.port_num = port_num;
+ ibev.event = type;
+
+ ib_dispatch_event(&ibev);
+}
+
+static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
+ size_t str_len)
+{
+ struct qedr_dev *qedr = get_qedr_dev(ibdev);
+ u32 fw_ver = (u32)qedr->attr.fw_ver;
+
+ snprintf(str, str_len, "%d.%d.%d.%d",
+ (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
+ (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
+}
+
+static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
+{
+ struct qedr_dev *qdev;
+
+ qdev = get_qedr_dev(dev);
+ dev_hold(qdev->ndev);
+
+ /* The HW vendor's device driver must guarantee
+ * that this function returns NULL before the net device reaches
+ * NETDEV_UNREGISTER_FINAL state.
+ */
+ return qdev->ndev;
+}
+
+static int qedr_register_device(struct qedr_dev *dev)
+{
+ strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
+
+ dev->ibdev.node_guid = dev->attr.node_guid;
+ memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
+ dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
+
+ dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
+ QEDR_UVERBS(QUERY_DEVICE) |
+ QEDR_UVERBS(QUERY_PORT) |
+ QEDR_UVERBS(ALLOC_PD) |
+ QEDR_UVERBS(DEALLOC_PD) |
+ QEDR_UVERBS(CREATE_COMP_CHANNEL) |
+ QEDR_UVERBS(CREATE_CQ) |
+ QEDR_UVERBS(RESIZE_CQ) |
+ QEDR_UVERBS(DESTROY_CQ) |
+ QEDR_UVERBS(REQ_NOTIFY_CQ) |
+ QEDR_UVERBS(CREATE_QP) |
+ QEDR_UVERBS(MODIFY_QP) |
+ QEDR_UVERBS(QUERY_QP) |
+ QEDR_UVERBS(DESTROY_QP) |
+ QEDR_UVERBS(REG_MR) |
+ QEDR_UVERBS(DEREG_MR) |
+ QEDR_UVERBS(POLL_CQ) |
+ QEDR_UVERBS(POST_SEND) |
+ QEDR_UVERBS(POST_RECV);
+
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->num_cnq;
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+
+ dev->ibdev.query_device = qedr_query_device;
+ dev->ibdev.query_port = qedr_query_port;
+ dev->ibdev.modify_port = qedr_modify_port;
+
+ dev->ibdev.query_gid = qedr_query_gid;
+ dev->ibdev.add_gid = qedr_add_gid;
+ dev->ibdev.del_gid = qedr_del_gid;
+
+ dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
+ dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
+ dev->ibdev.mmap = qedr_mmap;
+
+ dev->ibdev.alloc_pd = qedr_alloc_pd;
+ dev->ibdev.dealloc_pd = qedr_dealloc_pd;
+
+ dev->ibdev.create_cq = qedr_create_cq;
+ dev->ibdev.destroy_cq = qedr_destroy_cq;
+ dev->ibdev.resize_cq = qedr_resize_cq;
+ dev->ibdev.req_notify_cq = qedr_arm_cq;
+
+ dev->ibdev.create_qp = qedr_create_qp;
+ dev->ibdev.modify_qp = qedr_modify_qp;
+ dev->ibdev.query_qp = qedr_query_qp;
+ dev->ibdev.destroy_qp = qedr_destroy_qp;
+
+ dev->ibdev.query_pkey = qedr_query_pkey;
+
+ dev->ibdev.create_ah = qedr_create_ah;
+ dev->ibdev.destroy_ah = qedr_destroy_ah;
+
+ dev->ibdev.get_dma_mr = qedr_get_dma_mr;
+ dev->ibdev.dereg_mr = qedr_dereg_mr;
+ dev->ibdev.reg_user_mr = qedr_reg_user_mr;
+ dev->ibdev.alloc_mr = qedr_alloc_mr;
+ dev->ibdev.map_mr_sg = qedr_map_mr_sg;
+
+ dev->ibdev.poll_cq = qedr_poll_cq;
+ dev->ibdev.post_send = qedr_post_send;
+ dev->ibdev.post_recv = qedr_post_recv;
+
+ dev->ibdev.process_mad = qedr_process_mad;
+ dev->ibdev.get_port_immutable = qedr_port_immutable;
+ dev->ibdev.get_netdev = qedr_get_netdev;
+
+ dev->ibdev.dma_device = &dev->pdev->dev;
+
+ dev->ibdev.get_link_layer = qedr_link_layer;
+ dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
+
+ return ib_register_device(&dev->ibdev, NULL);
+}
+
+/* This function allocates fast-path status block memory */
+static int qedr_alloc_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&dev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt)
+ return -ENOMEM;
+
+ rc = dev->ops->common->sb_init(dev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_CNQ);
+ if (rc) {
+ pr_err("Status block initialization failed\n");
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qedr_free_mem_sb(struct qedr_dev *dev,
+ struct qed_sb_info *sb_info, int sb_id)
+{
+ if (sb_info->sb_virt) {
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ }
+}
+
+static void qedr_free_resources(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ }
+
+ kfree(dev->cnq_array);
+ kfree(dev->sb_array);
+ kfree(dev->sgid_tbl);
+}
+
+static int qedr_alloc_resources(struct qedr_dev *dev)
+{
+ struct qedr_cnq *cnq;
+ __le16 *cons_pi;
+ u16 n_entries;
+ int i, rc;
+
+ dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
+ QEDR_MAX_SGID, GFP_KERNEL);
+ if (!dev->sgid_tbl)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->sgid_lock);
+
+ /* Allocate Status blocks for CNQ */
+ dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
+ GFP_KERNEL);
+ if (!dev->sb_array) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ dev->cnq_array = kcalloc(dev->num_cnq,
+ sizeof(*dev->cnq_array), GFP_KERNEL);
+ if (!dev->cnq_array) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
+
+ /* Allocate CNQ PBLs */
+ n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ for (i = 0; i < dev->num_cnq; i++) {
+ cnq = &dev->cnq_array[i];
+
+ rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
+ dev->sb_start + i);
+ if (rc)
+ goto err3;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
+ n_entries,
+ sizeof(struct regpair *),
+ &cnq->pbl);
+ if (rc)
+ goto err4;
+
+ cnq->dev = dev;
+ cnq->sb = &dev->sb_array[i];
+ cons_pi = dev->sb_array[i].sb_virt->pi_array;
+ cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
+ cnq->index = i;
+ sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
+ i, qed_chain_get_cons_idx(&cnq->pbl));
+ }
+
+ return 0;
+err4:
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+err3:
+ for (--i; i >= 0; i--) {
+ dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
+ qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
+ }
+ kfree(dev->cnq_array);
+err2:
+ kfree(dev->sb_array);
+err1:
+ kfree(dev->sgid_tbl);
+ return rc;
+}
+
+/* QEDR sysfs interface */
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
+
+static struct device_attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type
+};
+
+static void qedr_remove_sysfiles(struct qedr_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
+}
+
+static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
+{
+ struct pci_dev *bridge;
+ u32 val;
+
+ dev->atomic_cap = IB_ATOMIC_NONE;
+
+ bridge = pdev->bus->self;
+ if (!bridge)
+ return;
+
+ /* Check whether we are connected directly or via a switch */
+ while (bridge && bridge->bus->parent) {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
+ bridge->bus->number, bridge->bus->primary);
+ /* AtomicOp routing must be supported on every bridge all the
+ * way up to the root complex.
+ */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
+ pcie_capability_clear_word(pdev,
+ PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ return;
+ }
+ bridge = bridge->bus->parent->self;
+ }
+ bridge = pdev->bus->self;
+
+ /* according to bridge capability */
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
+ if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+ } else {
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ }
+}
+
+static const struct qed_rdma_ops *qed_ops;
+
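+/* Reassemble a 64-bit value (e.g. a CQ handle) from the hi/lo 32-bit halves reported by the firmware */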
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+static irqreturn_t qedr_irq_handler(int irq, void *handle)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+ struct qedr_cnq *cnq = handle;
+ struct regpair *cq_handle;
+ struct qedr_cq *cq;
+
+ qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
+
+ qed_sb_update_sb_idx(cnq->sb);
+
+ hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ /* Align protocol-index and chain reads */
+ rmb();
+
+ while (sw_comp_cons != hw_comp_cons) {
+ cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
+ cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
+ cq_handle->lo);
+
+ if (cq == NULL) {
+ DP_ERR(cnq->dev,
+ "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
+ cq_handle->hi, cq_handle->lo, sw_comp_cons,
+ hw_comp_cons);
+
+ break;
+ }
+
+ if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
+ DP_ERR(cnq->dev,
+ "Problem with cq signature, cq_handle->hi=%d ch_handle->lo=%d cq=%p\n",
+ cq_handle->hi, cq_handle->lo, cq);
+ break;
+ }
+
+ cq->arm_flags = 0;
+
+ if (cq->ibcq.comp_handler)
+ (*cq->ibcq.comp_handler)
+ (&cq->ibcq, cq->ibcq.cq_context);
+
+ sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
+
+ cnq->n_comp++;
+
+ }
+
+ qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
+ sw_comp_cons);
+
+ qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void qedr_sync_free_irqs(struct qedr_dev *dev)
+{
+ u32 vector;
+ int i;
+
+ for (i = 0; i < dev->int_info.used_cnt; i++) {
+ if (dev->int_info.msix_cnt) {
+ vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ synchronize_irq(vector);
+ free_irq(vector, &dev->cnq_array[i]);
+ }
+ }
+
+ dev->int_info.used_cnt = 0;
+}
+
+static int qedr_req_msix_irqs(struct qedr_dev *dev)
+{
+ int i, rc = 0;
+
+ if (dev->num_cnq > dev->int_info.msix_cnt) {
+ DP_ERR(dev,
+ "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
+ dev->num_cnq, dev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->num_cnq; i++) {
+ rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ qedr_irq_handler, 0, dev->cnq_array[i].name,
+ &dev->cnq_array[i]);
+ if (rc) {
+ DP_ERR(dev, "Request cnq %d irq failed\n", i);
+ qedr_sync_free_irqs(dev);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
+ dev->cnq_array[i].name, i,
+ &dev->cnq_array[i]);
+ dev->int_info.used_cnt++;
+ }
+ }
+
+ return rc;
+}
+
+static int qedr_setup_irqs(struct qedr_dev *dev)
+{
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");
+
+ /* Learn Interrupt configuration */
+ rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
+ if (rc < 0)
+ return rc;
+
+ rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
+ if (rc) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
+ return rc;
+ }
+
+ if (dev->int_info.msix_cnt) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
+ dev->int_info.msix_cnt);
+ rc = qedr_req_msix_irqs(dev);
+ if (rc)
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");
+
+ return 0;
+}
+
+static int qedr_set_device_attr(struct qedr_dev *dev)
+{
+ struct qed_rdma_device *qed_attr;
+ struct qedr_device_attr *attr;
+ u32 page_size;
+
+ /* Part 1 - query core capabilities */
+ qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
+
+ /* Part 2 - check capabilities */
+ page_size = ~qed_attr->page_size_caps + 1;
+ if (page_size > PAGE_SIZE) {
+ DP_ERR(dev,
+ "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
+ PAGE_SIZE, page_size);
+ return -ENODEV;
+ }
+
+ /* Part 3 - copy and update capabilities */
+ attr = &dev->attr;
+ attr->vendor_id = qed_attr->vendor_id;
+ attr->vendor_part_id = qed_attr->vendor_part_id;
+ attr->hw_ver = qed_attr->hw_ver;
+ attr->fw_ver = qed_attr->fw_ver;
+ attr->node_guid = qed_attr->node_guid;
+ attr->sys_image_guid = qed_attr->sys_image_guid;
+ attr->max_cnq = qed_attr->max_cnq;
+ attr->max_sge = qed_attr->max_sge;
+ attr->max_inline = qed_attr->max_inline;
+ attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
+ attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
+ attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
+ attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
+ attr->max_dev_resp_rd_atomic_resc =
+ qed_attr->max_dev_resp_rd_atomic_resc;
+ attr->max_cq = qed_attr->max_cq;
+ attr->max_qp = qed_attr->max_qp;
+ attr->max_mr = qed_attr->max_mr;
+ attr->max_mr_size = qed_attr->max_mr_size;
+ attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
+ attr->max_mw = qed_attr->max_mw;
+ attr->max_fmr = qed_attr->max_fmr;
+ attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
+ attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
+ attr->max_pd = qed_attr->max_pd;
+ attr->max_ah = qed_attr->max_ah;
+ attr->max_pkey = qed_attr->max_pkey;
+ attr->max_srq = qed_attr->max_srq;
+ attr->max_srq_wr = qed_attr->max_srq_wr;
+ attr->dev_caps = qed_attr->dev_caps;
+ attr->page_size_caps = qed_attr->page_size_caps;
+ attr->dev_ack_delay = qed_attr->dev_ack_delay;
+ attr->reserved_lkey = qed_attr->reserved_lkey;
+ attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
+ attr->max_stats_queues = qed_attr->max_stats_queues;
+
+ return 0;
+}
+
+void qedr_unaffiliated_event(void *context,
+ u8 event_code)
+{
+ pr_err("unaffiliated event not implemented yet\n");
+}
+
+void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
+{
+#define EVENT_TYPE_NOT_DEFINED 0
+#define EVENT_TYPE_CQ 1
+#define EVENT_TYPE_QP 2
+ struct qedr_dev *dev = (struct qedr_dev *)context;
+ union event_ring_data *data = fw_handle;
+ u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
+ data->roce_handle.lo;
+ u8 event_type = EVENT_TYPE_NOT_DEFINED;
+ struct ib_event event;
+ struct ib_cq *ibcq;
+ struct ib_qp *ibqp;
+ struct qedr_cq *cq;
+ struct qedr_qp *qp;
+
+ switch (e_code) {
+ case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
+ event.event = IB_EVENT_CQ_ERR;
+ event_type = EVENT_TYPE_CQ;
+ break;
+ case ROCE_ASYNC_EVENT_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
+ event.event = IB_EVENT_QP_FATAL;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ event_type = EVENT_TYPE_QP;
+ break;
+ default:
+ DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
+ roce_handle64);
+ }
+
+ switch (event_type) {
+ case EVENT_TYPE_CQ:
+ cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
+ if (cq) {
+ ibcq = &cq->ibcq;
+ if (ibcq->event_handler) {
+ event.device = ibcq->device;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+ } else {
+ WARN(1,
+ "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "CQ event %d on hanlde %p\n", e_code, cq);
+ break;
+ case EVENT_TYPE_QP:
+ qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
+ if (qp) {
+ ibqp = &qp->ibqp;
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+ } else {
+ WARN(1,
+ "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
+ roce_handle64);
+ }
+ DP_ERR(dev, "QP event %d on hanlde %p\n", e_code, qp);
+ break;
+ default:
+ break;
+ }
+}
+
+static int qedr_init_hw(struct qedr_dev *dev)
+{
+ struct qed_rdma_add_user_out_params out_params;
+ struct qed_rdma_start_in_params *in_params;
+ struct qed_rdma_cnq_params *cur_pbl;
+ struct qed_rdma_events events;
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+ int rc = 0;
+ int i;
+
+ in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
+ if (!in_params) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ in_params->desired_cnq = dev->num_cnq;
+ for (i = 0; i < dev->num_cnq; i++) {
+ cur_pbl = &in_params->cnq_pbl_list[i];
+
+ page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
+ cur_pbl->num_pbl_pages = page_cnt;
+
+ p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
+ cur_pbl->pbl_ptr = (u64)p_phys_table;
+ }
+
+ events.affiliated_event = qedr_affiliated_event;
+ events.unaffiliated_event = qedr_unaffiliated_event;
+ events.context = dev;
+
+ in_params->events = &events;
+ in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
+ in_params->max_mtu = dev->ndev->mtu;
+ ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);
+
+ rc = dev->ops->rdma_init(dev->cdev, in_params);
+ if (rc)
+ goto out;
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
+ if (rc)
+ goto out;
+
+ dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
+ dev->db_phys_addr = out_params.dpi_phys_addr;
+ dev->db_size = out_params.dpi_size;
+ dev->dpi = out_params.dpi;
+
+ rc = qedr_set_device_attr(dev);
+out:
+ kfree(in_params);
+ if (rc)
+ DP_ERR(dev, "Init HW Failed rc = %d\n", rc);
+
+ return rc;
+}
+
+void qedr_stop_hw(struct qedr_dev *dev)
+{
+ dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
+ dev->ops->rdma_stop(dev->rdma_ctx);
+}
+
+static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
+ struct net_device *ndev)
+{
+ struct qed_dev_rdma_info dev_info;
+ struct qedr_dev *dev;
+ int rc = 0, i;
+
+ dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");
+
+ dev->pdev = pdev;
+ dev->ndev = ndev;
+ dev->cdev = cdev;
+
+ qed_ops = qed_get_rdma_ops();
+ if (!qed_ops) {
+ DP_ERR(dev, "Failed to get qed roce operations\n");
+ goto init_err;
+ }
+
+ dev->ops = qed_ops;
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto init_err;
+
+ dev->num_hwfns = dev_info.common.num_hwfns;
+ dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
+
+ dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
+ if (!dev->num_cnq) {
+ DP_ERR(dev, "not enough CNQ resources.\n");
+ goto init_err;
+ }
+
+ dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;
+
+ qedr_pci_set_atomic(dev, pdev);
+
+ rc = qedr_alloc_resources(dev);
+ if (rc)
+ goto init_err;
+
+ rc = qedr_init_hw(dev);
+ if (rc)
+ goto alloc_err;
+
+ rc = qedr_setup_irqs(dev);
+ if (rc)
+ goto irq_err;
+
+ rc = qedr_register_device(dev);
+ if (rc) {
+ DP_ERR(dev, "Unable to allocate register device\n");
+ goto reg_err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
+ if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
+ goto sysfs_err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
+ return dev;
+
+sysfs_err:
+ ib_unregister_device(&dev->ibdev);
+reg_err:
+ qedr_sync_free_irqs(dev);
+irq_err:
+ qedr_stop_hw(dev);
+alloc_err:
+ qedr_free_resources(dev);
+init_err:
+ ib_dealloc_device(&dev->ibdev);
+ DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
+
+ return NULL;
+}
+
+static void qedr_remove(struct qedr_dev *dev)
+{
+ /* First unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ qedr_remove_sysfiles(dev);
+ ib_unregister_device(&dev->ibdev);
+
+ qedr_stop_hw(dev);
+ qedr_sync_free_irqs(dev);
+ qedr_free_resources(dev);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static int qedr_close(struct qedr_dev *dev)
+{
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
+
+ return 0;
+}
+
+static void qedr_shutdown(struct qedr_dev *dev)
+{
+ qedr_close(dev);
+ qedr_remove(dev);
+}
+
+static void qedr_mac_address_change(struct qedr_dev *dev)
+{
+ union ib_gid *sgid = &dev->sgid_tbl[0];
+ u8 guid[8], mac_addr[6];
+ int rc;
+
+ /* Update SGID */
+ ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
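+ /* Derive a modified EUI-64 GUID from the MAC address: flip the
+ * universal/local bit and insert 0xff, 0xfe in the middle.
+ */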
+ guid[0] = mac_addr[0] ^ 2;
+ guid[1] = mac_addr[1];
+ guid[2] = mac_addr[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac_addr[3];
+ guid[6] = mac_addr[4];
+ guid[7] = mac_addr[5];
+ sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ memcpy(&sgid->raw[8], guid, sizeof(guid));
+
+ /* Update LL2 */
+ rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
+ dev->gsi_ll2_mac_address,
+ dev->ndev->dev_addr);
+
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+
+ if (rc)
+ DP_ERR(dev, "Error updating mac filter\n");
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies the
+ * stack of the event.
+ */
+static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+{
+ switch (event) {
+ case QEDE_UP:
+ qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+ case QEDE_DOWN:
+ qedr_close(dev);
+ break;
+ case QEDE_CLOSE:
+ qedr_shutdown(dev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qedr_mac_address_change(dev);
+ break;
+ default:
+ pr_err("Event not supported\n");
+ }
+}
+
+static struct qedr_driver qedr_drv = {
+ .name = "qedr_driver",
+ .add = qedr_add,
+ .remove = qedr_remove,
+ .notify = qedr_notify,
+};
+
+static int __init qedr_init_module(void)
+{
+ return qede_roce_register_driver(&qedr_drv);
+}
+
+static void __exit qedr_exit_module(void)
+{
+ qede_roce_unregister_driver(&qedr_drv);
+}
+
+module_init(qedr_init_module);
+module_exit(qedr_exit_module);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
new file mode 100644
index 000000000000..620badd7d4fb
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -0,0 +1,495 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_H__
+#define __QEDR_H__
+
+#include <linux/pci.h>
+#include <rdma/ib_addr.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_roce_if.h>
+#include <linux/qed/qede_roce.h>
+#include "qedr_hsi.h"
+
+#define QEDR_MODULE_VERSION "8.10.10.0"
+#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
+#define DP_NAME(dev) ((dev)->ibdev.name)
+
+#define DP_DEBUG(dev, module, fmt, ...) \
+ pr_debug("(%s) " module ": " fmt, \
+ DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)
+
+#define QEDR_MSG_INIT "INIT"
+#define QEDR_MSG_MISC "MISC"
+#define QEDR_MSG_CQ " CQ"
+#define QEDR_MSG_MR " MR"
+#define QEDR_MSG_RQ " RQ"
+#define QEDR_MSG_SQ " SQ"
+#define QEDR_MSG_QP " QP"
+#define QEDR_MSG_GSI " GSI"
+
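+/* Stored in qedr_cq->sig and validated in the CNQ interrupt handler to detect corrupted CQ handles */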
+#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
+
+struct qedr_dev;
+
+struct qedr_cnq {
+ struct qedr_dev *dev;
+ struct qed_chain pbl;
+ struct qed_sb_info *sb;
+ char name[32];
+ u64 n_comp;
+ __le16 *hw_cons_ptr;
+ u8 index;
+};
+
+#define QEDR_MAX_SGID 128
+
+struct qedr_device_attr {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+ u64 node_guid;
+ u64 sys_image_guid;
+ u8 max_cnq;
+ u8 max_sge;
+ u16 max_inline;
+ u32 max_sqe;
+ u32 max_rqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u32 max_srq;
+ u32 max_srq_wr;
+ u8 max_srq_sge;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+struct qedr_dev {
+ struct ib_device ibdev;
+ struct qed_dev *cdev;
+ struct pci_dev *pdev;
+ struct net_device *ndev;
+
+ enum ib_atomic_cap atomic_cap;
+
+ void *rdma_ctx;
+ struct qedr_device_attr attr;
+
+ const struct qed_rdma_ops *ops;
+ struct qed_int_info int_info;
+
+ struct qed_sb_info *sb_array;
+ struct qedr_cnq *cnq_array;
+ int num_cnq;
+ int sb_start;
+
+ void __iomem *db_addr;
+ u64 db_phys_addr;
+ u32 db_size;
+ u16 dpi;
+
+ union ib_gid *sgid_tbl;
+
+ /* Lock for sgid table */
+ spinlock_t sgid_lock;
+
+ u64 guid;
+
+ u32 dp_module;
+ u8 dp_level;
+ u8 num_hwfns;
+ uint wq_multiplier;
+ u8 gsi_ll2_mac_address[ETH_ALEN];
+ int gsi_qp_created;
+ struct qedr_cq *gsi_sqcq;
+ struct qedr_cq *gsi_rqcq;
+ struct qedr_qp *gsi_qp;
+};
+
+#define QEDR_MAX_SQ_PBL (0x8000)
+#define QEDR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge))
+#define QEDR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_SQE_ELEMENT_SIZE)
+#define QEDR_MAX_SQE ((QEDR_MAX_SQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_SQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
+/* RQ */
+#define QEDR_MAX_RQ_PBL (0x2000)
+#define QEDR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *))
+#define QEDR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge))
+#define QEDR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
+ QEDR_RQE_ELEMENT_SIZE)
+#define QEDR_MAX_RQE ((QEDR_MAX_RQ_PBL_ENTRIES) *\
+ (RDMA_RING_PAGE_SIZE) / \
+ (QEDR_RQE_ELEMENT_SIZE) /\
+ (QEDR_MAX_RQE_ELEMENTS_PER_RQE))
+
+#define QEDR_CQE_SIZE (sizeof(union rdma_cqe))
+#define QEDR_MAX_CQE_PBL_SIZE (512 * 1024)
+#define QEDR_MAX_CQE_PBL_ENTRIES (((QEDR_MAX_CQE_PBL_SIZE) / \
+ sizeof(u64)) - 1)
+#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
+ (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))
+
+#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
+
+#define QEDR_MAX_PORT (1)
+
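+/* Build the uverbs_cmd_mask bit for a given IB_USER_VERBS_CMD_* opcode */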
+#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define QEDR_ROCE_PKEY_MAX 1
+#define QEDR_ROCE_PKEY_TABLE_LEN 1
+#define QEDR_ROCE_PKEY_DEFAULT 0xffff
+
+struct qedr_pbl {
+ struct list_head list_entry;
+ void *va;
+ dma_addr_t pa;
+};
+
+struct qedr_ucontext {
+ struct ib_ucontext ibucontext;
+ struct qedr_dev *dev;
+ struct qedr_pd *pd;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+ u16 dpi;
+
+ struct list_head mm_head;
+
+ /* Lock to protect mm list */
+ struct mutex mm_list_lock;
+};
+
+union db_prod64 {
+ struct rdma_pwm_val32_data data;
+ u64 raw;
+};
+
+enum qedr_cq_type {
+ QEDR_CQ_TYPE_GSI,
+ QEDR_CQ_TYPE_KERNEL,
+ QEDR_CQ_TYPE_USER,
+};
+
+struct qedr_pbl_info {
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ bool two_layered;
+};
+
+struct qedr_userq {
+ struct ib_umem *umem;
+ struct qedr_pbl_info pbl_info;
+ struct qedr_pbl *pbl_tbl;
+ u64 buf_addr;
+ size_t buf_len;
+};
+
+struct qedr_cq {
+ struct ib_cq ibcq;
+
+ enum qedr_cq_type cq_type;
+ u32 sig;
+
+ u16 icid;
+
+ /* Lock to protect completion handler */
+ spinlock_t comp_handler_lock;
+
+ /* Lock to protect multiple CQs */
+ spinlock_t cq_lock;
+ u8 arm_flags;
+ struct qed_chain pbl;
+
+ void __iomem *db_addr;
+ union db_prod64 db;
+
+ u8 pbl_toggle;
+ union rdma_cqe *latest_cqe;
+ union rdma_cqe *toggle_cqe;
+
+ u32 cq_cons;
+
+ struct qedr_userq q;
+};
+
+struct qedr_pd {
+ struct ib_pd ibpd;
+ u32 pd_id;
+ struct qedr_ucontext *uctx;
+};
+
+struct qedr_mm {
+ struct {
+ u64 phy_addr;
+ unsigned long len;
+ } key;
+ struct list_head entry;
+};
+
+union db_prod32 {
+ struct rdma_pwm_val16_data data;
+ u32 raw;
+};
+
+struct qedr_qp_hwq_info {
+ /* WQE Elements */
+ struct qed_chain pbl;
+ u64 p_phys_addr_tbl;
+ u32 max_sges;
+
+ /* WQE */
+ u16 prod;
+ u16 cons;
+ u16 wqe_cons;
+ u16 gsi_cons;
+ u16 max_wr;
+
+ /* DB */
+ void __iomem *db;
+ union db_prod32 db_data;
+};
+
+#define QEDR_INC_SW_IDX(p_info, index) \
+ do { \
+ p_info->index = (p_info->index + 1) & \
+ qed_chain_get_capacity(p_info->pbl); \
+ } while (0)
+
+enum qedr_qp_err_bitmap {
+ QEDR_QP_ERR_SQ_FULL = 1,
+ QEDR_QP_ERR_RQ_FULL = 2,
+ QEDR_QP_ERR_BAD_SR = 4,
+ QEDR_QP_ERR_BAD_RR = 8,
+ QEDR_QP_ERR_SQ_PBL_FULL = 16,
+ QEDR_QP_ERR_RQ_PBL_FULL = 32,
+};
+
+struct qedr_qp {
+ struct ib_qp ibqp; /* must be first */
+ struct qedr_dev *dev;
+
+ struct qedr_qp_hwq_info sq;
+ struct qedr_qp_hwq_info rq;
+
+ u32 max_inline_data;
+
+ /* Lock for QP's */
+ spinlock_t q_lock;
+ struct qedr_cq *sq_cq;
+ struct qedr_cq *rq_cq;
+ struct qedr_srq *srq;
+ enum qed_roce_qp_state state;
+ u32 id;
+ struct qedr_pd *pd;
+ enum ib_qp_type qp_type;
+ struct qed_rdma_qp *qed_qp;
+ u32 qp_id;
+ u16 icid;
+ u16 mtu;
+ int sgid_idx;
+ u32 rq_psn;
+ u32 sq_psn;
+ u32 qkey;
+ u32 dest_qp_num;
+
+ /* Relevant to qps created from kernel space only (ULPs) */
+ u8 prev_wqe_size;
+ u16 wqe_cons;
+ u32 err_bitmap;
+ bool signaled;
+
+ /* SQ shadow */
+ struct {
+ u64 wr_id;
+ enum ib_wc_opcode opcode;
+ u32 bytes_len;
+ u8 wqe_size;
+ bool signaled;
+ dma_addr_t icrc_mapping;
+ u32 *icrc;
+ struct qedr_mr *mr;
+ } *wqe_wr_id;
+
+ /* RQ shadow */
+ struct {
+ u64 wr_id;
+ struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
+ u8 wqe_size;
+
+ u8 smac[ETH_ALEN];
+ u16 vlan_id;
+ int rc;
+ } *rqe_wr_id;
+
+ /* Relevant to qps created from user space only (applications) */
+ struct qedr_userq usq;
+ struct qedr_userq urq;
+};
+
+struct qedr_ah {
+ struct ib_ah ibah;
+ struct ib_ah_attr attr;
+};
+
+enum qedr_mr_type {
+ QEDR_MR_USER,
+ QEDR_MR_KERNEL,
+ QEDR_MR_DMA,
+ QEDR_MR_FRMR,
+};
+
+struct mr_info {
+ struct qedr_pbl *pbl_table;
+ struct qedr_pbl_info pbl_info;
+ struct list_head free_pbl_list;
+ struct list_head inuse_pbl_list;
+ u32 completed;
+ u32 completed_handled;
+};
+
+struct qedr_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+
+ struct qed_rdma_register_tid_in_params hw_mr;
+ enum qedr_mr_type type;
+
+ struct qedr_dev *dev;
+ struct mr_info info;
+
+ u64 *pages;
+ u32 npages;
+};
+
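+/* OR 'flag' into 'value' at the bit position given by the field's name ## _SHIFT definition */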
+#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))
+
+#define QEDR_RESP_IMM (RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
+ RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
+#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
+ RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
+#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+
+static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
+{
+ info->cons = (info->cons + 1) % info->max_wr;
+ info->wqe_cons++;
+}
+
+static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
+{
+ info->prod = (info->prod + 1) % info->max_wr;
+}
+
+static inline int qedr_get_dmac(struct qedr_dev *dev,
+ struct ib_ah_attr *ah_attr, u8 *mac_addr)
+{
+ union ib_gid zero_sgid = { { 0 } };
+ struct in6_addr in6;
+
+ if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ DP_ERR(dev, "Local port GID not supported\n");
+ eth_zero_addr(mac_addr);
+ return -EINVAL;
+ }
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+ ether_addr_copy(mac_addr, ah_attr->dmac);
+
+ return 0;
+}
+
+static inline
+struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct qedr_ucontext, ibucontext);
+}
+
+static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct qedr_dev, ibdev);
+}
+
+static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct qedr_pd, ibpd);
+}
+
+static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct qedr_cq, ibcq);
+}
+
+static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct qedr_qp, ibqp);
+}
+
+static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct qedr_ah, ibah);
+}
+
+static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct qedr_mr, ibmr);
+}
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
new file mode 100644
index 000000000000..63890ebb72bd
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -0,0 +1,622 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_roce_if.h>
+#include "qedr.h"
+#include "qedr_hsi.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_hsi.h"
+#include "qedr_cm.h"
+
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
+{
+ info->gsi_cons = (info->gsi_cons + 1) % info->max_wr;
+}
+
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_qedr_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_qedr_cq(attrs->recv_cq);
+ dev->gsi_qp = qp;
+}
+
+void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_qdev;
+ struct qedr_cq *cq = dev->gsi_sqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "LL2 TX CB: gsi_sqcq=%p, gsi_rqcq=%p, gsi_cons=%d, ibcq_comp=%s\n",
+ dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons,
+ cq->ibcq.comp_handler ? "Yes" : "No");
+
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len, pkt->header.vaddr,
+ pkt->header.baddr);
+ kfree(pkt);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ qedr_inc_sw_gsi_cons(&qp->sq);
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params)
+{
+ struct qedr_dev *dev = (struct qedr_dev *)_dev;
+ struct qedr_cq *cq = dev->gsi_rqcq;
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id;
+ qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len;
+ ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac);
+
+ qedr_inc_sw_gsi_cons(&qp->rq);
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+}
+
+static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qedr_cq *cq;
+
+ cq = get_qedr_cq(attrs->send_cq);
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+
+ cq = get_qedr_cq(attrs->recv_cq);
+ /* if a dedicated recv_cq was used, delete it too */
+ if (iparams.icid != cq->icid) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+}
+
+static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->cap.max_recv_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_sge is larger the max %d>%d\n",
+ attrs->cap.max_recv_sge, QEDR_GSI_MAX_RECV_SGE);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_wr > QEDR_GSI_MAX_RECV_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_recv_wr is too large %d>%d\n",
+ attrs->cap.max_recv_wr, QEDR_GSI_MAX_RECV_WR);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > QEDR_GSI_MAX_SEND_WR) {
+ DP_ERR(dev,
+ " create gsi qp: failed. max_send_wr is too large %d>%d\n",
+ attrs->cap.max_send_wr, QEDR_GSI_MAX_SEND_WR);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp)
+{
+ struct qed_roce_ll2_params ll2_params;
+ int rc;
+
+ rc = qedr_check_gsi_qp_attrs(dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* configure and start LL2 */
+ memset(&ll2_params, 0, sizeof(ll2_params));
+ ll2_params.max_tx_buffers = attrs->cap.max_send_wr;
+ ll2_params.max_rx_buffers = attrs->cap.max_recv_wr;
+ ll2_params.cbs.tx_cb = qedr_ll2_tx_cb;
+ ll2_params.cbs.rx_cb = qedr_ll2_rx_cb;
+ ll2_params.cb_cookie = (void *)dev;
+ ll2_params.mtu = dev->ndev->mtu;
+ ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr);
+ rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params);
+ if (rc) {
+ DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ /* create QP */
+ qp->ibqp.qp_num = 1;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
+
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ goto err;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ goto err;
+
+ qedr_store_gsi_qp_cq(dev, qp, attrs);
+ ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
+
+ /* the GSI CQ is handled by the driver so remove it from the FW */
+ qedr_destroy_gsi_cq(dev, attrs);
+ dev->gsi_sqcq->cq_type = QEDR_CQ_TYPE_GSI;
+ dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
+
+ DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
+
+ return &qp->ibqp;
+
+err:
+ kfree(qp->rqe_wr_id);
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "create gsi qp: failed destroy on create\n");
+
+ return ERR_PTR(-ENOMEM);
+}
+
+int qedr_destroy_gsi_qp(struct qedr_dev *dev)
+{
+ int rc;
+
+ rc = dev->ops->roce_ll2_stop(dev->cdev);
+ if (rc)
+ DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc);
+ else
+ DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n");
+
+ return rc;
+}
+
+#define QEDR_MAX_UD_HEADER_SIZE (100)
+#define QEDR_GSI_QPN (1)
+static inline int qedr_gsi_build_header(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct ib_ud_header *udh,
+ int *roce_mode)
+{
+ bool has_vlan = false, has_grh_ipv6 = true;
+ struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ struct ib_global_route *grh = &ah_attr->grh;
+ union ib_gid sgid;
+ int send_size = 0;
+ u16 vlan_id = 0;
+ u16 ether_type;
+ struct ib_gid_attr sgid_attr;
+ int rc;
+ int ip_ver = 0;
+
+ bool has_udp = false;
+ int i;
+
+ send_size = 0;
+ for (i = 0; i < swr->num_sge; ++i)
+ send_size += swr->sg_list[i].length;
+
+ rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ grh->sgid_index, &sgid, &sgid_attr);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
+ ah_attr->port_num, grh->sgid_index);
+ return rc;
+ }
+
+ vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+ if (vlan_id < VLAN_CFI_MASK)
+ has_vlan = true;
+ if (sgid_attr.ndev)
+ dev_put(sgid_attr.ndev);
+
+ if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
+ DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+ ah_attr->grh.sgid_index);
+ return -ENOENT;
+ }
+
+ has_udp = (sgid_attr.gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP);
+ if (!has_udp) {
+ /* RoCE v1 */
+ ether_type = ETH_P_ROCE;
+ *roce_mode = ROCE_V1;
+ } else if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) {
+ /* RoCE v2 IPv4 */
+ ip_ver = 4;
+ ether_type = ETH_P_IP;
+ has_grh_ipv6 = false;
+ *roce_mode = ROCE_V2_IPV4;
+ } else {
+ /* RoCE v2 IPv6 */
+ ip_ver = 6;
+ ether_type = ETH_P_IPV6;
+ *roce_mode = ROCE_V2_IPV6;
+ }
+
+ rc = ib_ud_header_init(send_size, false, true, has_vlan,
+ has_grh_ipv6, ip_ver, has_udp, 0, udh);
+ if (rc) {
+ DP_ERR(dev, "gsi post send: failed to init header\n");
+ return rc;
+ }
+
+ /* ENET + VLAN headers */
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+ ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
+ if (has_vlan) {
+ udh->eth.type = htons(ETH_P_8021Q);
+ udh->vlan.tag = htons(vlan_id);
+ udh->vlan.type = htons(ether_type);
+ } else {
+ udh->eth.type = htons(ether_type);
+ }
+
+ /* BTH */
+ udh->bth.solicited_event = !!(swr->send_flags & IB_SEND_SOLICITED);
+ udh->bth.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ udh->bth.destination_qpn = htonl(ud_wr(swr)->remote_qpn);
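+ /* The BTH PSN is a 24-bit field, so wrap the SQ PSN counter accordingly */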
+ udh->bth.psn = htonl((qp->sq_psn++) & ((1 << 24) - 1));
+ udh->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+
+ /* DETH */
+ udh->deth.qkey = htonl(0x80010000);
+ udh->deth.source_qpn = htonl(QEDR_GSI_QPN);
+
+ if (has_grh_ipv6) {
+ /* GRH / IPv6 header */
+ udh->grh.traffic_class = grh->traffic_class;
+ udh->grh.flow_label = grh->flow_label;
+ udh->grh.hop_limit = grh->hop_limit;
+ udh->grh.destination_gid = grh->dgid;
+ memcpy(&udh->grh.source_gid.raw, &sgid.raw,
+ sizeof(udh->grh.source_gid.raw));
+ } else {
+ /* IPv4 header */
+ u32 ipv4_addr;
+
+ udh->ip4.protocol = IPPROTO_UDP;
+ udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.frag_off = htons(IP_DF);
+ udh->ip4.ttl = ah_attr->grh.hop_limit;
+
+ ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
+ udh->ip4.saddr = ipv4_addr;
+ ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ udh->ip4.daddr = ipv4_addr;
+ /* note: checksum is calculated by the device */
+ }
+
+ /* UDP */
+ if (has_udp) {
+ udh->udp.sport = htons(QEDR_ROCE_V2_UDP_SPORT);
+ udh->udp.dport = htons(ROCE_V2_UDP_DPORT);
+ udh->udp.csum = 0;
+ /* UDP length is left untouched and therefore remains zero */
+ }
+ return 0;
+}
+
+static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_send_wr *swr,
+ struct qed_roce_ll2_packet **p_packet)
+{
+ u8 ud_header_buffer[QEDR_MAX_UD_HEADER_SIZE];
+ struct qed_roce_ll2_packet *packet;
+ struct pci_dev *pdev = dev->pdev;
+ int roce_mode, header_size;
+ struct ib_ud_header udh;
+ int i, rc;
+
+ *p_packet = NULL;
+
+ rc = qedr_gsi_build_header(dev, qp, swr, &udh, &roce_mode);
+ if (rc)
+ return rc;
+
+ header_size = ib_ud_header_pack(&udh, &ud_header_buffer);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->header.vaddr = dma_alloc_coherent(&pdev->dev, header_size,
+ &packet->header.baddr,
+ GFP_ATOMIC);
+ if (!packet->header.vaddr) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+
+ packet->roce_mode = roce_mode;
+ memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+ packet->header.len = header_size;
+ packet->n_seg = swr->num_sge;
+ for (i = 0; i < packet->n_seg; i++) {
+ packet->payload[i].baddr = swr->sg_list[i].addr;
+ packet->payload[i].len = swr->sg_list[i].length;
+ }
+
+ *p_packet = packet;
+
+ return 0;
+}
+
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qed_roce_ll2_packet *pkt = NULL;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_tx_params params;
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int rc;
+
+ if (qp->state != QED_ROCE_QP_STATE_RTS) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTS\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ if (wr->num_sge > RDMA_MAX_SGE_PER_SQ_WQE) {
+ DP_ERR(dev, "gsi post send: num_sge is too large (%d>%d)\n",
+ wr->num_sge, RDMA_MAX_SGE_PER_SQ_WQE);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (wr->opcode != IB_WR_SEND) {
+ DP_ERR(dev,
+ "gsi post send: failed due to unsupported opcode %d\n",
+ wr->opcode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ rc = qedr_gsi_build_packet(dev, qp, wr, &pkt);
+ if (rc) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ goto err;
+ }
+
+ rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, &params);
+ if (!rc) {
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+ qedr_inc_sw_prod(&qp->sq);
+ DP_DEBUG(qp->dev, QEDR_MSG_GSI,
+ "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n",
+ wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);
+ } else {
+ if (rc == QED_ROCE_TX_HEAD_FAILURE) {
+ /* TX failed while posting header - release resources */
+ dma_free_coherent(&dev->pdev->dev, pkt->header.len,
+ pkt->header.vaddr, pkt->header.baddr);
+ kfree(pkt);
+ } else if (rc == QED_ROCE_TX_FRAG_FAILURE) {
+ /* Nothing to do here: TX failed while posting a fragment, so
+ * the resources will be released in the TX callback.
+ */
+ }
+
+ DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc);
+ rc = -EAGAIN;
+ *bad_wr = wr;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (wr->next) {
+ DP_ERR(dev,
+ "gsi post send: failed second WR. Only one WR may be passed at a time\n");
+ *bad_wr = wr->next;
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ *bad_wr = wr;
+ return rc;
+}
+
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_roce_ll2_buffer buf;
+ unsigned long flags;
+ int status = 0;
+ int rc;
+
+ if ((qp->state != QED_ROCE_QP_STATE_RTR) &&
+ (qp->state != QED_ROCE_QP_STATE_RTS)) {
+ *bad_wr = wr;
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. state is %d and not QED_ROCE_QP_STATE_RTR/S\n",
+ qp->state);
+ return -EINVAL;
+ }
+
+ memset(&buf, 0, sizeof(buf));
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ while (wr) {
+ if (wr->num_sge > QEDR_GSI_MAX_RECV_SGE) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer. too many sges %d>%d\n",
+ wr->num_sge, QEDR_GSI_MAX_RECV_SGE);
+ goto err;
+ }
+
+ buf.baddr = wr->sg_list[0].addr;
+ buf.len = wr->sg_list[0].length;
+
+ rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1);
+ if (rc) {
+ DP_ERR(dev,
+ "gsi post recv: failed to post rx buffer (rc=%d)\n",
+ rc);
+ goto err;
+ }
+
+ memset(&qp->rqe_wr_id[qp->rq.prod], 0,
+ sizeof(qp->rqe_wr_id[qp->rq.prod]));
+ qp->rqe_wr_id[qp->rq.prod].sg_list[0] = wr->sg_list[0];
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+err:
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -ENOMEM;
+}
+
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ struct qedr_qp *qp = dev->gsi_qp;
+ unsigned long flags;
+ int i = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ while (i < num_entries && qp->rq.cons != qp->rq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc[i].opcode = IB_WC_RECV;
+ wc[i].pkey_index = 0;
+ wc[i].status = (qp->rqe_wr_id[qp->rq.cons].rc) ?
+ IB_WC_GENERAL_ERR : IB_WC_SUCCESS;
+ /* 0 - currently only one recv sg is supported */
+ wc[i].byte_len = qp->rqe_wr_id[qp->rq.cons].sg_list[0].length;
+ wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
+ ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
+ wc[i].wc_flags |= IB_WC_WITH_SMAC;
+ if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+ wc[i].wc_flags |= IB_WC_WITH_VLAN;
+ wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ }
+
+ qedr_inc_sw_cons(&qp->rq);
+ i++;
+ }
+
+ while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) {
+ memset(&wc[i], 0, sizeof(*wc));
+
+ wc[i].qp = &qp->ibqp;
+ wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc[i].opcode = IB_WC_SEND;
+ wc[i].status = IB_WC_SUCCESS;
+
+ qedr_inc_sw_cons(&qp->sq);
+ i++;
+ }
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "gsi poll_cq: requested entries=%d, actual=%d, qp->rq.cons=%d, qp->rq.gsi_cons=%x, qp->sq.cons=%d, qp->sq.gsi_cons=%d, qp_num=%d\n",
+ num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons,
+ qp->sq.gsi_cons, qp->ibqp.qp_num);
+
+ return i;
+}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
new file mode 100644
index 000000000000..9ba6e15cd93f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -0,0 +1,61 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef LINUX_QEDR_CM_H_
+#define LINUX_QEDR_CM_H_
+
+#define QEDR_GSI_MAX_RECV_WR (4096)
+#define QEDR_GSI_MAX_SEND_WR (4096)
+
+#define QEDR_GSI_MAX_RECV_SGE (1) /* LL2 FW limitation */
+
+#define ETH_P_ROCE (0x8915)
+#define QEDR_ROCE_V2_UDP_SPORT (0000)
+
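+/* An IPv4-mapped GID (RoCEv2 over IPv4) carries the IPv4 address in its last four bytes */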
+static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+{
+ return *(u32 *)(void *)&gid[12];
+}
+
+/* RDMA CM */
+int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct qedr_qp *qp);
+void qedr_store_gsi_qp_cq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs);
+int qedr_destroy_gsi_qp(struct qedr_dev *dev);
+void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info);
+#endif
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
new file mode 100644
index 000000000000..66d27521373f
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi.h
@@ -0,0 +1,56 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_ROCE__
+#define __QED_HSI_ROCE__
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
+
+/* Affiliated asynchronous events / errors enumeration */
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
+#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
new file mode 100644
index 000000000000..5c98d2055cad
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
@@ -0,0 +1,748 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QED_HSI_RDMA__
+#define __QED_HSI_RDMA__
+
+#include <linux/qed/rdma_common.h>
+
+/* rdma completion notification queue element */
+struct rdma_cnqe {
+ struct regpair cq_handle;
+};
+
+struct rdma_cqe_responder {
+ struct regpair srq_wr_id;
+ struct regpair qp_handle;
+ __le32 imm_data_or_inv_r_Key;
+ __le32 length;
+ __le32 imm_data_hi;
+ __le16 rq_cons;
+ u8 flags;
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
+#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
+#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
+#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
+#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
+#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
+#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
+#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
+ u8 status;
+};
+
+struct rdma_cqe_requester {
+ __le16 sq_cons;
+ __le16 reserved0;
+ __le32 reserved1;
+ struct regpair qp_handle;
+ struct regpair reserved2;
+ __le32 reserved3;
+ __le16 reserved4;
+ u8 flags;
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
+#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
+#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
+#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
+ u8 status;
+};
+
+struct rdma_cqe_common {
+ struct regpair reserved0;
+ struct regpair qp_handle;
+ __le16 reserved1[7];
+ u8 flags;
+#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
+#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
+#define RDMA_CQE_COMMON_TYPE_MASK 0x3
+#define RDMA_CQE_COMMON_TYPE_SHIFT 1
+#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
+#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
+ u8 status;
+};
+
+/* rdma completion queue element */
+union rdma_cqe {
+ struct rdma_cqe_responder resp;
+ struct rdma_cqe_requester req;
+ struct rdma_cqe_common cmn;
+};
+
+/* CQE requester status enumeration */
+enum rdma_cqe_requester_status_enum {
+ RDMA_CQE_REQ_STS_OK,
+ RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
+ RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
+ RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
+ RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
+};
+
+/* CQE responder status enumeration */
+enum rdma_cqe_responder_status_enum {
+ RDMA_CQE_RESP_STS_OK,
+ RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
+ RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
+ RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
+ RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
+ MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
+};
+
+/* CQE type enumeration */
+enum rdma_cqe_type {
+ RDMA_CQE_TYPE_REQUESTER,
+ RDMA_CQE_TYPE_RESPONDER_RQ,
+ RDMA_CQE_TYPE_RESPONDER_SRQ,
+ RDMA_CQE_TYPE_INVALID,
+ MAX_RDMA_CQE_TYPE
+};
+
+struct rdma_sq_sge {
+ __le32 length;
+ struct regpair addr;
+ __le32 l_key;
+};
+
+struct rdma_rq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 flags;
+#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
+#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
+#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
+#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+};
+
+struct rdma_srq_sge {
+ struct regpair addr;
+ __le32 length;
+ __le32 l_key;
+};
+
+/* Rdma doorbell data for SQ and RQ */
+struct rdma_pwm_val16_data {
+ __le16 icid;
+ __le16 value;
+};
+
+union rdma_pwm_val16_data_union {
+ struct rdma_pwm_val16_data as_struct;
+ __le32 as_dword;
+};
+
+/* Rdma doorbell data for CQ */
+struct rdma_pwm_val32_data {
+ __le16 icid;
+ u8 agg_flags;
+ u8 params;
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
+#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
+#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
+ __le32 value;
+};
+
+/* DIF Block size options */
+enum rdma_dif_block_size {
+ RDMA_DIF_BLOCK_512 = 0,
+ RDMA_DIF_BLOCK_4096 = 1,
+ MAX_RDMA_DIF_BLOCK_SIZE
+};
+
+/* DIF CRC initial value */
+enum rdma_dif_crc_seed {
+ RDMA_DIF_CRC_SEED_0000 = 0,
+ RDMA_DIF_CRC_SEED_FFFF = 1,
+ MAX_RDMA_DIF_CRC_SEED
+};
+
+/* RDMA DIF Error Result Structure */
+struct rdma_dif_error_result {
+ __le32 error_intervals;
+ __le32 dif_error_1st_interval;
+ u8 flags;
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT 0
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK 0xF
+#define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT 3
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK 0x1
+#define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT 7
+ u8 reserved1[55];
+};
+
+/* DIF IO direction */
+enum rdma_dif_io_direction_flg {
+ RDMA_DIF_DIR_RX = 0,
+ RDMA_DIF_DIR_TX = 1,
+ MAX_RDMA_DIF_IO_DIRECTION_FLG
+};
+
+/* RDMA DIF Runt Result Structure */
+struct rdma_dif_runt_result {
+ __le16 guard_tag;
+ __le16 reserved[3];
+};
+
+/* Memory window type enumeration */
+enum rdma_mw_type {
+ RDMA_MW_TYPE_1,
+ RDMA_MW_TYPE_2A,
+ MAX_RDMA_MW_TYPE
+};
+
+struct rdma_sq_atomic_wqe {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+/* First element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_1st {
+ __le32 reserved1;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ __le32 reserved2;
+};
+
+/* Third element (16 bytes) of atomic wqe */
+struct rdma_sq_atomic_wqe_3rd {
+ struct regpair cmp_data;
+ struct regpair swap_data;
+};
+
+struct rdma_sq_bind_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* First element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_2nd {
+ u8 bind_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+ u8 access_ctrl;
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ __le32 parent_l_key;
+ __le32 reserved4;
+};
+
+/* Structure with only the SQ WQE common
+ * fields. Its size is one SQ element (16B).
+ */
+struct rdma_sq_common_wqe {
+ __le32 reserved1[3];
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_COMMON_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_COMMON_WQE_RESERVED0_MASK 0x7
+#define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_fmr_wqe {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+/* First element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_1st {
+ struct regpair addr;
+ __le32 l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_2nd {
+ u8 fmr_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT 5
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT 6
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT 7
+ u8 access_ctrl;
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT 0
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT 3
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT 4
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK 0x7
+#define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT 5
+ u8 reserved3;
+ u8 length_hi;
+ __le32 length_lo;
+ struct regpair pbl_addr;
+};
+
+/* Third element (16 bytes) of fmr wqe */
+struct rdma_sq_fmr_wqe_3rd {
+ __le32 dif_base_ref_tag;
+ __le16 dif_app_tag;
+ __le16 dif_app_tag_mask;
+ __le16 dif_runt_crc_value;
+ __le16 dif_flags;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
+ __le32 Reserved5;
+};
+
+struct rdma_sq_local_inv_wqe {
+ struct regpair reserved;
+ __le32 inv_l_key;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_rdma_wqe {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* First element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_1st {
+ __le32 imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK 0x3
+#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+/* Second element (16 bytes) of rdma wqe */
+struct rdma_sq_rdma_wqe_2nd {
+ struct regpair remote_va;
+ __le32 r_key;
+ u8 dif_flags;
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT 0
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT 2
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK 0x1F
+#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT 3
+ u8 reserved2[3];
+};
+
+/* SQ WQE req type enumeration */
+enum rdma_sq_req_type {
+ RDMA_SQ_REQ_TYPE_SEND,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_RDMA_WR,
+ RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
+ RDMA_SQ_REQ_TYPE_RDMA_RD,
+ RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
+ RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
+ RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
+ RDMA_SQ_REQ_TYPE_FAST_MR,
+ RDMA_SQ_REQ_TYPE_BIND,
+ RDMA_SQ_REQ_TYPE_INVALID,
+ MAX_RDMA_SQ_REQ_TYPE
+};
+
+struct rdma_sq_send_wqe {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_SEND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT 6
+ u8 wqe_size;
+ u8 prev_wqe_size;
+ __le32 reserved1[4];
+};
+
+struct rdma_sq_send_wqe_1st {
+ __le32 inv_key_or_imm_data;
+ __le32 length;
+ __le32 xrc_srq;
+ u8 req_type;
+ u8 flags;
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT 0
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT 1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT 3
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK 0x1
+#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT 4
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK 0x7
+#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT 5
+ u8 wqe_size;
+ u8 prev_wqe_size;
+};
+
+struct rdma_sq_send_wqe_2st {
+ __le32 reserved1[4];
+};
+
+#endif /* __QED_HSI_RDMA__ */
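A note on the bit-field convention above (illustration, not part of the patch): every flags/ctrl byte in these HSI structures is described by *_MASK/*_SHIFT pairs, and the qed driver family manipulates them through the SET_FIELD()/GET_FIELD() helpers from <linux/qed/common_hsi.h> (the same SET_FIELD() is used later in this patch on modify_flags). Assuming those helpers behave as in the other qed drivers, packing a send WQE's flags looks roughly like this:

    u8 flags = 0;
    u8 is_inline;

    /* Each SET_FIELD() clears the field and then ORs in
     * (value & NAME_MASK) << NAME_SHIFT.
     */
    SET_FIELD(flags, RDMA_SQ_SEND_WQE_COMP_FLG, 1);   /* generate a CQE    */
    SET_FIELD(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1); /* payload is inline */

    is_inline = GET_FIELD(flags, RDMA_SQ_SEND_WQE_INLINE_FLG); /* reads back 1 */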
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
new file mode 100644
index 000000000000..a61514296767
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -0,0 +1,3547 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/crc32.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+#include <linux/iommu.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+
+#include "qedr_hsi.h"
+#include <linux/qed/qed_if.h>
+#include "qedr.h"
+#include "verbs.h"
+#include <rdma/qedr-abi.h>
+#include "qedr_cm.h"
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
+int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ if (index > QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+ return 0;
+}
+
+int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *sgid)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ int rc = 0;
+
+ if (!rdma_cap_roce_gid_table(ibdev, port))
+ return -ENODEV;
+
+ rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
+ if (rc == -EAGAIN) {
+ memcpy(sgid, &zgid, sizeof(*sgid));
+ return 0;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
+ sgid->global.interface_id, sgid->global.subnet_prefix);
+
+ return rc;
+}
+
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context)
+{
+ if (!rdma_cap_roce_gid_table(device, port_num))
+ return -EINVAL;
+
+ if (port_num > QEDR_MAX_PORT)
+ return -EINVAL;
+
+ if (!context)
+ return -EINVAL;
+
+ return 0;
+}
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev,
+ "qedr_query_device called with invalid params rdma_ctx=%p\n",
+ dev->rdma_ctx);
+ return -EINVAL;
+ }
+
+ memset(attr, 0, sizeof(*attr));
+
+ attr->fw_ver = qattr->fw_ver;
+ attr->sys_image_guid = qattr->sys_image_guid;
+ attr->max_mr_size = qattr->max_mr_size;
+ attr->page_size_cap = qattr->page_size_caps;
+ attr->vendor_id = qattr->vendor_id;
+ attr->vendor_part_id = qattr->vendor_part_id;
+ attr->hw_ver = qattr->hw_ver;
+ attr->max_qp = qattr->max_qp;
+ attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+ attr->max_sge = qattr->max_sge;
+ attr->max_sge_rd = qattr->max_sge;
+ attr->max_cq = qattr->max_cq;
+ attr->max_cqe = qattr->max_cqe;
+ attr->max_mr = qattr->max_mr;
+ attr->max_mw = qattr->max_mw;
+ attr->max_pd = qattr->max_pd;
+ attr->atomic_cap = dev->atomic_cap;
+ attr->max_fmr = qattr->max_fmr;
+ attr->max_map_per_fmr = 16;
+ attr->max_qp_init_rd_atom =
+ 1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
+ attr->max_qp_rd_atom =
+ min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
+ attr->max_qp_init_rd_atom);
+
+ attr->max_srq = qattr->max_srq;
+ attr->max_srq_sge = qattr->max_srq_sge;
+ attr->max_srq_wr = qattr->max_srq_wr;
+
+ attr->local_ca_ack_delay = qattr->dev_ack_delay;
+ attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
+ attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
+ attr->max_ah = qattr->max_ah;
+
+ return 0;
+}
+
+#define QEDR_SPEED_SDR (1)
+#define QEDR_SPEED_DDR (2)
+#define QEDR_SPEED_QDR (4)
+#define QEDR_SPEED_FDR10 (8)
+#define QEDR_SPEED_FDR (16)
+#define QEDR_SPEED_EDR (32)
+
+static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+{
+ switch (speed) {
+ case 1000:
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+ *ib_speed = QEDR_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+ *ib_speed = QEDR_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 100000:
+ *ib_speed = QEDR_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = QEDR_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+{
+ struct qedr_dev *dev;
+ struct qed_rdma_port *rdma_port;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "rdma_ctx is NULL\n");
+ return -EINVAL;
+ }
+
+ rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
+ memset(attr, 0, sizeof(*attr));
+
+ if (rdma_port->port_state == QED_RDMA_PORT_UP) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = 5;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = 3;
+ }
+ attr->max_mtu = IB_MTU_4096;
+ attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
+ attr->lid = 0;
+ attr->lmc = 0;
+ attr->sm_lid = 0;
+ attr->sm_sl = 0;
+ attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
+ attr->gid_tbl_len = QEDR_MAX_SGID;
+ attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
+ attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
+ attr->qkey_viol_cntr = 0;
+ get_link_speed_and_width(rdma_port->link_speed,
+ &attr->active_speed, &attr->active_width);
+ attr->max_msg_sz = rdma_port->max_msg_size;
+ attr->max_vl_num = 4;
+
+ return 0;
+}
+
+int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibdev);
+ if (port > 1) {
+ DP_ERR(dev, "invalid_port=0x%x\n", port);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct qedr_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return -ENOMEM;
+
+ mm->key.phy_addr = phy_addr;
+ /* This function might be called with a length which is not a multiple
+ * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
+ * forces this granularity by increasing the requested size if needed.
+ * When qedr_mmap is called, it will search the list with the updated
+ * length as a key. To prevent search failures, the length is rounded up
+ * in advance to PAGE_SIZE.
+ */
+ mm->key.len = roundup(len, PAGE_SIZE);
+ INIT_LIST_HEAD(&mm->entry);
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_add(&mm->entry, &uctx->mm_head);
+ mutex_unlock(&uctx->mm_list_lock);
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ (unsigned long long)mm->key.phy_addr,
+ (unsigned long)mm->key.len, uctx);
+
+ return 0;
+}
+
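Why the roundup matters (worked illustration with hypothetical numbers): mmap() always hands the driver a whole number of pages, so the key stored here must already be page-rounded for the later lookup to succeed.

    /* Suppose the DPI doorbell window reported by the qed core is 512 bytes. */
    unsigned long dpi_size = 512;
    unsigned long key_len  = roundup(dpi_size, PAGE_SIZE); /* 4096 on 4 KB-page systems */

    /* User space calls mmap(NULL, 512, ...); the kernel builds a 4096-byte vma,
     * so qedr_mmap() computes len = vm_end - vm_start = 4096 and the
     * qedr_search_mmap() lookup only matches because 4096 was stored here.
     */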
+static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ bool found = false;
+ struct qedr_mm *mm;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry(mm, &uctx->mm_head, entry) {
+ if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
+ mm->key.phy_addr, mm->key.len, uctx, found);
+
+ return found;
+}
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ int rc;
+ struct qedr_ucontext *ctx;
+ struct qedr_alloc_ucontext_resp uresp;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_add_user_out_params oparams;
+
+ if (!udata)
+ return ERR_PTR(-EFAULT);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
+ if (rc) {
+ DP_ERR(dev,
+ "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
+ rc);
+ goto err;
+ }
+
+ ctx->dpi = oparams.dpi;
+ ctx->dpi_addr = oparams.dpi_addr;
+ ctx->dpi_phys_addr = oparams.dpi_phys_addr;
+ ctx->dpi_size = oparams.dpi_size;
+ INIT_LIST_HEAD(&ctx->mm_head);
+ mutex_init(&ctx->mm_list_lock);
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_pa = ctx->dpi_phys_addr;
+ uresp.db_size = ctx->dpi_size;
+ uresp.max_send_wr = dev->attr.max_sqe;
+ uresp.max_recv_wr = dev->attr.max_rqe;
+ uresp.max_srq_wr = dev->attr.max_srq_wr;
+ uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
+ uresp.max_cqes = QEDR_MAX_CQES;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ goto err;
+
+ ctx->dev = dev;
+
+ rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
+ if (rc)
+ goto err;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
+ &ctx->ibucontext);
+ return &ctx->ibucontext;
+
+err:
+ kfree(ctx);
+ return ERR_PTR(rc);
+}
+
+int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
+ struct qedr_mm *mm, *tmp;
+ int status = 0;
+
+ DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
+ uctx);
+ uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
+
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
+ "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
+ mm->key.phy_addr, mm->key.len, uctx);
+ list_del(&mm->entry);
+ kfree(mm);
+ }
+
+ kfree(uctx);
+ return status;
+}
+
+int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
+ struct qedr_dev *dev = get_qedr_dev(context->device);
+ unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+ u64 unmapped_db = dev->db_phys_addr;
+ unsigned long len = (vma->vm_end - vma->vm_start);
+ int rc = 0;
+ bool found;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
+ vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
+ if (vma->vm_start & (PAGE_SIZE - 1)) {
+ DP_ERR(dev, "Vma_start not page aligned = %ld\n",
+ vma->vm_start);
+ return -EINVAL;
+ }
+
+ found = qedr_search_mmap(ucontext, vm_page, len);
+ if (!found) {
+ DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+
+ if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+ dev->db_size))) {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
+ if (vma->vm_flags & VM_READ) {
+ DP_ERR(dev, "Trying to map doorbell bar for read\n");
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ PAGE_SIZE, vma->vm_page_prot);
+ } else {
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
+ rc = remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff, len, vma->vm_page_prot);
+ }
+ DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
+ return rc;
+}
+
+struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qedr_ucontext *uctx = NULL;
+ struct qedr_alloc_pd_uresp uresp;
+ struct qedr_pd *pd;
+ u16 pd_id;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
+ (udata && context) ? "User Lib" : "Kernel");
+
+ if (!dev->rdma_ctx) {
+ DP_ERR(dev, "invlaid RDMA context\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+
+ uresp.pd_id = pd_id;
+ pd->pd_id = pd_id;
+
+ if (udata && context) {
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
+ uctx = get_qedr_ucontext(context);
+ uctx->pd = pd;
+ pd->uctx = uctx;
+ }
+
+ return &pd->ibpd;
+}
+
+int qedr_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+
+ if (!pd)
+ pr_err("Invalid PD received in dealloc_pd\n");
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
+
+ kfree(pd);
+
+ return 0;
+}
+
+static void qedr_free_pbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int i;
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ if (!pbl[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl[i].va, pbl[i].pa);
+ }
+
+ kfree(pbl);
+}
+
+#define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
+#define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
+
+#define NUM_PBES_ON_PAGE(_page_size) (_page_size / sizeof(u64))
+#define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
+#define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
+
+static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ gfp_t flags)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct qedr_pbl *pbl_table;
+ dma_addr_t *pbl_main_tbl;
+ dma_addr_t pa;
+ void *va;
+ int i;
+
+ pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
+ if (!pbl_table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pbl_info->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
+ &pa, flags);
+ if (!va)
+ goto err;
+
+ memset(va, 0, pbl_info->pbl_size);
+ pbl_table[i].va = va;
+ pbl_table[i].pa = pa;
+ }
+
+ /* Two-layer PBLs: if we have more than one pbl, initialize the first
+ * one with physical pointers to all of the rest.
+ */
+ pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
+ for (i = 0; i < pbl_info->num_pbls - 1; i++)
+ pbl_main_tbl[i] = pbl_table[i + 1].pa;
+
+ return pbl_table;
+
+err:
+ for (i--; i >= 0; i--)
+ dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
+ pbl_table[i].va, pbl_table[i].pa);
+
+ qedr_free_pbl(dev, pbl_info, pbl_table);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
+ struct qedr_pbl_info *pbl_info,
+ u32 num_pbes, int two_layer_capable)
+{
+ u32 pbl_capacity;
+ u32 pbl_size;
+ u32 num_pbls;
+
+ if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
+ if (num_pbes > MAX_PBES_TWO_LAYER) {
+ DP_ERR(dev, "prepare pbl table: too many pages %d\n",
+ num_pbes);
+ return -EINVAL;
+ }
+
+ /* calculate required pbl page size */
+ pbl_size = MIN_FW_PBL_PAGE_SIZE;
+ pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
+ NUM_PBES_ON_PAGE(pbl_size);
+
+ while (pbl_capacity < num_pbes) {
+ pbl_size *= 2;
+ pbl_capacity = pbl_size / sizeof(u64);
+ pbl_capacity = pbl_capacity * pbl_capacity;
+ }
+
+ num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
+ num_pbls++; /* One for layer 0 (points to the pbls) */
+ pbl_info->two_layered = true;
+ } else {
+ /* One layered PBL */
+ num_pbls = 1;
+ pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
+ roundup_pow_of_two((num_pbes * sizeof(u64))));
+ pbl_info->two_layered = false;
+ }
+
+ pbl_info->num_pbls = num_pbls;
+ pbl_info->pbl_size = pbl_size;
+ pbl_info->num_pbes = num_pbes;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
+ pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
+
+ return 0;
+}
+
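A worked sizing example for the function above (illustration only, assuming 4 KB system pages): registering a region that spans 300,000 pages takes the two-layer path, since that exceeds MAX_PBES_ON_PAGE (64 KB / 8 = 8192).

    /*   try pbl_size = 4 KB: capacity = 512 * 512   =   262,144 PBEs (too small)
     *   try pbl_size = 8 KB: capacity = 1024 * 1024 = 1,048,576 PBEs (fits)
     *   num_pbls = DIV_ROUND_UP(300,000, 1024) + 1  = 293 + 1 = 294
     * i.e. one 8 KB layer-0 page whose entries point at 293 8 KB leaf pages of PBEs.
     */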
+static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
+ struct qedr_pbl *pbl,
+ struct qedr_pbl_info *pbl_info)
+{
+ int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+ struct qedr_pbl *pbl_tbl;
+ struct scatterlist *sg;
+ struct regpair *pbe;
+ int entry;
+ u32 addr;
+
+ if (!pbl_info->num_pbes)
+ return;
+
+ /* If we have a two-layered pbl, the first pbl points to the rest
+ * of the pbls and the first data entry lies in the second pbl of the table.
+ */
+ if (pbl_info->two_layered)
+ pbl_tbl = &pbl[1];
+ else
+ pbl_tbl = pbl;
+
+ pbe = (struct regpair *)pbl_tbl->va;
+ if (!pbe) {
+ DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
+ return;
+ }
+
+ pbe_cnt = 0;
+
+ shift = ilog2(umem->page_size);
+
+ for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+ pages = sg_dma_len(sg) >> shift;
+ for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
+ /* store the page address in pbe */
+ pbe->lo = cpu_to_le32(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ addr = upper_32_bits(sg_dma_address(sg) +
+ umem->page_size * pg_cnt);
+ pbe->hi = cpu_to_le32(addr);
+ pbe_cnt++;
+ total_num_pbes++;
+ pbe++;
+
+ if (total_num_pbes == pbl_info->num_pbes)
+ return;
+
+ /* If the current pbl is full of pbes,
+ * move to the next pbl.
+ */
+ if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct regpair *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+ }
+}
+
+static int qedr_copy_cq_uresp(struct qedr_dev *dev,
+ struct qedr_cq *cq, struct ib_udata *udata)
+{
+ struct qedr_create_cq_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+
+ uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ uresp.icid = cq->icid;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
+
+ return rc;
+}
+
+static void consume_cqe(struct qedr_cq *cq)
+{
+ if (cq->latest_cqe == cq->toggle_cqe)
+ cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+
+ cq->latest_cqe = qed_chain_consume(&cq->pbl);
+}
+
+static inline int qedr_align_cq_entries(int entries)
+{
+ u64 size, aligned_size;
+
+ /* We allocate an extra entry that we don't report to the FW. */
+ size = (entries + 1) * QEDR_CQE_SIZE;
+ aligned_size = ALIGN(size, PAGE_SIZE);
+
+ return aligned_size / QEDR_CQE_SIZE;
+}
+
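A quick worked example for qedr_align_cq_entries() (illustration only; QEDR_CQE_SIZE is defined in qedr.h and, since union rdma_cqe above is 32 bytes, is assumed to be 32 here):

    /*   entries = 100  ->  size         = (100 + 1) * 32    = 3232
     *                      aligned_size = ALIGN(3232, 4096) = 4096
     *                      returned     = 4096 / 32         = 128 usable entries
     * The "+ 1" reserves one slot that is never reported to the FW.
     */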
+static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_userq *q,
+ u64 buf_addr, size_t buf_len,
+ int access, int dmasync)
+{
+ int page_cnt;
+ int rc;
+
+ q->buf_addr = buf_addr;
+ q->buf_len = buf_len;
+ q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
+ if (IS_ERR(q->umem)) {
+ DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
+ PTR_ERR(q->umem));
+ return PTR_ERR(q->umem);
+ }
+
+ page_cnt = ib_umem_page_count(q->umem);
+ rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+ if (rc)
+ goto err0;
+
+ q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(q->pbl_tbl))
+ goto err0;
+
+ qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+
+ return 0;
+
+err0:
+ ib_umem_release(q->umem);
+
+ return rc;
+}
+
+static inline void qedr_init_cq_params(struct qedr_cq *cq,
+ struct qedr_ucontext *ctx,
+ struct qedr_dev *dev, int vector,
+ int chain_entries, int page_cnt,
+ u64 pbl_ptr,
+ struct qed_rdma_create_cq_in_params
+ *params)
+{
+ memset(params, 0, sizeof(*params));
+ params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
+ params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
+ params->cnq_id = vector;
+ params->cq_size = chain_entries - 1;
+ params->dpi = (ctx) ? ctx->dpi : dev->dpi;
+ params->pbl_num_pages = page_cnt;
+ params->pbl_ptr = pbl_ptr;
+ params->pbl_two_level = 0;
+}
+
+static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
+{
+ /* Flush data before signalling doorbell */
+ wmb();
+ cq->db.data.agg_flags = flags;
+ cq->db.data.value = cpu_to_le32(cons);
+ writeq(cq->db.raw, cq->db_addr);
+
+ /* Make sure write would stick */
+ mmiowb();
+}
+
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ unsigned long sflags;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return 0;
+
+ spin_lock_irqsave(&cq->cq_lock, sflags);
+
+ cq->arm_flags = 0;
+
+ if (flags & IB_CQ_SOLICITED)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
+
+ if (flags & IB_CQ_NEXT_COMP)
+ cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
+
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, sflags);
+
+ return 0;
+}
+
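qedr_arm_cq() is wired up as the driver's req_notify_cq verb (the ib_device registration lives outside this hunk), so consumers reach it through the core ib_req_notify_cq() API rather than calling it directly. A hedged sketch of the usual ULP pattern, where handle_completion() is a hypothetical helper:

    struct ib_wc wc;

    /* Drain, arm, then drain again to close the race between the last
     * poll and the arm; the arm call ends up in qedr_arm_cq().
     */
    while (ib_poll_cq(cq, 1, &wc) > 0)
            handle_completion(&wc);

    ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

    while (ib_poll_cq(cq, 1, &wc) > 0)
            handle_completion(&wc);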
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+{
+ struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+ struct qed_rdma_destroy_cq_out_params destroy_oparams;
+ struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+ struct qed_rdma_create_cq_in_params params;
+ struct qedr_create_cq_ureq ureq;
+ int vector = attr->comp_vector;
+ int entries = attr->cqe;
+ struct qedr_cq *cq;
+ int chain_entries;
+ int page_cnt;
+ u64 pbl_ptr;
+ u16 icid;
+ int rc;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT,
+ "create_cq: called from %s. entries=%d, vector=%d\n",
+ udata ? "User Lib" : "Kernel", entries, vector);
+
+ if (entries > QEDR_MAX_CQES) {
+ DP_ERR(dev,
+ "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
+ entries, QEDR_MAX_CQES);
+ return ERR_PTR(-EINVAL);
+ }
+
+ chain_entries = qedr_align_cq_entries(entries);
+ chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata) {
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create cq: problem copying data from user space\n");
+ goto err0;
+ }
+
+ if (!ureq.len) {
+ DP_ERR(dev,
+ "create cq: cannot create a cq with 0 entries\n");
+ goto err0;
+ }
+
+ cq->cq_type = QEDR_CQ_TYPE_USER;
+
+ rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
+ ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
+ if (rc)
+ goto err0;
+
+ pbl_ptr = cq->q.pbl_tbl->pa;
+ page_cnt = cq->q.pbl_info.num_pbes;
+ } else {
+ cq->cq_type = QEDR_CQ_TYPE_KERNEL;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ chain_entries,
+ sizeof(union rdma_cqe),
+ &cq->pbl);
+ if (rc)
+ goto err1;
+
+ page_cnt = qed_chain_get_page_cnt(&cq->pbl);
+ pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
+ }
+
+ qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
+ pbl_ptr, &params);
+
+ rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
+ if (rc)
+ goto err2;
+
+ cq->icid = icid;
+ cq->sig = QEDR_CQ_MAGIC_NUMBER;
+ spin_lock_init(&cq->cq_lock);
+
+ if (ib_ctx) {
+ rc = qedr_copy_cq_uresp(dev, cq, udata);
+ if (rc)
+ goto err3;
+ } else {
+ /* Generate doorbell address. */
+ cq->db_addr = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
+ cq->db.data.icid = cq->icid;
+ cq->db.data.params = DB_AGG_CMD_SET <<
+ RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
+
+ /* Point to the very last element; once we pass it, we toggle. */
+ cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
+ cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
+ cq->latest_cqe = NULL;
+ consume_cqe(cq);
+ cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
+ cq->icid, cq, params.cq_size);
+
+ return &cq->ibcq;
+
+err3:
+ destroy_iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
+ &destroy_oparams);
+err2:
+ if (udata)
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ else
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+err1:
+ if (udata)
+ ib_umem_release(cq->q.umem);
+err0:
+ kfree(cq);
+ return ERR_PTR(-EINVAL);
+}
+
+int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
+
+ return 0;
+}
+
+int qedr_destroy_cq(struct ib_cq *ibcq)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qed_rdma_destroy_cq_out_params oparams;
+ struct qed_rdma_destroy_cq_in_params iparams;
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+
+ DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+
+ /* GSI CQs are handled by the driver, so they don't exist in the FW */
+ if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
+ iparams.icid = cq->icid;
+ dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
+ }
+
+ if (ibcq->uobject && ibcq->uobject->context) {
+ qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
+ ib_umem_release(cq->q.umem);
+ }
+
+ kfree(cq);
+
+ return 0;
+}
+
+static inline int get_gid_info_from_table(struct ib_qp *ibqp,
+ struct ib_qp_attr *attr,
+ int attr_mask,
+ struct qed_rdma_modify_qp_in_params
+ *qp_params)
+{
+ enum rdma_network_type nw_type;
+ struct ib_gid_attr gid_attr;
+ union ib_gid gid;
+ u32 ipv4_addr;
+ int rc = 0;
+ int i;
+
+ rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
+ attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ if (rc)
+ return rc;
+
+ if (!memcmp(&gid, &zgid, sizeof(gid)))
+ return -ENOENT;
+
+ if (gid_attr.ndev) {
+ qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
+
+ dev_put(gid_attr.ndev);
+ nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV6:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V2_IPV6;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ break;
+ case RDMA_NETWORK_IB:
+ memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
+ sizeof(qp_params->sgid));
+ memcpy(&qp_params->dgid.bytes[0],
+ &attr->ah_attr.grh.dgid,
+ sizeof(qp_params->dgid));
+ qp_params->roce_mode = ROCE_V1;
+ break;
+ case RDMA_NETWORK_IPV4:
+ memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
+ memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
+ ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
+ qp_params->sgid.ipv4_addr = ipv4_addr;
+ ipv4_addr =
+ qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qp_params->dgid.ipv4_addr = ipv4_addr;
+ SET_FIELD(qp_params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
+ qp_params->roce_mode = ROCE_V2_IPV4;
+ break;
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
+ qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
+ }
+
+ if (qp_params->vlan_id >= VLAN_CFI_MASK)
+ qp_params->vlan_id = 0;
+
+ return 0;
+}
+
+static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
+ ib_umem_release(qp->usq.umem);
+}
+
+static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
+ ib_umem_release(qp->urq.umem);
+}
+
+static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ kfree(qp->wqe_wr_id);
+}
+
+static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ kfree(qp->rqe_wr_id);
+}
+
+static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ struct qedr_device_attr *qattr = &dev->attr;
+
+ /* QP0... attrs->qp_type == IB_QPT_GSI */
+ if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: unsupported qp type=0x%x requested\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_wr > qattr->max_sqe) {
+ DP_ERR(dev,
+ "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
+ attrs->cap.max_send_wr, qattr->max_sqe);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_inline_data > qattr->max_inline) {
+ DP_ERR(dev,
+ "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
+ attrs->cap.max_inline_data, qattr->max_inline);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_send_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
+ attrs->cap.max_send_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ if (attrs->cap.max_recv_sge > qattr->max_sge) {
+ DP_ERR(dev,
+ "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
+ attrs->cap.max_recv_sge, qattr->max_sge);
+ return -EINVAL;
+ }
+
+ /* Unprivileged user space cannot create special QP */
+ if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
+ DP_ERR(dev,
+ "create qp: userspace can't create special QPs of type=0x%x\n",
+ attrs->qp_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ uresp->rq_icid = qp->icid;
+}
+
+static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
+ struct qedr_qp *qp)
+{
+ uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ uresp->sq_icid = qp->icid + 1;
+}
+
+static int qedr_copy_qp_uresp(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_udata *udata)
+{
+ struct qedr_create_qp_uresp uresp;
+ int rc;
+
+ memset(&uresp, 0, sizeof(uresp));
+ qedr_copy_sq_uresp(&uresp, qp);
+ qedr_copy_rq_uresp(&uresp, qp);
+
+ uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
+ uresp.qp_id = qp->qp_id;
+
+ rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (rc)
+ DP_ERR(dev,
+ "create qp: failed a copy to user space with qp icid=0x%x.\n",
+ qp->icid);
+
+ return rc;
+}
+
+static void qedr_set_qp_init_params(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ qp->pd = pd;
+
+ spin_lock_init(&qp->q_lock);
+
+ qp->qp_type = attrs->qp_type;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->state = QED_ROCE_QP_STATE_RESET;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->sq_cq = get_qedr_cq(attrs->send_cq);
+ qp->rq_cq = get_qedr_cq(attrs->recv_cq);
+ qp->dev = dev;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
+ pd->pd_id, qp->qp_type, qp->max_inline_data,
+ qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
+ qp->sq.max_sges, qp->sq_cq->icid);
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
+ qp->rq.max_sges, qp->rq_cq->icid);
+}
+
+static inline void
+qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
+ struct qedr_create_qp_ureq *ureq)
+{
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = ureq->qp_handle_lo;
+ params->qp_handle_hi = ureq->qp_handle_hi;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->sq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
+ qp->sq.db_data.data.icid = qp->icid + 1;
+}
+
+static inline void
+qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ qp->rq.db = dev->db_addr +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
+ qp->rq.db_data.data.icid = qp->icid;
+}
+
+static inline int
+qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
+{
+ /* Allocate driver internal RQ array */
+ qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->rqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
+
+ return 0;
+}
+
+static inline int
+qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ u32 temp_max_wr;
+
+ /* Allocate driver internal SQ array */
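+ /* Over-provision the work-request tracking array by wq_multiplier and cap it at the device SQE limit */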
+ temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
+ temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
+
+ /* temp_max_wr is capped at attr->max_sqe, which fits in a u16, so the cast is safe */
+ qp->sq.max_wr = (u16)temp_max_wr;
+ qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
+ GFP_KERNEL);
+ if (!qp->wqe_wr_id)
+ return -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
+
+ /* QP handle to be written in CQE */
+ params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
+
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_sq_elems, n_sq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_sq_entries = attrs->cap.max_send_wr;
+ n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
+ n_sq_entries = max_t(u32, n_sq_entries, 1);
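+ /* Each SQ entry may expand into up to QEDR_MAX_SQE_ELEMENTS_PER_SQE chain elements */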
+ n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_sq_elems,
+ QEDR_SQE_ELEMENT_SIZE,
+ &qp->sq.pbl);
+ if (rc) {
+ DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
+ return rc;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_SQ,
+ "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
+ n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
+ return 0;
+}
+
+static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs)
+{
+ u32 n_rq_elems, n_rq_entries;
+ int rc;
+
+ /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
+ * the ring. The ring should allow at least a single WR, even if the
+ * user requested none, due to allocation issues.
+ */
+ n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
+ n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
+ rc = dev->ops->common->chain_alloc(dev->cdev,
+ QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U32,
+ n_rq_elems,
+ QEDR_RQE_ELEMENT_SIZE,
+ &qp->rq.pbl);
+
+ if (rc) {
+ DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_RQ,
+ "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
+ qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
+ n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
+
+ /* n_rq_entries fits in a u16, so the cast is safe */
+ qp->rq.max_wr = (u16)n_rq_entries;
+
+ return 0;
+}
+
+static inline void
+qedr_init_qp_in_params_sq(struct qedr_dev *dev,
+ struct qedr_pd *pd,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ /* QP handle to be written in an async event */
+ params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
+ params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
+
+ params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
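+ /* FMR and the reserved lkey are only exposed to kernel consumers (no udata) */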
+ params->fmr_and_reserved_lkey = !udata;
+ params->pd = pd->pd_id;
+ params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
+ params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
+ params->max_sq_sges = 0;
+ params->stats_queue = 0;
+
+ if (udata) {
+ params->sq_num_pages = qp->usq.pbl_info.num_pbes;
+ params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
+ } else {
+ params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
+ params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
+ }
+}
+
+static inline void
+qedr_init_qp_in_params_rq(struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
+ params->srq_id = 0;
+ params->use_srq = false;
+
+ if (udata) {
+ params->rq_num_pages = qp->urq.pbl_info.num_pbes;
+ params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
+ } else {
+ params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
+ params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
+ }
+}
+
+static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
+{
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
+ qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
+ qp->urq.buf_len);
+}
+
+static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
+ struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct qedr_create_qp_ureq *ureq)
+{
+ int rc;
+
+ /* SQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
+ ureq->sq_len, 0, 0);
+ if (rc)
+ return rc;
+
+ /* RQ - read access only (0), dma sync not required (0) */
+ rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
+ ureq->rq_len, 0, 0);
+
+ if (rc)
+ qedr_cleanup_user_sq(dev, qp);
+ return rc;
+}
+
+static inline int
+qedr_init_kernel_qp(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ struct qed_rdma_create_qp_in_params *params)
+{
+ int rc;
+
+ rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
+ if (rc) {
+ dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
+ DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
+ if (rc) {
+ qedr_cleanup_kernel_sq(dev, qp);
+ DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
+ return rc;
+ }
+
+ rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
+ if (rc) {
+ DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
+ qedr_cleanup_kernel_sq(dev, qp);
+ dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
+ return rc;
+ }
+
+ return rc;
+}
+
+struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qed_rdma_create_qp_out_params out_params;
+ struct qed_rdma_create_qp_in_params in_params;
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct ib_ucontext *ib_ctx = NULL;
+ struct qedr_ucontext *ctx = NULL;
+ struct qedr_create_qp_ureq ureq;
+ struct qedr_qp *qp;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
+ udata ? "user library" : "kernel", pd);
+
+ rc = qedr_check_qp_attrs(ibpd, dev, attrs);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* SRQ is not supported; reject before allocating the QP */
+ if (attrs->srq)
+ return ERR_PTR(-EINVAL);
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
+ get_qedr_cq(attrs->send_cq),
+ get_qedr_cq(attrs->send_cq)->icid,
+ get_qedr_cq(attrs->recv_cq),
+ get_qedr_cq(attrs->recv_cq)->icid);
+
+ qedr_set_qp_init_params(dev, qp, pd, attrs);
+
+ if (attrs->qp_type == IB_QPT_GSI) {
+ if (udata) {
+ DP_ERR(dev,
+ "create qp: unexpected udata when creating GSI QP\n");
+ goto err0;
+ }
+ return qedr_create_gsi_qp(dev, attrs, qp);
+ }
+
+ memset(&in_params, 0, sizeof(in_params));
+
+ if (udata) {
+ if (!(udata && ibpd->uobject && ibpd->uobject->context))
+ goto err0;
+
+ ib_ctx = ibpd->uobject->context;
+ ctx = get_qedr_ucontext(ib_ctx);
+
+ memset(&ureq, 0, sizeof(ureq));
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ DP_ERR(dev,
+ "create qp: problem copying data from user space\n");
+ goto err0;
+ }
+
+ rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
+ if (rc)
+ goto err0;
+
+ qedr_init_qp_user_params(&in_params, &ureq);
+ } else {
+ rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
+ if (rc)
+ goto err0;
+ }
+
+ qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
+ qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
+
+ qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
+ &in_params, &out_params);
+
+ if (!qp->qed_qp)
+ goto err1;
+
+ qp->qp_id = out_params.qp_id;
+ qp->icid = out_params.icid;
+ qp->ibqp.qp_num = qp->qp_id;
+
+ if (udata) {
+ rc = qedr_copy_qp_uresp(dev, qp, udata);
+ if (rc)
+ goto err2;
+
+ qedr_qp_user_print(dev, qp);
+ } else {
+ qedr_init_qp_kernel_doorbell_sq(dev, qp);
+ qedr_init_qp_kernel_doorbell_rq(dev, qp);
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
+ udata ? "user" : "kernel", qp);
+
+ return &qp->ibqp;
+
+err2:
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ DP_ERR(dev, "create qp: fatal fault. rc=%d", rc);
+err1:
+ if (udata) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+err0:
+ kfree(qp);
+
+ return ERR_PTR(-EFAULT);
+}
+
+enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+{
+ switch (qp_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ return IB_QPS_RESET;
+ case QED_ROCE_QP_STATE_INIT:
+ return IB_QPS_INIT;
+ case QED_ROCE_QP_STATE_RTR:
+ return IB_QPS_RTR;
+ case QED_ROCE_QP_STATE_RTS:
+ return IB_QPS_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return IB_QPS_SQD;
+ case QED_ROCE_QP_STATE_ERR:
+ return IB_QPS_ERR;
+ case QED_ROCE_QP_STATE_SQE:
+ return IB_QPS_SQE;
+ }
+ return IB_QPS_ERR;
+}
+
+enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+{
+ switch (qp_state) {
+ case IB_QPS_RESET:
+ return QED_ROCE_QP_STATE_RESET;
+ case IB_QPS_INIT:
+ return QED_ROCE_QP_STATE_INIT;
+ case IB_QPS_RTR:
+ return QED_ROCE_QP_STATE_RTR;
+ case IB_QPS_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case IB_QPS_SQD:
+ return QED_ROCE_QP_STATE_SQD;
+ case IB_QPS_ERR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
+{
+ qed_chain_reset(&qph->pbl);
+ qph->prod = 0;
+ qph->cons = 0;
+ qph->wqe_cons = 0;
+ qph->db_data.data.value = cpu_to_le16(0);
+}
+
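+/* Validate a QP state transition and perform any associated HW updates
+ * (e.g. ringing the RQ doorbell on INIT->RTR). Returns 0 on a valid
+ * transition, 1 if the state is unchanged and -EINVAL otherwise.
+ */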
+static int qedr_update_qp_state(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ enum qed_roce_qp_state new_state)
+{
+ int status = 0;
+
+ if (new_state == qp->state)
+ return 1;
+
+ switch (qp->state) {
+ case QED_ROCE_QP_STATE_RESET:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_INIT:
+ qp->prev_wqe_size = 0;
+ qedr_reset_qp_hwq_info(&qp->sq);
+ qedr_reset_qp_hwq_info(&qp->rq);
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTR:
+ /* Update doorbell (in case post_recv was
+ * done before move to RTR)
+ */
+ wmb();
+ writel(qp->rq.db_data.raw, qp->rq.db);
+ /* Make sure write takes effect */
+ mmiowb();
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_SQD:
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RTS:
+ case QED_ROCE_QP_STATE_ERR:
+ break;
+ default:
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+ switch (new_state) {
+ case QED_ROCE_QP_STATE_RESET:
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+
+ return status;
+}
+
+int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qed_rdma_modify_qp_in_params qp_params = { 0 };
+ struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ enum ib_qp_state old_qp_state, new_qp_state;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP,
+ "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
+ attr->qp_state);
+
+ old_qp_state = qedr_get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qp_state = attr->qp_state;
+ else
+ new_qp_state = old_qp_state;
+
+ if (!ib_modify_qp_is_ok
+ (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask,
+ IB_LINK_LAYER_ETHERNET)) {
+ DP_ERR(dev,
+ "modify qp: invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
+ attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
+ new_qp_state);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* Translate the masks... */
+ if (attr_mask & IB_QP_STATE) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
+ qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
+ }
+
+ if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ qp_params.sqd_async = true;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
+ if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ qp->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
+ qp_params.incoming_rdma_read_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_READ;
+ qp_params.incoming_rdma_write_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_WRITE;
+ qp_params.incoming_atomic_en = attr->qp_access_flags &
+ IB_ACCESS_REMOTE_ATOMIC;
+ }
+
+ if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
+ ib_mtu_enum_to_int(iboe_get_mtu
+ (dev->ndev->mtu)));
+ }
+
+ if (!qp->mtu) {
+ qp->mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
+
+ qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
+ qp_params.flow_label = attr->ah_attr.grh.flow_label;
+ qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+
+ qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+
+ rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
+ if (rc) {
+ DP_ERR(dev,
+ "modify qp: problems with GID index %d (rc=%d)\n",
+ attr->ah_attr.grh.sgid_index, rc);
+ return rc;
+ }
+
+ rc = qedr_get_dmac(dev, &attr->ah_attr,
+ qp_params.remote_mac_addr);
+ if (rc)
+ return rc;
+
+ qp_params.use_local_mac = true;
+ ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
+ qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
+ qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
+ qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
+ qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
+ DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
+ qp_params.remote_mac_addr);
+
+ qp_params.mtu = qp->mtu;
+ qp_params.lb_indication = false;
+ }
+
+ if (!qp_params.mtu) {
+ /* Stay with current MTU */
+ if (qp->mtu)
+ qp_params.mtu = qp->mtu;
+ else
+ qp_params.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
+
+ qp_params.ack_timeout = attr->timeout;
+ if (attr->timeout) {
+ u32 temp;
+
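+ /* IB ack timeout is 4.096 usec * 2^timeout; convert it to msec for the FW */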
+ temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
+ /* FW requires [msec] */
+ qp_params.ack_timeout = temp;
+ } else {
+ /* Infinite */
+ qp_params.ack_timeout = 0;
+ }
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
+ qp_params.retry_cnt = attr->retry_cnt;
+ }
+
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
+ qp_params.rnr_retry_cnt = attr->rnr_retry;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
+ qp_params.rq_psn = attr->rq_psn;
+ qp->rq_psn = attr->rq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
+ rc = -EINVAL;
+ DP_ERR(dev,
+ "unsupported max_rd_atomic=%d, supported=%d\n",
+ attr->max_rd_atomic,
+ dev->attr.max_qp_req_rd_atomic_resc);
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
+ qp_params.max_rd_atomic_req = attr->max_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
+ qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
+ qp_params.sq_psn = attr->sq_psn;
+ qp->sq_psn = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic >
+ dev->attr.max_qp_resp_rd_atomic_resc) {
+ DP_ERR(dev,
+ "unsupported max_dest_rd_atomic=%d, supported=%d\n",
+ attr->max_dest_rd_atomic,
+ dev->attr.max_qp_resp_rd_atomic_resc);
+
+ rc = -EINVAL;
+ goto err;
+ }
+
+ SET_FIELD(qp_params.modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
+ qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN) {
+ SET_FIELD(qp_params.modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
+
+ qp_params.dest_qp = attr->dest_qp_num;
+ qp->dest_qp_num = attr->dest_qp_num;
+ }
+
+ if (qp->qp_type != IB_QPT_GSI)
+ rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
+ qp->qed_qp, &qp_params);
+
+ if (attr_mask & IB_QP_STATE) {
+ if ((qp->qp_type != IB_QPT_GSI) && (!udata))
+ qedr_update_qp_state(dev, qp, qp_params.new_state);
+ qp->state = qp_params.new_state;
+ }
+
+err:
+ return rc;
+}
+
+static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (params->incoming_rdma_write_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (params->incoming_rdma_read_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (params->incoming_atomic_en)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int qedr_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct qed_rdma_query_qp_out_params params;
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+
+ memset(qp_attr, 0, sizeof(*qp_attr));
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
+ qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.rq_psn;
+ qp_attr->sq_psn = params.sq_psn;
+ qp_attr->dest_qp_num = params.dest_qp;
+
+ qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
+
+ qp_attr->cap.max_send_wr = qp->sq.max_wr;
+ qp_attr->cap.max_recv_wr = qp->rq.max_wr;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+ memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
+ sizeof(qp_attr->ah_attr.grh.dgid.raw));
+
+ qp_attr->ah_attr.grh.flow_label = params.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
+ qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
+ qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
+
+ qp_attr->ah_attr.ah_flags = IB_AH_GRH;
+ qp_attr->ah_attr.port_num = 1;
+ qp_attr->ah_attr.sl = 0;
+ qp_attr->timeout = params.timeout;
+ qp_attr->rnr_retry = params.rnr_retry;
+ qp_attr->retry_cnt = params.retry_cnt;
+ qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
+ qp_attr->pkey_index = params.pkey_index;
+ qp_attr->port_num = 1;
+ qp_attr->ah_attr.src_path_bits = 0;
+ qp_attr->ah_attr.static_rate = 0;
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+
+ qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
+ qp_attr->max_rd_atomic = params.max_rd_atomic;
+ qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
+ qp_attr->cap.max_inline_data);
+
+err:
+ return rc;
+}
+
+int qedr_destroy_qp(struct ib_qp *ibqp)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ struct ib_qp_attr attr;
+ int attr_mask = 0;
+ int rc = 0;
+
+ DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
+ qp, qp->qp_type);
+
+ if ((qp->state != QED_ROCE_QP_STATE_RESET) &&
+     (qp->state != QED_ROCE_QP_STATE_ERR) &&
+     (qp->state != QED_ROCE_QP_STATE_INIT)) {
+ attr.qp_state = IB_QPS_ERR;
+ attr_mask |= IB_QP_STATE;
+
+ /* Change the QP state to ERROR */
+ qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
+ }
+
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
+ if (rc)
+ return rc;
+ } else {
+ qedr_destroy_gsi_qp(dev);
+ }
+
+ if (ibqp->uobject && ibqp->uobject->context) {
+ qedr_cleanup_user_sq(dev, qp);
+ qedr_cleanup_user_rq(dev, qp);
+ } else {
+ qedr_cleanup_kernel_sq(dev, qp);
+ qedr_cleanup_kernel_rq(dev, qp);
+ }
+
+ kfree(qp);
+
+ return rc;
+}
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
+{
+ struct qedr_ah *ah;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ah->attr = *attr;
+
+ return &ah->ibah;
+}
+
+int qedr_destroy_ah(struct ib_ah *ibah)
+{
+ struct qedr_ah *ah = get_qedr_ah(ibah);
+
+ kfree(ah);
+ return 0;
+}
+
+static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
+{
+ struct qedr_pbl *pbl, *tmp;
+
+ if (info->pbl_table)
+ list_add_tail(&info->pbl_table->list_entry,
+ &info->free_pbl_list);
+
+ if (!list_empty(&info->inuse_pbl_list))
+ list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
+
+ list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
+ list_del(&pbl->list_entry);
+ qedr_free_pbl(dev, &info->pbl_info, pbl);
+ }
+}
+
+static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
+ size_t page_list_len, bool two_layered)
+{
+ struct qedr_pbl *tmp;
+ int rc;
+
+ INIT_LIST_HEAD(&info->free_pbl_list);
+ INIT_LIST_HEAD(&info->inuse_pbl_list);
+
+ rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
+ page_list_len, two_layered);
+ if (rc)
+ goto done;
+
+ info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!info->pbl_table) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
+ &info->pbl_table->pa);
+
+ /* In the usual case two PBLs are needed, so allocate an extra
+  * one here and keep it on the free list.
+  */
+ tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
+ if (!tmp) {
+ DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
+ goto done;
+ }
+
+ list_add_tail(&tmp->list_entry, &info->free_pbl_list);
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
+
+done:
+ if (rc)
+ free_mr_info(dev, info);
+
+ return rc;
+}
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ struct qedr_pd *pd;
+ int rc = -ENOMEM;
+
+ pd = get_qedr_pd(ibpd);
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
+ pd->pd_id, start, len, usr_addr, acc);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->type = QEDR_MR_USER;
+
+ mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
+ if (IS_ERR(mr->umem)) {
+ rc = -EFAULT;
+ goto err0;
+ }
+
+ rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
+ if (rc)
+ goto err1;
+
+ qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
+ &mr->info.pbl_info);
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+ mr->hw_mr.fbo = ib_umem_offset(mr->umem);
+ mr->hw_mr.length = len;
+ mr->hw_mr.vaddr = usr_addr;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = false;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
+ mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int qedr_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ib_mr);
+ struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
+ int rc = 0;
+
+ rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
+ if (rc)
+ return rc;
+
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+
+ if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
+ qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
+
+ /* it could be user registered memory. */
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ kfree(mr);
+
+ return rc;
+}
+
+struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_mr *mr;
+ int rc = -ENOMEM;
+
+ DP_DEBUG(dev, QEDR_MSG_MR,
+ "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
+ max_page_list_len);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(rc);
+
+ mr->dev = dev;
+ mr->type = QEDR_MR_FRMR;
+
+ rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
+ if (rc)
+ goto err0;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err0;
+ }
+
+ /* Index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
+ mr->hw_mr.key = 0;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = 0;
+ mr->hw_mr.remote_read = 0;
+ mr->hw_mr.remote_write = 0;
+ mr->hw_mr.remote_atomic = 0;
+ mr->hw_mr.mw_bind = false;
+ mr->hw_mr.pbl_ptr = 0;
+ mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
+ mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
+ mr->hw_mr.fbo = 0;
+ mr->hw_mr.length = 0;
+ mr->hw_mr.vaddr = 0;
+ mr->hw_mr.zbva = false;
+ mr->hw_mr.phy_mr = true;
+ mr->hw_mr.dma_mr = false;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ mr->ibmr.rkey = mr->ibmr.lkey;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
+ return mr;
+
+err1:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err0:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ struct qedr_mr *mr;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = __qedr_alloc_mr(ibpd, max_num_sg);
+
+ if (IS_ERR(mr))
+ return ERR_PTR(-EINVAL);
+
+ return &mr->ibmr;
+}
+
+static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+ struct qedr_pbl *pbl_table;
+ struct regpair *pbe;
+ u32 pbes_in_page;
+
+ if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
+ DP_ERR(mr->dev, "qedr_set_page failes when %d\n", mr->npages);
+ return -ENOMEM;
+ }
+
+ DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
+ mr->npages, addr);
+
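+ /* Each PBL page holds pbl_size / sizeof(u64) entries; locate the page and slot for this PBE */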
+ pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
+ pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
+ pbe = (struct regpair *)pbl_table->va;
+ pbe += mr->npages % pbes_in_page;
+ pbe->lo = cpu_to_le32((u32)addr);
+ pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
+
+ mr->npages++;
+
+ return 0;
+}
+
+static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
+{
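+ /* Number of FMRs completed since the last pass, minus one; their PBLs can be recycled */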
+ int work = info->completed - info->completed_handled - 1;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
+ while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
+ struct qedr_pbl *pbl;
+
+ /* Free all page lists that can be freed (those that were
+  * invalidated), under the assumption that if an FMR completed
+  * successfully, any invalidate operation issued before it has
+  * completed as well.
+  */
+ pbl = list_first_entry(&info->inuse_pbl_list,
+ struct qedr_pbl, list_entry);
+ list_del(&pbl->list_entry);
+ list_add_tail(&pbl->list_entry, &info->free_pbl_list);
+ info->completed_handled++;
+ }
+}
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct qedr_mr *mr = get_qedr_mr(ibmr);
+
+ mr->npages = 0;
+
+ handle_completed_mrs(mr->dev, &mr->info);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
+}
+
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+ struct qedr_pd *pd = get_qedr_pd(ibpd);
+ struct qedr_mr *mr;
+ int rc;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = QEDR_MR_DMA;
+
+ rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
+ if (rc) {
+ DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
+ goto err1;
+ }
+
+ /* index only, 18 bit long, lkey = itid << 8 | key */
+ mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
+ mr->hw_mr.pd = pd->pd_id;
+ mr->hw_mr.local_read = 1;
+ mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hw_mr.dma_mr = true;
+
+ rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
+ if (rc) {
+ DP_ERR(dev, "roce register tid returned an error %d\n", rc);
+ goto err2;
+ }
+
+ mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+ if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
+ mr->hw_mr.remote_atomic)
+ mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
+
+ DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
+ return &mr->ibmr;
+
+err2:
+ dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
+err1:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
+{
+ return (((wq->prod + 1) % wq->max_wr) == wq->cons);
+}
+
+static int sge_data_len(struct ib_sge *sg_list, int num_sge)
+{
+ int i, len = 0;
+
+ for (i = 0; i < num_sge; i++)
+ len += sg_list[i].length;
+
+ return len;
+}
+
+static void swap_wqe_data64(u64 *p)
+{
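+ /* Convert each 64-bit word of the inline segment from little- to big-endian layout */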
+ int i;
+
+ for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
+ *p = cpu_to_be64(cpu_to_le64(*p));
+}
+
+static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
+ struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr, u8 *bits,
+ u8 bit)
+{
+ u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
+ char *seg_prt, *wqe;
+ int i, seg_siz;
+
+ if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
+ DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
+ *bad_wr = wr;
+ return 0;
+ }
+
+ if (!data_size)
+ return data_size;
+
+ *bits |= bit;
+
+ seg_prt = NULL;
+ wqe = NULL;
+ seg_siz = 0;
+
+ /* Copy data inline */
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 len = wr->sg_list[i].length;
+ void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
+
+ while (len > 0) {
+ u32 cur;
+
+ /* New segment required */
+ if (!seg_siz) {
+ wqe = (char *)qed_chain_produce(&qp->sq.pbl);
+ seg_prt = wqe;
+ seg_siz = sizeof(struct rdma_sq_common_wqe);
+ (*wqe_size)++;
+ }
+
+ /* Calculate currently allowed length */
+ cur = min_t(u32, len, seg_siz);
+ memcpy(seg_prt, src, cur);
+
+ /* Update segment variables */
+ seg_prt += cur;
+ seg_siz -= cur;
+
+ /* Update sge variables */
+ src += cur;
+ len -= cur;
+
+ /* Swap fully-completed segments */
+ if (!seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+ }
+ }
+
+ /* swap last not completed segment */
+ if (seg_siz)
+ swap_wqe_data64((u64 *)wqe);
+
+ return data_size;
+}
+
+#define RQ_SGE_SET(sge, vaddr, vlength, vflags) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->flags = cpu_to_le32(vflags); \
+ } while (0)
+
+#define SRQ_HDR_SET(hdr, vwr_id, num_sge) \
+ do { \
+ DMA_REGPAIR_LE(hdr->wr_id, vwr_id); \
+ (hdr)->num_sges = num_sge; \
+ } while (0)
+
+#define SRQ_SGE_SET(sge, vaddr, vlength, vlkey) \
+ do { \
+ DMA_REGPAIR_LE(sge->addr, vaddr); \
+ (sge)->length = cpu_to_le32(vlength); \
+ (sge)->l_key = cpu_to_le32(vlkey); \
+ } while (0)
+
+static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
+ struct ib_send_wr *wr)
+{
+ u32 data_size = 0;
+ int i;
+
+ for (i = 0; i < wr->num_sge; i++) {
+ struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
+
+ DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
+ sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
+ sge->length = cpu_to_le32(wr->sg_list[i].length);
+ data_size += wr->sg_list[i].length;
+ }
+
+ if (wqe_size)
+ *wqe_size += wr->num_sge;
+
+ return data_size;
+}
+
+static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_rdma_wqe_1st *rwqe,
+ struct rdma_sq_rdma_wqe_2nd *rwqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
+ DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
+ bad_wr, &rwqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
+}
+
+static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
+ struct qedr_qp *qp,
+ struct rdma_sq_send_wqe_1st *swqe,
+ struct rdma_sq_send_wqe_2st *swqe2,
+ struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ memset(swqe2, 0, sizeof(*swqe2));
+ if (wr->send_flags & IB_SEND_INLINE) {
+ u8 flags = 0;
+
+ SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
+ return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
+ bad_wr, &swqe->flags, flags);
+ }
+
+ return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
+}
+
+static int qedr_prepare_reg(struct qedr_qp *qp,
+ struct rdma_sq_fmr_wqe_1st *fwqe1,
+ struct ib_reg_wr *wr)
+{
+ struct qedr_mr *mr = get_qedr_mr(wr->mr);
+ struct rdma_sq_fmr_wqe_2nd *fwqe2;
+
+ fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
+ fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
+ fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
+ fwqe1->l_key = wr->key;
+
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
+ !!(wr->access & IB_ACCESS_REMOTE_READ));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
+ !!(wr->access & IB_ACCESS_REMOTE_WRITE));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
+ !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
+ SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
+ !!(wr->access & IB_ACCESS_LOCAL_WRITE));
+ fwqe2->fmr_ctrl = 0;
+
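+ /* page_size_log is encoded relative to 4K (2^12) pages */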
+ SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
+ ilog2(mr->ibmr.page_size) - 12);
+
+ fwqe2->length_hi = 0;
+ fwqe2->length_lo = mr->ibmr.length;
+ fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
+ fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
+
+ qp->wqe_wr_id[qp->sq.prod].mr = mr;
+
+ return 0;
+}
+
+enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+{
+ switch (opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return IB_WC_RDMA_WRITE;
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ return IB_WC_SEND;
+ case IB_WR_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ return IB_WC_COMP_SWAP;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ return IB_WC_FETCH_ADD;
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ default:
+ return IB_WC_SEND;
+ }
+}
+
+inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+{
+ int wq_is_full, err_wr, pbl_is_full;
+ struct qedr_dev *dev = qp->dev;
+
+ /* prevent SQ overflow and/or processing of a bad WR */
+ err_wr = wr->num_sge > qp->sq.max_sges;
+ wq_is_full = qedr_wq_is_full(&qp->sq);
+ pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
+ QEDR_MAX_SQE_ELEMENTS_PER_SQE;
+ if (wq_is_full || err_wr || pbl_is_full) {
+ if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
+ DP_ERR(dev,
+ "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
+ }
+
+ if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
+ DP_ERR(dev,
+ "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
+ }
+
+ if (pbl_is_full &&
+ !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
+ DP_ERR(dev,
+ "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
+ qp);
+ qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
+ }
+ return false;
+ }
+ return true;
+}
+
+int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct rdma_sq_atomic_wqe_1st *awqe1;
+ struct rdma_sq_atomic_wqe_2nd *awqe2;
+ struct rdma_sq_atomic_wqe_3rd *awqe3;
+ struct rdma_sq_send_wqe_2st *swqe2;
+ struct rdma_sq_local_inv_wqe *iwqe;
+ struct rdma_sq_rdma_wqe_2nd *rwqe2;
+ struct rdma_sq_send_wqe_1st *swqe;
+ struct rdma_sq_rdma_wqe_1st *rwqe;
+ struct rdma_sq_fmr_wqe_1st *fwqe1;
+ struct rdma_sq_common_wqe *wqe;
+ u32 length;
+ int rc = 0;
+ bool comp;
+
+ if (!qedr_can_post_send(qp, wr)) {
+ *bad_wr = wr;
+ return -ENOMEM;
+ }
+
+ wqe = qed_chain_produce(&qp->sq.pbl);
+ qp->wqe_wr_id[qp->sq.prod].signaled =
+ !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
+
+ wqe->flags = 0;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
+ !!(wr->send_flags & IB_SEND_SOLICITED));
+ comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
+ SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
+ !!(wr->send_flags & IB_SEND_FENCE));
+ wqe->prev_wqe_size = qp->prev_wqe_size;
+
+ qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
+
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+
+ swqe->wqe_size = 2;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
+ swqe = (struct rdma_sq_send_wqe_1st *)wqe;
+ swqe2 = qed_chain_produce(&qp->sq.pbl);
+ swqe->wqe_size = 2;
+ swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
+ length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
+ wr, bad_wr);
+ swqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
+ qp->prev_wqe_size = swqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ DP_ERR(dev,
+ "RDMA READ WITH INVALIDATE not supported\n");
+ *bad_wr = wr;
+ rc = -EINVAL;
+ break;
+
+ case IB_WR_RDMA_READ:
+ wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
+ rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
+
+ rwqe->wqe_size = 2;
+ rwqe2 = qed_chain_produce(&qp->sq.pbl);
+ length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
+ wr, bad_wr);
+ rwqe->length = cpu_to_le32(length);
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
+ qp->prev_wqe_size = rwqe->wqe_size;
+ qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
+ awqe1->wqe_size = 4;
+
+ awqe2 = qed_chain_produce(&qp->sq.pbl);
+ DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
+ awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
+
+ awqe3 = qed_chain_produce(&qp->sq.pbl);
+
+ if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->compare_add);
+ } else {
+ wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
+ DMA_REGPAIR_LE(awqe3->swap_data,
+ atomic_wr(wr)->swap);
+ DMA_REGPAIR_LE(awqe3->cmp_data,
+ atomic_wr(wr)->compare_add);
+ }
+
+ qedr_prepare_sq_sges(qp, NULL, wr);
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
+ qp->prev_wqe_size = awqe1->wqe_size;
+ break;
+
+ case IB_WR_LOCAL_INV:
+ iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
+ iwqe->wqe_size = 1;
+
+ iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
+ iwqe->inv_l_key = wr->ex.invalidate_rkey;
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
+ qp->prev_wqe_size = iwqe->wqe_size;
+ break;
+ case IB_WR_REG_MR:
+ DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
+ wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
+ fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
+ fwqe1->wqe_size = 2;
+
+ rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
+ if (rc) {
+ DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
+ *bad_wr = wr;
+ break;
+ }
+
+ qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
+ qp->prev_wqe_size = fwqe1->wqe_size;
+ break;
+ default:
+ DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
+ rc = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (*bad_wr) {
+ u16 value;
+
+ /* Restore prod to its position before
+ * this WR was processed
+ */
+ value = le16_to_cpu(qp->sq.db_data.data.value);
+ qed_chain_set_prod(&qp->sq.pbl, value, wqe);
+
+ /* Restore prev_wqe_size */
+ qp->prev_wqe_size = wqe->prev_wqe_size;
+ rc = -EINVAL;
+ DP_ERR(dev, "POST SEND FAILED\n");
+ }
+
+ return rc;
+}
+
+int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibqp->device);
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ unsigned long flags;
+ int rc = 0;
+
+ *bad_wr = NULL;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ DP_DEBUG(dev, QEDR_MSG_CQ,
+ "QP in wrong state! QP icid=0x%x state %d\n",
+ qp->icid, qp->state);
+ return -EINVAL;
+ }
+
+ if (!wr) {
+ DP_ERR(dev, "Got an empty post send.\n");
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ return -EINVAL;
+ }
+
+ while (wr) {
+ rc = __qedr_post_send(ibqp, wr, bad_wr);
+ if (rc)
+ break;
+
+ qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
+
+ qedr_inc_sw_prod(&qp->sq);
+
+ qp->sq.db_data.data.value++;
+
+ wr = wr->next;
+ }
+
+ /* Trigger the doorbell.
+  * If the first WR failed, the doorbell is rung in vain; this is
+  * harmless as long as the producer value is unchanged. For
+  * performance reasons we do not check for this redundant doorbell.
+  */
+ wmb();
+ writel(qp->sq.db_data.raw, qp->sq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return rc;
+}
+
+int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct qedr_qp *qp = get_qedr_qp(ibqp);
+ struct qedr_dev *dev = qp->dev;
+ unsigned long flags;
+ int status = 0;
+
+ if (qp->qp_type == IB_QPT_GSI)
+ return qedr_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
+ (qp->state == QED_ROCE_QP_STATE_ERR)) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ int i;
+
+ if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
+ wr->num_sge > qp->rq.max_sges) {
+ DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
+ qed_chain_get_elem_left_u32(&qp->rq.pbl),
+ QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
+ qp->rq.max_sges);
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ for (i = 0; i < wr->num_sge; i++) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ if (!i)
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
+ wr->num_sge);
+
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+ wr->sg_list[i].lkey);
+
+ RQ_SGE_SET(rqe, wr->sg_list[i].addr,
+ wr->sg_list[i].length, flags);
+ }
+
+ /* Special case of no SGEs. The FW requires between 1 and 4
+  * SGEs, so post a single SGE with length zero. This is needed
+  * because an RDMA write with immediate consumes an RQ entry.
+  */
+ if (!wr->num_sge) {
+ u32 flags = 0;
+ struct rdma_rq_sge *rqe =
+ qed_chain_produce(&qp->rq.pbl);
+
+ /* First one must include the number
+ * of SGE in the list
+ */
+ SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+ SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
+
+ RQ_SGE_SET(rqe, 0, 0, flags);
+ i = 1;
+ }
+
+ qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;
+ qp->rqe_wr_id[qp->rq.prod].wqe_size = i;
+
+ qedr_inc_sw_prod(&qp->rq);
+
+ /* Flush all the writes before signalling doorbell */
+ wmb();
+
+ qp->rq.db_data.data.value++;
+
+ writel(qp->rq.db_data.raw, qp->rq.db);
+
+ /* Make sure write sticks */
+ mmiowb();
+
+ wr = wr->next;
+ }
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ return status;
+}
+
+static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
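+ /* A CQE is valid when its toggle bit matches the CQ's current toggle value */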
+ return (resp_cqe->flags & RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK) ==
+ cq->pbl_toggle;
+}
+
+static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+ struct qedr_qp *qp;
+
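+ /* For kernel QPs the QP pointer was stored in the CQE's qp_handle at create time */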
+ qp = (struct qedr_qp *)(uintptr_t)HILO_GEN(resp_cqe->qp_handle.hi,
+ resp_cqe->qp_handle.lo,
+ u64);
+ return qp;
+}
+
+static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
+{
+ struct rdma_cqe_requester *resp_cqe = &cqe->req;
+
+ return GET_FIELD(resp_cqe->flags, RDMA_CQE_REQUESTER_TYPE);
+}
+
+/* Return latest CQE (needs processing) */
+static union rdma_cqe *get_cqe(struct qedr_cq *cq)
+{
+ return cq->latest_cqe;
+}
+
+/* For FMR we need to increment the completed-FMR counter used by the
+ * algorithm that decides whether a PBL can be freed. This must be done
+ * whether or not the work request was signaled, so this function is
+ * called from the condition that checks if a WR should be skipped, to
+ * make sure it is not missed (the FMR operation may not have been
+ * signaled).
+ */
+static inline void qedr_chk_if_fmr(struct qedr_qp *qp)
+{
+ if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR)
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+}
+
+static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, u16 hw_cons, enum ib_wc_status status,
+ int force)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->sq.wqe_cons != hw_cons) {
+ if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) {
+ qedr_chk_if_fmr(qp);
+ /* skip WC */
+ goto next_cqe;
+ }
+
+ /* fill WC */
+ wc->status = status;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+
+ wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;
+ wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode;
+
+ switch (wc->opcode) {
+ case IB_WC_RDMA_WRITE:
+ wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+ break;
+ case IB_WC_COMP_SWAP:
+ case IB_WC_FETCH_ADD:
+ wc->byte_len = 8;
+ break;
+ case IB_WC_REG_MR:
+ qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
+ break;
+ default:
+ break;
+ }
+
+ num_entries--;
+ wc++;
+ cnt++;
+next_cqe:
+ while (qp->wqe_wr_id[qp->sq.cons].wqe_size--)
+ qed_chain_consume(&qp->sq.pbl);
+ qedr_inc_sw_cons(&qp->sq);
+ }
+
+ return cnt;
+}
+
+static int qedr_poll_cq_req(struct qedr_dev *dev,
+ struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc,
+ struct rdma_cqe_requester *req)
+{
+ int cnt = 0;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_OK:
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_SUCCESS, 0);
+ break;
+ case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
+ IB_WC_WR_FLUSH_ERR, 0);
+ break;
+ default:
+ /* process all WQEs before the failed one as successful completions */
+ qp->state = QED_ROCE_QP_STATE_ERR;
+ cnt = process_req(dev, qp, cq, num_entries, wc,
+ req->sq_cons - 1, IB_WC_SUCCESS, 0);
+ wc += cnt;
+ /* if we have extra WC fill it with actual error info */
+ if (cnt < num_entries) {
+ enum ib_wc_status wc_status;
+
+ switch (req->status) {
+ case RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_REM_OP_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR:
+ DP_ERR(dev,
+ "Error: POLL CQ with ROCE_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ default:
+ DP_ERR(dev,
+ "Error: POLL CQ with IB_WC_GENERAL_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
+ wc_status = IB_WC_GENERAL_ERR;
+ }
+ cnt += process_req(dev, qp, cq, 1, wc, req->sq_cons,
+ wc_status, 1);
+ }
+ }
+
+ return cnt;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp, u64 wr_id)
+{
+ enum ib_wc_status wc_status = IB_WC_SUCCESS;
+ u8 flags;
+
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+
+ switch (resp->status) {
+ case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
+ wc_status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
+ wc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
+ wc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
+ wc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
+ wc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
+ wc_status = IB_WC_REM_INV_RD_REQ_ERR;
+ break;
+ case RDMA_CQE_RESP_STS_OK:
+ wc_status = IB_WC_SUCCESS;
+ wc->byte_len = le32_to_cpu(resp->length);
+
+ flags = resp->flags & QEDR_RESP_RDMA_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM)
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+
+ if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
+ wc->ex.imm_data =
+ le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ }
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ DP_ERR(dev, "Invalid CQE status detected\n");
+ }
+
+ /* fill WC */
+ wc->status = wc_status;
+ wc->src_qp = qp->id;
+ wc->qp = &qp->ibqp;
+ wc->wr_id = wr_id;
+}
+
+static int process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp)
+{
+ u64 wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+
+ __process_resp_one(dev, qp, cq, wc, resp, wr_id);
+
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+
+ return 1;
+}
+
+static int process_resp_flush(struct qedr_qp *qp, struct qedr_cq *cq,
+ int num_entries, struct ib_wc *wc, u16 hw_cons)
+{
+ u16 cnt = 0;
+
+ while (num_entries && qp->rq.wqe_cons != hw_cons) {
+ /* fill WC */
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wc_flags = 0;
+ wc->src_qp = qp->id;
+ wc->byte_len = 0;
+ wc->wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;
+ wc->qp = &qp->ibqp;
+ num_entries--;
+ wc++;
+ cnt++;
+ while (qp->rqe_wr_id[qp->rq.cons].wqe_size--)
+ qed_chain_consume(&qp->rq.pbl);
+ qedr_inc_sw_cons(&qp->rq);
+ }
+
+ return cnt;
+}
+
+static void try_consume_resp_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_responder *resp, int *update)
+{
+ if (le16_to_cpu(resp->rq_cons) == qp->rq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+static int qedr_poll_cq_resp(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, int num_entries,
+ struct ib_wc *wc, struct rdma_cqe_responder *resp,
+ int *update)
+{
+ int cnt;
+
+ if (resp->status == RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR) {
+ cnt = process_resp_flush(qp, cq, num_entries, wc,
+ resp->rq_cons);
+ try_consume_resp_cqe(cq, qp, resp, update);
+ } else {
+ cnt = process_resp_one(dev, qp, cq, wc, resp);
+ consume_cqe(cq);
+ *update |= 1;
+ }
+
+ return cnt;
+}
+
+static void try_consume_req_cqe(struct qedr_cq *cq, struct qedr_qp *qp,
+ struct rdma_cqe_requester *req, int *update)
+{
+ if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) {
+ consume_cqe(cq);
+ *update |= 1;
+ }
+}
+
+int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibcq->device);
+ struct qedr_cq *cq = get_qedr_cq(ibcq);
+ union rdma_cqe *cqe = cq->latest_cqe;
+ u32 old_cons, new_cons;
+ unsigned long flags;
+ int update = 0;
+ int done = 0;
+
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ return qedr_gsi_poll_cq(ibcq, num_entries, wc);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ old_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+ while (num_entries && is_valid_cqe(cq, cqe)) {
+ struct qedr_qp *qp;
+ int cnt = 0;
+
+ /* prevent speculative reads of any field of CQE */
+ rmb();
+
+ qp = cqe_get_qp(cqe);
+ if (!qp) {
+ WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
+ break;
+ }
+
+ wc->qp = &qp->ibqp;
+
+ switch (cqe_get_type(cqe)) {
+ case RDMA_CQE_TYPE_REQUESTER:
+ cnt = qedr_poll_cq_req(dev, qp, cq, num_entries, wc,
+ &cqe->req);
+ try_consume_req_cqe(cq, qp, &cqe->req, &update);
+ break;
+ case RDMA_CQE_TYPE_RESPONDER_RQ:
+ cnt = qedr_poll_cq_resp(dev, qp, cq, num_entries, wc,
+ &cqe->resp, &update);
+ break;
+ case RDMA_CQE_TYPE_INVALID:
+ default:
+ DP_ERR(dev, "Error: invalid CQE type = %d\n",
+ cqe_get_type(cqe));
+ }
+ num_entries -= cnt;
+ wc += cnt;
+ done += cnt;
+
+ cqe = get_cqe(cq);
+ }
+ new_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
+
+ cq->cq_cons += new_cons - old_cons;
+
+ if (update)
+ /* doorbell notifies about the latest VALID entry,
+ * but the chain already points to the next INVALID one
+ */
+ doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return done;
+}
+
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num,
+ const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *mad_hdr,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct qedr_dev *dev = get_qedr_dev(ibdev);
+
+ DP_DEBUG(dev, QEDR_MSG_GSI,
+ "QEDR_PROCESS_MAD in_mad %x %x %x %x %x %x %x %x\n",
+ mad_hdr->attr_id, mad_hdr->base_version, mad_hdr->attr_mod,
+ mad_hdr->class_specific, mad_hdr->class_version,
+ mad_hdr->method, mad_hdr->mgmt_class, mad_hdr->status);
+ return IB_MAD_RESULT_SUCCESS;
+}
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ err = qedr_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
new file mode 100644
index 000000000000..a9b5e67bb81e
--- /dev/null
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -0,0 +1,101 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_VERBS_H__
+#define __QEDR_VERBS_H__
+
+int qedr_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr, struct ib_udata *udata);
+int qedr_query_port(struct ib_device *, u8 port, struct ib_port_attr *props);
+int qedr_modify_port(struct ib_device *, u8 port, int mask,
+ struct ib_port_modify *props);
+
+int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
+
+int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
+
+struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
+int qedr_dealloc_ucontext(struct ib_ucontext *);
+
+int qedr_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+int qedr_del_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, void **context);
+int qedr_add_gid(struct ib_device *device, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context);
+struct ib_pd *qedr_alloc_pd(struct ib_device *,
+ struct ib_ucontext *, struct ib_udata *);
+int qedr_dealloc_pd(struct ib_pd *pd);
+
+struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *ib_ctx,
+ struct ib_udata *udata);
+int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int qedr_destroy_cq(struct ib_cq *);
+int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs,
+ struct ib_udata *);
+int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *);
+int qedr_destroy_qp(struct ib_qp *ibqp);
+
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr);
+int qedr_destroy_ah(struct ib_ah *ibah);
+
+int qedr_dereg_mr(struct ib_mr *);
+struct ib_mr *qedr_get_dma_mr(struct ib_pd *, int acc);
+
+struct ib_mr *qedr_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *);
+
+int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+
+struct ib_mr *qedr_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int qedr_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int qedr_post_send(struct ib_qp *, struct ib_send_wr *,
+ struct ib_send_wr **bad_wr);
+int qedr_post_recv(struct ib_qp *, struct ib_recv_wr *,
+ struct ib_recv_wr **bad_wr);
+int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u8 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh,
+ const struct ib_mad_hdr *in_mad,
+ size_t in_mad_size, struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+
+int qedr_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+#endif
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
+ unsigned int gup_flags;
if (!can_do_mlock())
return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 7b8d2d9e2263..da12717a3eb7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -63,6 +63,8 @@ enum ipoib_flush_level {
enum {
IPOIB_ENCAP_LEN = 4,
+ IPOIB_PSEUDO_LEN = 20,
+ IPOIB_HARD_LEN = IPOIB_ENCAP_LEN + IPOIB_PSEUDO_LEN,
IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */
@@ -134,15 +136,21 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_cb {
- struct qdisc_skb_cb qdisc_cb;
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_pseudo_header {
+ u8 hwaddr[INFINIBAND_ALEN];
};
-static inline struct ipoib_cb *ipoib_skb_cb(const struct sk_buff *skb)
+static inline void skb_add_pseudo_hdr(struct sk_buff *skb)
{
- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct ipoib_cb));
- return (struct ipoib_cb *)skb->cb;
+ char *data = skb_push(skb, IPOIB_PSEUDO_LEN);
+
+ /*
+ * only the IPoIB header is present now; make room for a dummy
+ * pseudo header and set the skb fields accordingly
+ */
+ memset(data, 0, IPOIB_PSEUDO_LEN);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, IPOIB_HARD_LEN);
}
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4ad297d3de89..339a1eecdfe3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(cm_data_debug_level,
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
+#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
+
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
@@ -146,15 +148,15 @@ static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct sk_buff *skb;
int i;
- skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
+ skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
- * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
+ * IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
- skb_reserve(skb, 12);
+ skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
@@ -624,9 +626,9 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
- small_skb = dev_alloc_skb(dlen + 12);
+ small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
- skb_reserve(small_skb, 12);
+ skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
@@ -663,8 +665,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index be11d5d5b8c1..830fecb6934c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -128,16 +128,15 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
+ skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
- * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
- * header. So we need 4 more bytes to get to 48 and align the
- * IP header to a multiple of 16.
+ * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
+ * 64-byte aligned
*/
- skb_reserve(skb, 4);
+ skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
@@ -253,8 +252,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
- skb_reset_mac_header(skb);
- skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ae5d7cd100a5..c50794fb92db 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -938,9 +938,12 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_free(neigh);
goto err_drop;
}
- if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ if (skb_queue_len(&neigh->queue) <
+ IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&neigh->queue, skb);
- else {
+ } else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
@@ -977,7 +980,7 @@ err_drop:
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_cb *cb)
+ struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -985,16 +988,18 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, cb->hwaddr + 4);
+ path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, cb->hwaddr + 4);
+ path = path_rec_create(dev, phdr->hwaddr + 4);
new_path = 1;
}
if (path) {
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1022,10 +1027,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, IPOIB_PSEUDO_LEN);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -1039,13 +1046,15 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
+ phdr = (struct ipoib_pseudo_header *) skb->data;
+ skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
- if (unlikely(cb->hwaddr[4] == 0xff)) {
+ if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
@@ -1058,13 +1067,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
/* Add in the P_Key for multicast*/
- cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- cb->hwaddr[9] = priv->pkey & 0xff;
+ phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ phdr->hwaddr[9] = priv->pkey & 0xff;
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
- ipoib_mcast_send(dev, cb->hwaddr, skb);
+ ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
@@ -1073,16 +1082,16 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
- neigh = ipoib_neigh_get(dev, cb->hwaddr);
+ neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
- neigh_add_path(skb, cb->hwaddr, dev);
+ neigh_add_path(skb, phdr->hwaddr, dev);
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
/* for unicast ARP and RARP should always perform path find */
- unicast_arp_send(skb, dev, cb);
+ unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
@@ -1099,11 +1108,13 @@ send_using_neigh:
goto unref;
}
} else if (neigh->ah) {
- ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(cb->hwaddr));
+ ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
goto unref;
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(*phdr));
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1135,8 +1146,8 @@ static int ipoib_hard_header(struct sk_buff *skb,
unsigned short type,
const void *daddr, const void *saddr, unsigned len)
{
+ struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
- struct ipoib_cb *cb = ipoib_skb_cb(skb);
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -1145,12 +1156,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
/*
* we don't rely on dst_entry structure, always stuff the
- * destination address into skb->cb so we can figure out where
+ * destination address into skb hard header so we can figure out where
* to send the packet later.
*/
- memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
+ phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
+ memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
- return sizeof *header;
+ return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
@@ -1772,7 +1784,7 @@ void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- dev->hard_header_len = IPOIB_ENCAP_LEN;
+ dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index d3394b6add24..1909dd252c94 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -796,9 +796,11 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
- if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
+ if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
+ /* put pseudoheader back on for next time */
+ skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
- else {
+ } else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 936f07a4e35f..6d7de9bfed9a 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -103,6 +103,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
6-byte ALPS packet */
#define ALPS_STICK_BITS 0x100 /* separate stick button bits */
#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
+#define ALPS_DUALPOINT_WITH_PRESSURE 0x400 /* device can report trackpoint pressure */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Satellite Pro M10 */
@@ -1156,15 +1157,28 @@ static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
unsigned char pkt_id = SS4_PACKET_ID_IDLE;
- if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
- (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
- pkt_id = SS4_PACKET_ID_IDLE;
- } else if (!(byte[3] & 0x10)) {
- pkt_id = SS4_PACKET_ID_ONE;
- } else if (!(byte[3] & 0x20)) {
+ switch (byte[3] & 0x30) {
+ case 0x00:
+ if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
+ (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 &&
+ byte[5] == 0x00) {
+ pkt_id = SS4_PACKET_ID_IDLE;
+ } else {
+ pkt_id = SS4_PACKET_ID_ONE;
+ }
+ break;
+ case 0x10:
+ /* two-finger finger positions */
pkt_id = SS4_PACKET_ID_TWO;
- } else {
+ break;
+ case 0x20:
+ /* stick pointer */
+ pkt_id = SS4_PACKET_ID_STICK;
+ break;
+ case 0x30:
+ /* third and fourth finger positions */
pkt_id = SS4_PACKET_ID_MULTI;
+ break;
}
return pkt_id;
@@ -1185,7 +1199,13 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
f->mt[0].x = SS4_1F_X_V2(p);
f->mt[0].y = SS4_1F_Y_V2(p);
f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
- f->fingers = 1;
+ /*
+ * When a button is held the device will give us events
+ * with x, y, and pressure of 0. This causes annoying jumps
+ * if a touch is released while the button is held.
+ * Handle this by claiming zero contacts.
+ */
+ f->fingers = f->pressure > 0 ? 1 : 0;
f->first_mp = 0;
f->is_mp = 0;
break;
@@ -1246,16 +1266,40 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
}
break;
+ case SS4_PACKET_ID_STICK:
+ if (!(priv->flags & ALPS_DUALPOINT)) {
+ psmouse_warn(psmouse,
+ "Rejected trackstick packet from non DualPoint device");
+ } else {
+ int x = (s8)(((p[0] & 1) << 7) | (p[1] & 0x7f));
+ int y = (s8)(((p[3] & 1) << 7) | (p[2] & 0x7f));
+ int pressure = (s8)(p[4] & 0x7f);
+
+ input_report_rel(priv->dev2, REL_X, x);
+ input_report_rel(priv->dev2, REL_Y, -y);
+ input_report_abs(priv->dev2, ABS_PRESSURE, pressure);
+ }
+ break;
+
case SS4_PACKET_ID_IDLE:
default:
memset(f, 0, sizeof(struct alps_fields));
break;
}
- f->left = !!(SS4_BTN_V2(p) & 0x01);
- if (!(priv->flags & ALPS_BUTTONPAD)) {
- f->right = !!(SS4_BTN_V2(p) & 0x02);
- f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ /* handle buttons */
+ if (pkt_id == SS4_PACKET_ID_STICK) {
+ f->ts_left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->ts_right = !!(SS4_BTN_V2(p) & 0x02);
+ f->ts_middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
+ } else {
+ f->left = !!(SS4_BTN_V2(p) & 0x01);
+ if (!(priv->flags & ALPS_BUTTONPAD)) {
+ f->right = !!(SS4_BTN_V2(p) & 0x02);
+ f->middle = !!(SS4_BTN_V2(p) & 0x04);
+ }
}
return 0;
@@ -1266,6 +1310,7 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
struct alps_fields *f = &priv->f;
memset(f, 0, sizeof(struct alps_fields));
@@ -1311,6 +1356,13 @@ static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
input_report_abs(dev, ABS_PRESSURE, f->pressure);
input_sync(dev);
+
+ if (priv->flags & ALPS_DUALPOINT) {
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
+ input_sync(dev2);
+ }
}
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
@@ -2695,6 +2747,10 @@ static int alps_set_protocol(struct psmouse *psmouse,
if (alps_set_defaults_ss4_v2(psmouse, priv))
return -EIO;
+ if (priv->fw_ver[1] == 0x1)
+ priv->flags |= ALPS_DUALPOINT |
+ ALPS_DUALPOINT_WITH_PRESSURE;
+
break;
}
@@ -2767,6 +2823,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
} else if (e7[0] == 0x73 && e7[1] == 0x03 &&
e7[2] == 0x14 && ec[1] == 0x02) {
protocol = &alps_v8_protocol_data;
+ } else if (e7[0] == 0x73 && e7[1] == 0x03 &&
+ e7[2] == 0x28 && ec[1] == 0x01) {
+ protocol = &alps_v8_protocol_data;
} else {
psmouse_dbg(psmouse,
"Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
@@ -2949,6 +3008,10 @@ int alps_init(struct psmouse *psmouse)
input_set_capability(dev2, EV_REL, REL_X);
input_set_capability(dev2, EV_REL, REL_Y);
+ if (priv->flags & ALPS_DUALPOINT_WITH_PRESSURE) {
+ input_set_capability(dev2, EV_ABS, ABS_PRESSURE);
+ input_set_abs_params(dev2, ABS_PRESSURE, 0, 127, 0, 0);
+ }
input_set_capability(dev2, EV_KEY, BTN_LEFT);
input_set_capability(dev2, EV_KEY, BTN_RIGHT);
input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index d37f814dc447..b9417e2d7ad3 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -37,12 +37,14 @@
* or there's button activities.
* SS4_PACKET_ID_TWO: There's two or more fingers on touchpad
* SS4_PACKET_ID_MULTI: There's three or more fingers on touchpad
+ * SS4_PACKET_ID_STICK: A stick pointer packet
*/
enum SS4_PACKET_ID {
SS4_PACKET_ID_IDLE = 0,
SS4_PACKET_ID_ONE,
SS4_PACKET_ID_TWO,
SS4_PACKET_ID_MULTI,
+ SS4_PACKET_ID_STICK,
};
#define SS4_COUNT_PER_ELECTRODE 256
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 08e252a42480..db7d1d666ac1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1134,7 +1134,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* System76 Pangolin 0x250f01 ? 2 hw buttons
* (*) + 3 trackpoint buttons
* (**) + 0 trackpoint buttons
- * Note: Lenovo L430 and Lenovo L430 have the same fw_version/caps
+ * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps
*/
static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
{
@@ -1159,6 +1159,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu H760 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
+ },
+ },
#endif
{ }
};
@@ -1503,10 +1510,10 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
- /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ /* Fujitsu H760 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
- DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
{
@@ -1517,6 +1524,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
},
},
{
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E556 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/rmi4/rmi_i2c.c b/drivers/input/rmi4/rmi_i2c.c
index 6f2e0e4f0296..1ebc2c1debae 100644
--- a/drivers/input/rmi4/rmi_i2c.c
+++ b/drivers/input/rmi4/rmi_i2c.c
@@ -221,6 +221,21 @@ static const struct of_device_id rmi_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, rmi_i2c_of_match);
#endif
+static void rmi_i2c_regulator_bulk_disable(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
+ rmi_i2c->supplies);
+}
+
+static void rmi_i2c_unregister_transport(void *data)
+{
+ struct rmi_i2c_xport *rmi_i2c = data;
+
+ rmi_unregister_transport_device(&rmi_i2c->xport);
+}
+
static int rmi_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -264,6 +279,12 @@ static int rmi_i2c_probe(struct i2c_client *client,
if (retval < 0)
return retval;
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_regulator_bulk_disable,
+ rmi_i2c);
+ if (retval)
+ return retval;
+
of_property_read_u32(client->dev.of_node, "syna,startup-delay-ms",
&rmi_i2c->startup_delay);
@@ -294,6 +315,11 @@ static int rmi_i2c_probe(struct i2c_client *client,
client->addr);
return retval;
}
+ retval = devm_add_action_or_reset(&client->dev,
+ rmi_i2c_unregister_transport,
+ rmi_i2c);
+ if (retval)
+ return retval;
retval = rmi_i2c_init_irq(client);
if (retval < 0)
@@ -304,17 +330,6 @@ static int rmi_i2c_probe(struct i2c_client *client,
return 0;
}
-static int rmi_i2c_remove(struct i2c_client *client)
-{
- struct rmi_i2c_xport *rmi_i2c = i2c_get_clientdata(client);
-
- rmi_unregister_transport_device(&rmi_i2c->xport);
- regulator_bulk_disable(ARRAY_SIZE(rmi_i2c->supplies),
- rmi_i2c->supplies);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_i2c_suspend(struct device *dev)
{
@@ -431,7 +446,6 @@ static struct i2c_driver rmi_i2c_driver = {
},
.id_table = rmi_id,
.probe = rmi_i2c_probe,
- .remove = rmi_i2c_remove,
};
module_i2c_driver(rmi_i2c_driver);
diff --git a/drivers/input/rmi4/rmi_spi.c b/drivers/input/rmi4/rmi_spi.c
index 55bd1b34970c..4ebef607e214 100644
--- a/drivers/input/rmi4/rmi_spi.c
+++ b/drivers/input/rmi4/rmi_spi.c
@@ -396,6 +396,13 @@ static inline int rmi_spi_of_probe(struct spi_device *spi,
}
#endif
+static void rmi_spi_unregister_transport(void *data)
+{
+ struct rmi_spi_xport *rmi_spi = data;
+
+ rmi_unregister_transport_device(&rmi_spi->xport);
+}
+
static int rmi_spi_probe(struct spi_device *spi)
{
struct rmi_spi_xport *rmi_spi;
@@ -464,6 +471,11 @@ static int rmi_spi_probe(struct spi_device *spi)
dev_err(&spi->dev, "failed to register transport.\n");
return retval;
}
+ retval = devm_add_action_or_reset(&spi->dev,
+ rmi_spi_unregister_transport,
+ rmi_spi);
+ if (retval)
+ return retval;
retval = rmi_spi_init_irq(spi);
if (retval < 0)
@@ -473,15 +485,6 @@ static int rmi_spi_probe(struct spi_device *spi)
return 0;
}
-static int rmi_spi_remove(struct spi_device *spi)
-{
- struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
-
- rmi_unregister_transport_device(&rmi_spi->xport);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
@@ -577,7 +580,6 @@ static struct spi_driver rmi_spi_driver = {
},
.id_table = rmi_id,
.probe = rmi_spi_probe,
- .remove = rmi_spi_remove,
};
module_spi_driver(rmi_spi_driver);
diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h
index a5eed2ade53d..34da81c006b6 100644
--- a/drivers/input/serio/i8042-io.h
+++ b/drivers/input/serio/i8042-io.h
@@ -81,7 +81,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ip22io.h b/drivers/input/serio/i8042-ip22io.h
index ee1ad27d6ed0..08a1c10a1448 100644
--- a/drivers/input/serio/i8042-ip22io.h
+++ b/drivers/input/serio/i8042-ip22io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
return -EBUSY;
#endif
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-ppcio.h b/drivers/input/serio/i8042-ppcio.h
index f708c75d16f1..1aabea43329e 100644
--- a/drivers/input/serio/i8042-ppcio.h
+++ b/drivers/input/serio/i8042-ppcio.h
@@ -44,7 +44,7 @@ static inline void i8042_write_command(int val)
static inline int i8042_platform_init(void)
{
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index afcd1c1a05b2..6231d63860ee 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -130,7 +130,7 @@ static int __init i8042_platform_init(void)
}
}
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
index 73f5cc124a36..455747552f85 100644
--- a/drivers/input/serio/i8042-unicore32io.h
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -61,7 +61,7 @@ static inline int i8042_platform_init(void)
if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
return -EBUSY;
- i8042_reset = 1;
+ i8042_reset = I8042_RESET_ALWAYS;
return 0;
}
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 68f5f4a0f1e7..f4bfb4b2d50a 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -510,6 +510,90 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
{ }
};
+/*
+ * On some Asus laptops, just running the self test causes problems.
+ */
+static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A455LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K401LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K501LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "R409L"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "V502LX"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X302LA"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X450LD"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LAB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LDB"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X455LF"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Z450LA"),
+ },
+ },
+ { }
+};
static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
{
/* MSI Wind U-100 */
@@ -1072,12 +1156,18 @@ static int __init i8042_platform_init(void)
return retval;
#if defined(__ia64__)
- i8042_reset = true;
+ i8042_reset = I8042_RESET_ALWAYS;
#endif
#ifdef CONFIG_X86
- if (dmi_check_system(i8042_dmi_reset_table))
- i8042_reset = true;
+ /* Honor module parameter when value is not default */
+ if (i8042_reset == I8042_RESET_DEFAULT) {
+ if (dmi_check_system(i8042_dmi_reset_table))
+ i8042_reset = I8042_RESET_ALWAYS;
+
+ if (dmi_check_system(i8042_dmi_noselftest_table))
+ i8042_reset = I8042_RESET_NEVER;
+ }
if (dmi_check_system(i8042_dmi_noloop_table))
i8042_noloop = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 405252a884dd..89abfdb539ac 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -48,9 +48,39 @@ static bool i8042_unlock;
module_param_named(unlock, i8042_unlock, bool, 0);
MODULE_PARM_DESC(unlock, "Ignore keyboard lock.");
-static bool i8042_reset;
-module_param_named(reset, i8042_reset, bool, 0);
-MODULE_PARM_DESC(reset, "Reset controller during init and cleanup.");
+enum i8042_controller_reset_mode {
+ I8042_RESET_NEVER,
+ I8042_RESET_ALWAYS,
+ I8042_RESET_ON_S2RAM,
+#define I8042_RESET_DEFAULT I8042_RESET_ON_S2RAM
+};
+static enum i8042_controller_reset_mode i8042_reset = I8042_RESET_DEFAULT;
+static int i8042_set_reset(const char *val, const struct kernel_param *kp)
+{
+ enum i8042_controller_reset_mode *arg = kp->arg;
+ int error;
+ bool reset;
+
+ if (val) {
+ error = kstrtobool(val, &reset);
+ if (error)
+ return error;
+ } else {
+ reset = true;
+ }
+
+ *arg = reset ? I8042_RESET_ALWAYS : I8042_RESET_NEVER;
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_reset_param = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = i8042_set_reset,
+};
+#define param_check_reset_param(name, p) \
+ __param_check(name, p, enum i8042_controller_reset_mode)
+module_param_named(reset, i8042_reset, reset_param, 0);
+MODULE_PARM_DESC(reset, "Reset controller on resume, cleanup or both");
static bool i8042_direct;
module_param_named(direct, i8042_direct, bool, 0);
@@ -1019,7 +1049,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(bool force_reset)
+static void i8042_controller_reset(bool s2r_wants_reset)
{
i8042_flush();
@@ -1044,8 +1074,10 @@ static void i8042_controller_reset(bool force_reset)
* Reset the controller if requested.
*/
- if (i8042_reset || force_reset)
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
i8042_controller_selftest();
+ }
/*
* Restore the original control register setting.
@@ -1110,7 +1142,7 @@ static void i8042_dritek_enable(void)
* before suspending.
*/
-static int i8042_controller_resume(bool force_reset)
+static int i8042_controller_resume(bool s2r_wants_reset)
{
int error;
@@ -1118,7 +1150,8 @@ static int i8042_controller_resume(bool force_reset)
if (error)
return error;
- if (i8042_reset || force_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS ||
+ (i8042_reset == I8042_RESET_ON_S2RAM && s2r_wants_reset)) {
error = i8042_controller_selftest();
if (error)
return error;
@@ -1195,7 +1228,7 @@ static int i8042_pm_resume_noirq(struct device *dev)
static int i8042_pm_resume(struct device *dev)
{
- bool force_reset;
+ bool want_reset;
int i;
for (i = 0; i < I8042_NUM_PORTS; i++) {
@@ -1218,9 +1251,9 @@ static int i8042_pm_resume(struct device *dev)
* off control to the platform firmware, otherwise we can simply restore
* the mode.
*/
- force_reset = pm_resume_via_firmware();
+ want_reset = pm_resume_via_firmware();
- return i8042_controller_resume(force_reset);
+ return i8042_controller_resume(want_reset);
}
static int i8042_pm_thaw(struct device *dev)
@@ -1482,7 +1515,7 @@ static int __init i8042_probe(struct platform_device *dev)
i8042_platform_device = dev;
- if (i8042_reset) {
+ if (i8042_reset == I8042_RESET_ALWAYS) {
error = i8042_controller_selftest();
if (error)
return error;
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index fb5fb9140ca9..552a3773f79d 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -157,6 +157,7 @@ struct mip4_ts {
char phys[32];
char product_name[16];
+ char ic_name[4];
unsigned int max_x;
unsigned int max_y;
@@ -263,6 +264,18 @@ static int mip4_query_device(struct mip4_ts *ts)
dev_dbg(&ts->client->dev, "product name: %.*s\n",
(int)sizeof(ts->product_name), ts->product_name);
+ /* IC name */
+ cmd[0] = MIP4_R0_INFO;
+ cmd[1] = MIP4_R1_INFO_IC_NAME;
+ error = mip4_i2c_xfer(ts, cmd, sizeof(cmd),
+ ts->ic_name, sizeof(ts->ic_name));
+ if (error)
+ dev_warn(&ts->client->dev,
+ "Failed to retrieve IC name: %d\n", error);
+ else
+ dev_dbg(&ts->client->dev, "IC name: %.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
/* Firmware version */
error = mip4_get_fw_version(ts);
if (error)
@@ -1326,7 +1339,7 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
* paired with current firmware in the chip.
*/
count = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)sizeof(ts->product_name), ts->product_name);
+ (int)sizeof(ts->product_name), ts->product_name);
mutex_unlock(&ts->input->mutex);
@@ -1335,9 +1348,30 @@ static ssize_t mip4_sysfs_read_hw_version(struct device *dev,
static DEVICE_ATTR(hw_version, S_IRUGO, mip4_sysfs_read_hw_version, NULL);
+static ssize_t mip4_sysfs_read_ic_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mip4_ts *ts = i2c_get_clientdata(client);
+ size_t count;
+
+ mutex_lock(&ts->input->mutex);
+
+ count = snprintf(buf, PAGE_SIZE, "%.*s\n",
+ (int)sizeof(ts->ic_name), ts->ic_name);
+
+ mutex_unlock(&ts->input->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(ic_name, S_IRUGO, mip4_sysfs_read_ic_name, NULL);
+
static struct attribute *mip4_attrs[] = {
&dev_attr_fw_version.attr,
&dev_attr_hw_version.attr,
+ &dev_attr_ic_name.attr,
&dev_attr_update_fw.attr,
NULL,
};
@@ -1538,6 +1572,6 @@ static struct i2c_driver mip4_driver = {
module_i2c_driver(mip4_driver);
MODULE_DESCRIPTION("MELFAS MIP4 Touchscreen");
-MODULE_VERSION("2016.03.12");
+MODULE_VERSION("2016.09.28");
MODULE_AUTHOR("Sangwon Jee <jeesw@melfas.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index c0e7b624ce54..12102448fddd 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -178,7 +178,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
idev->id_vendor, idev->id_device);
}
-ipack_device_attr(id_format, "0x%hhu\n");
+ipack_device_attr(id_format, "0x%hhx\n");
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(id_device);
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 82b0b5daf3f5..bc0af3307bbf 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -158,8 +158,8 @@ config PIC32_EVIC
select IRQ_DOMAIN
config JCORE_AIC
- bool "J-Core integrated AIC"
- depends on OF && (SUPERH || COMPILE_TEST)
+ bool "J-Core integrated AIC" if COMPILE_TEST
+ depends on OF
select IRQ_DOMAIN
help
Support for the J-Core integrated AIC.
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4304b7..2a7a38830a8d 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
nps_ack_gic();
}
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
{
unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
.name = "NPS400 IC",
.irq_mask = nps400_irq_mask,
.irq_unmask = nps400_irq_unmask,
- .irq_eoi = nps400_irq_eoi,
+ .irq_ack = nps400_irq_ack,
};
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
static int __init nps400_of_init(struct device_node *node,
struct device_node *parent)
{
- static struct irq_domain *nps400_root_domain;
+ struct irq_domain *nps400_root_domain;
if (parent) {
pr_err("DeviceTree incore ic not a root irq controller\n");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 003495d91f9c..c5dee300e8a3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u64 typer = gic_read_typer(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
@@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void)
* We now have to bind each collection to its target
* redistributor.
*/
- if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
@@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void)
/*
* This ITS wants a linear CPU number.
*/
- target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
@@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->numa_node = numa_node;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ out_unmap:
static bool gic_rdists_supports_plpis(void)
{
- return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8b929c..19d642eae096 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (count--) {
+ while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58e5b4e87056..d6c404b3584d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
*/
*base += 0xf000;
cpuif_res.start += 0xf000;
- pr_warn("GIC: Adjusting CPU interface base to %pa",
+ pr_warn("GIC: Adjusting CPU interface base to %pa\n",
&cpuif_res.start);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 6b304eb39bd2..1aec12c6d9ac 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -38,6 +38,7 @@ static void disable_8259A_irq(struct irq_data *d);
static void enable_8259A_irq(struct irq_data *d);
static void mask_and_ack_8259A(struct irq_data *d);
static void init_8259A(int auto_eoi);
+static int (*i8259_poll)(void) = i8259_irq;
static struct irq_chip i8259A_chip = {
.name = "XT-PIC",
@@ -51,6 +52,11 @@ static struct irq_chip i8259A_chip = {
* 8259A PIC functions to handle ISA devices:
*/
+void i8259_set_poll(int (*poll)(void))
+{
+ i8259_poll = poll;
+}
+
/*
* This contains the irq mask for both 8259A irq controllers,
*/
@@ -89,24 +95,6 @@ static void enable_8259A_irq(struct irq_data *d)
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
}
-int i8259A_irq_pending(unsigned int irq)
-{
- unsigned int mask;
- unsigned long flags;
- int ret;
-
- irq -= I8259A_IRQ_BASE;
- mask = 1 << irq;
- raw_spin_lock_irqsave(&i8259A_lock, flags);
- if (irq < 8)
- ret = inb(PIC_MASTER_CMD) & mask;
- else
- ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
- raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
- return ret;
-}
-
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
@@ -355,7 +343,7 @@ void __init init_i8259_irqs(void)
static void i8259_irq_dispatch(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- int hwirq = i8259_irq();
+ int hwirq = i8259_poll();
unsigned int irq;
if (hwirq < 0)
@@ -370,13 +358,15 @@ int __init i8259_of_init(struct device_node *node, struct device_node *parent)
struct irq_domain *domain;
unsigned int parent_irq;
+ domain = __init_i8259_irqs(node);
+
parent_irq = irq_of_parse_and_map(node, 0);
if (!parent_irq) {
pr_err("Failed to map i8259 parent IRQ\n");
+ irq_domain_remove(domain);
return -ENODEV;
}
- domain = __init_i8259_irqs(node);
irq_set_chained_handler_and_data(parent_irq, i8259_irq_dispatch,
domain);
return 0;
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01dec277d..033bccb41455 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
static struct irq_chip jcore_aic;
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+ if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+ handle_percpu_irq(desc);
+ else
+ handle_simple_irq(desc);
+}
+
static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct irq_chip *aic = d->host_data;
- irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+ irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
return 0;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8abde6b8cedc..6d53810963f7 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -266,7 +266,7 @@ static struct raid_type {
{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
- {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
+ {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
@@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
/*
* No takeover/reshaping, because we don't have the extended v1.9.0 metadata
*/
- if (le32_to_cpu(sb->level) != mddev->level) {
+ if (le32_to_cpu(sb->level) != mddev->new_level) {
DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
return -EINVAL;
}
- if (le32_to_cpu(sb->layout) != mddev->layout) {
+ if (le32_to_cpu(sb->layout) != mddev->new_layout) {
DMERR("Reshaping raid sets not yet supported. (raid layout change)");
DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
DMERR(" Old layout: %s w/ %d copies",
@@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
raid10_md_layout_to_copies(mddev->layout));
return -EINVAL;
}
- if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
+ if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
return -EINVAL;
}
@@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
return -EINVAL;
}
+ DMINFO("Discovered old metadata format; upgrading to extended metadata format");
+
/* Table line is checked vs. authoritative superblock */
rs_set_new(rs);
}
@@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(rs, rdev))
return -EINVAL;
- if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
+ if (le32_to_cpu(sb->compat_features) &&
+ le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
return -EINVAL;
}
@@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 9, 0},
+ .version = {1, 9, 1},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bdf1606f67bc..9a8b71067c6e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -145,7 +145,6 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record {
struct mirror *m;
- /* if details->bi_bdev == NULL, details were not saved */
struct dm_bio_details details;
region_t write_region;
};
@@ -1200,8 +1199,6 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
- bio_record->details.bi_bdev = NULL;
-
if (rw == WRITE) {
/* Save region for mirror_end_io() handler */
bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
@@ -1260,22 +1257,12 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
}
if (error == -EOPNOTSUPP)
- goto out;
+ return error;
if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
- goto out;
+ return error;
if (unlikely(error)) {
- if (!bio_record->details.bi_bdev) {
- /*
- * There wasn't enough memory to record necessary
- * information for a retry or there was no other
- * mirror in-sync.
- */
- DMERR_LIMIT("Mirror read failed.");
- return -EIO;
- }
-
m = bio_record->m;
DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1291,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
bd = &bio_record->details;
dm_bio_restore(bd, bio);
- bio_record->details.bi_bdev = NULL;
+ bio->bi_error = 0;
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
@@ -1299,9 +1286,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
DMERR("All replicated volumes dead, failing I/O");
}
-out:
- bio_record->details.bi_bdev = NULL;
-
return error;
}
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index dc75bea0d541..1d0d2adc050a 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -856,8 +856,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
kthread_init_worker(&md->kworker);
md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
"kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task))
- return PTR_ERR(md->kworker_task);
+ if (IS_ERR(md->kworker_task)) {
+ int error = PTR_ERR(md->kworker_task);
+ md->kworker_task = NULL;
+ return error;
+ }
elv_register_queue(md->queue);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3e407a9cde1f..c4b53b332607 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
tgt->type = dm_get_target_type(type);
if (!tgt->type) {
- DMERR("%s: %s: unknown target type", dm_device_name(t->md),
- type);
+ DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
return -EINVAL;
}
if (dm_target_needs_singleton(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: target type %s must appear alone in table",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "singleton target type must appear alone in table";
+ goto bad;
}
t->singleton = true;
}
if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
- DMERR("%s: target type %s may not be included in read-only tables",
- dm_device_name(t->md), type);
- return -EINVAL;
+ tgt->error = "target type may not be included in a read-only table";
+ goto bad;
}
if (t->immutable_target_type) {
if (t->immutable_target_type != tgt->type) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), t->immutable_target_type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
} else if (dm_target_is_immutable(tgt->type)) {
if (t->num_targets) {
- DMERR("%s: immutable target type %s cannot be mixed with other target types",
- dm_device_name(t->md), tgt->type->name);
- return -EINVAL;
+ tgt->error = "immutable target type cannot be mixed with other target types";
+ goto bad;
}
t->immutable_target_type = tgt->type;
}
@@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
*/
if (!adjoin(t, tgt)) {
tgt->error = "Gap in table";
- r = -EINVAL;
goto bad;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 147af9536d0c..ef7bf1dd6900 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->bs)
bioset_free(md->bs);
- cleanup_srcu_struct(&md->io_barrier);
-
if (md->disk) {
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
@@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
if (md->queue)
blk_cleanup_queue(md->queue);
+ cleanup_srcu_struct(&md->io_barrier);
+
if (md->bdev) {
bdput(md->bdev);
md->bdev = NULL;
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
- 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+ dma->map, FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Get user pages for DMA Xfer */
y_pages = get_user_pages_unlocked(y_dma.uaddr,
- y_dma.page_count, 0, 1, &dma->map[0]);
+ y_dma.page_count, &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
- uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+ uv_dma.page_count, &dma->map[y_pages],
+ FOLL_FORCE);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index e668dde6d857..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
if (!vec)
return -ENOMEM;
- ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
if (ret != 1) {
frame_vector_destroy(vec);
return -EINVAL;
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 29b3436d0910..367523a3c774 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
- depends on PCI && BUILD_DOCSRC
+ depends on PCI
depends on VIDEO_V4L2 && VIDEOBUF2_CORE
depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
---help---
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
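The media hunks above all apply the same mechanical conversion: get_user_pages(), get_user_pages_unlocked() and get_vaddr_frames() now take a single gup_flags bitmask instead of separate write/force ints, with write mapping to FOLL_WRITE and force to FOLL_FORCE. A minimal sketch of that mapping follows (illustrative only, not part of the patch; the helper name and buffer are made up). Note the videobuf case above: a READ from the device means the device writes into the user pages, hence FOLL_WRITE.

#include <linux/mm.h>

/* Sketch of the old (write, force) arguments folded into gup_flags. */
static long example_pin_pages(unsigned long uaddr, unsigned long nr_pages,
                              bool device_writes, struct page **pages)
{
        unsigned int gup_flags = FOLL_FORCE;            /* was: force = 1 */

        if (device_writes)                              /* was: write = 1 */
                gup_flags |= FOLL_WRITE;

        /* old: get_user_pages(uaddr, nr_pages, write, force, pages, NULL); */
        return get_user_pages(uaddr, nr_pages, gup_flags, pages, NULL);
}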
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530385..2e3cf012ef48 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
int rc;
if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
do {
rc = memstick_next_req(msh, &host->req);
dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
host->req->error);
}
} while (!rc);
+ pm_runtime_put(ms_dev(host));
}
}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
}
out:
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(ms_dev(host));
/* power-on delay */
if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
int err;
for (;;) {
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
/* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
}
poll_again:
+ pm_runtime_put(ms_dev(host));
if (host->eject)
break;
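The rtsx_usb_ms hunks above bracket every stretch of hardware access with pm_runtime_get_sync()/pm_runtime_put(), so the USB reader is resumed before register I/O and can autosuspend afterwards. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_do_hw_work(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume the device (or fail) */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* balance the usage count on error */
                return ret;
        }

        /* ... register access goes here ... */

        pm_runtime_put(dev);                    /* drop the reference, allow autosuspend */
        return 0;
}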
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941f85..2e5233b60971 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (ctx->status == STARTED)
goto out; /* already started */
+ /*
+	 * Increment the mapped context count for the adapter. This also checks
+	 * whether the adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc)
+ goto out;
+
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -239,7 +247,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
cxl_ctx_get();
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
+ put_pid(ctx->glpid);
put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
+ cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0c97..5e506c19108a 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
put_pid(ctx->glpid);
cxl_ctx_put();
+
+ /* Decrease the attached context count on the adapter */
+ cxl_adapter_context_put(ctx->afu->adapter);
return 0;
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 01d372aba131..a144073593fa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -618,6 +618,14 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+
+ /*
+	 * Number of contexts mapped onto this card. Possible values are:
+	 * >0: Number of contexts mapped and new ones can be mapped.
+ * 0: No active contexts and new ones can be mapped.
+ * -1: No contexts mapped and new ones cannot be mapped.
+ */
+ atomic_t contexts_num;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * In case the adapter_context_lock is taken, this returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If there are no active contexts, prevent further contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
#endif
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b157f..77080cc5fa0a 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
/*
+	 * Increment the mapped context count for the adapter. This also checks
+	 * whether the adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
+ /*
* We grab the PID here and not in the file open to allow for the case
* where a process (master, some daemon, etc) has opened the chardev on
* behalf of another process, so the AFU's mm gets bound to the process
@@ -205,11 +215,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
+ put_pid(ctx->glpid);
+ put_pid(ctx->pid);
+ ctx->glpid = ctx->pid = NULL;
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a24d..3e102cd6ed91 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24aa3..62e0dfb5f15b 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
- return adapter;
+ /* start with context lock taken */
+ atomic_set(&adapter->contexts_num, -1);
+ return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
return 0;
}
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+ return rc >= 0 ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+ atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+ int rc;
+ /* no active contexts -> contexts_num == 0 */
+ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+ return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+ /*
+ * contexts lock taken -> contexts_num == -1
+ * If not true then show a warning and force reset the lock.
+ * This will happen when context_unlock was requested without
+ * doing a context_lock.
+ */
+ if (val != -1) {
+ atomic_set(&adapter->contexts_num, 0);
+ WARN(1, "Adapter context unlocked with %d active contexts",
+ val);
+ }
+}
+
static int __init init_cxl(void)
{
int rc = 0;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 7afad8477ad5..e96be9ca4e60 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
+	/* Release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
return 0;
err:
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f158f..a8b6d6a635e9 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
int val;
rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1))
+ if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
- if ((rc = cxl_ops->adapter_reset(adapter)))
- return rc;
- return count;
+ /*
+	 * See if we can take the context-mapping lock, which is only possible
+	 * when there are no contexts attached to the adapter. Once taken, it
+	 * also prevents any new context from being activated.
+ */
+ if (val == 1) {
+ rc = cxl_adapter_context_lock(adapter);
+ if (rc)
+ goto out;
+
+ rc = cxl_ops->adapter_reset(adapter);
+		/* In case the reset failed, release the context lock */
+ if (rc)
+ cxl_adapter_context_unlock(adapter);
+
+ } else if (val == -1) {
+ /* Perform a forced adapter reset */
+ rc = cxl_ops->adapter_reset(adapter);
+ }
+
+out:
+ return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
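Taken together, the cxl hunks above implement a small counter-as-lock idiom: contexts_num counts attached contexts while it is non-negative, and the special value -1 means "locked, no new contexts may attach". A condensed illustration of the idiom (not part of the patch; all names are made up):

#include <linux/atomic.h>
#include <linux/errno.h>

static atomic_t users = ATOMIC_INIT(-1);        /* start locked, as cxl_alloc_adapter() does */

static int example_get(void)                    /* cf. cxl_adapter_context_get() */
{
        return atomic_inc_unless_negative(&users) ? 0 : -EBUSY;
}

static void example_put(void)                   /* cf. cxl_adapter_context_put() */
{
        atomic_dec_if_positive(&users);
}

static int example_lock(void)                   /* cf. cxl_adapter_context_lock() */
{
        return atomic_cmpxchg(&users, 0, -1) ? -EBUSY : 0;      /* only 0 -> -1 succeeds */
}

static void example_unlock(void)                /* cf. cxl_adapter_context_unlock() */
{
        atomic_cmpxchg(&users, -1, 0);          /* the real code WARNs if it was not locked */
}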
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8a679ecc8fd1..fc2794b513fa 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
rc = -EFAULT;
- goto err_out1;
+ goto err_out2;
}
}
return 0;
+ err_out2:
+ __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+ sgl->lpage_dma_addr);
+ sgl->lpage = NULL;
+ sgl->lpage_dma_addr = 0;
err_out1:
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
+ sgl->fpage = NULL;
+ sgl->fpage_dma_addr = 0;
err_out:
__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
sgl->sgl_dma_addr);
+ sgl->sgl = NULL;
+ sgl->sgl_dma_addr = 0;
+ sgl->sgl_size = 0;
return -ENOMEM;
}
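The genwqe hunk above extends the usual goto unwind ladder and, importantly, resets each pointer after freeing it so the sgl state stays consistent if the structure is freed again later. A minimal sketch of the idiom with made-up names:

#include <linux/slab.h>

struct example_bufs {
        void *a;
        void *b;
};

static int example_setup(struct example_bufs *ex, size_t len)
{
        ex->a = kzalloc(len, GFP_KERNEL);
        if (!ex->a)
                goto err_out;

        ex->b = kzalloc(len, GFP_KERNEL);
        if (!ex->b)
                goto err_free_a;

        return 0;

err_free_a:
        kfree(ex->a);
        ex->a = NULL;           /* keep the struct consistent, as the hunk above does */
err_out:
        return -ENOMEM;
}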
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e6e5e55a12ed..60415a2bfcbd 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -981,11 +981,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hisr = mei_txe_br_reg_read(hw, HISR_REG);
aliveness = mei_txe_aliveness_get(dev);
- if (hhisr & IPC_HHIER_SEC && aliveness)
+ if (hhisr & IPC_HHIER_SEC && aliveness) {
ipc_isr = mei_txe_sec_reg_read_silent(hw,
SEC_IPC_HOST_INT_STATUS_REG);
- else
+ } else {
ipc_isr = 0;
+ hhisr &= ~IPC_HHIER_SEC;
+ }
generated = generated ||
(hisr & HISR_INT_STS_MSK) ||
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
pinned_pages->nr_pages = get_user_pages(
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 1525870f460a..33741ad4a74a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -283,7 +283,7 @@ static void gru_unload_mm_tracker(struct gru_state *gru,
spin_lock(&gru->gs_asid_lock);
BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap);
asids->mt_ctxbitmap ^= ctxbitmap;
- gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum 0x%d, asidmap 0x%lx\n",
+ gru_dbg(grudev, "gid %d, gts %p, gms %p, ctxnum %d, asidmap 0x%lx\n",
gru->gs_gid, gts, gms, gts->ts_ctxnum, gms->ms_asidmap[0]);
spin_unlock(&gru->gs_asid_lock);
spin_unlock(&gms->ms_asid_lock);
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index a8cee33ae8d2..b3fa738ae005 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -431,6 +431,12 @@ int vmci_doorbell_create(struct vmci_handle *handle,
if (vmci_handle_is_invalid(*handle)) {
u32 context_id = vmci_get_context_id();
+ if (context_id == VMCI_INVALID_ID) {
+ pr_warn("Failed to get context ID\n");
+ result = VMCI_ERROR_NO_RESOURCES;
+ goto free_mem;
+ }
+
/* Let resource code allocate a free ID for us */
new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
} else {
@@ -525,7 +531,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle)
entry = container_of(resource, struct dbell_entry, resource);
- if (vmci_guest_code_active()) {
+ if (!hlist_unhashed(&entry->node)) {
int result;
dbell_index_table_remove(entry);
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 896be150e28f..d7eaf1eb11e7 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.4.0-k");
+MODULE_VERSION("1.1.5.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..709a872ed484 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
#include <asm/uaccess.h>
#include "queue.h"
+#include "block.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->data;
struct mmc_packed *packed = mqrq->packed;
bool do_rel_wr, do_data_tag;
- u32 *packed_cmd_hdr;
+ __le32 *packed_cmd_hdr;
u8 hdr_blocks;
u8 i = 1;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 3c15a75bae86..342f1e3f301e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
struct mmc_packed {
struct list_head list;
- u32 cmd_hdr[1024];
+ __le32 cmd_hdr[1024];
unsigned int blocks;
u8 nr_entries;
u8 retries;
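Changing cmd_hdr from u32 to __le32 documents that the packed command header is a little-endian, device-visible structure, so every store into it must go through cpu_to_le32() for big-endian hosts to produce the same byte layout. Illustrative sketch only; the field packing shown is hypothetical:

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_fill_hdr(__le32 *hdr, u32 nr_entries)
{
        hdr[0] = cpu_to_le32(0x01 | (nr_entries << 16));        /* hypothetical version/count word */
        hdr[1] = cpu_to_le32(0x02);                             /* hypothetical opcode word */
}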
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3486bc7fbb64..39fc5b2b96c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+	/* If this fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
err = mmc_select_bus_width(card);
if (err < 0)
goto out_err;
@@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
err = mmc_switch_status(card);
if (err)
goto out_err;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 4106295527b9..6e9c0f8fddb1 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
mutex_lock(&ucr->dev_mutex);
- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
- mutex_unlock(&ucr->dev_mutex);
- return;
- }
-
sd_set_power_mode(host, ios->power_mode);
sd_set_bus_width(host, ios->bus_width);
sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
container_of(work, struct rtsx_usb_sdmmc, led_work);
struct rtsx_ucr *ucr = host->ucr;
+ pm_runtime_get_sync(sdmmc_dev(host));
mutex_lock(&ucr->dev_mutex);
if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
rtsx_usb_turn_on_led(ucr);
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(sdmmc_dev(host));
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f54fd8755c8..7123ef96ed18 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
+ reg == SDHCI_INT_STATUS)) {
if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
esdhc_clrset_le(host, 0xffff, val, reg);
}
+static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 val;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL:
+ val = readl(host->ioaddr + reg);
+
+ ret = val & SDHCI_CTRL_LED;
+ ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
+ ret |= (val & ESDHC_CTRL_4BITBUS);
+ ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
+ return ret;
+ }
+
+ return readb(host->ioaddr + reg);
+}
+
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
+ .read_b = esdhc_readb_le,
.write_l = esdhc_writel_le,
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index da8e40af6f85..410a55b1c25f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
}
-void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
{
u8 ctrl;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
}
}
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ /*
+		 * Please don't switch to 1V8: the arasan,5.1 controller doesn't
+		 * actually use this setting to indicate the signal voltage, and
+		 * the state machine would break if we forced 1V8 here. That is
+		 * effectively a broken quirk, but we can work around it by simply
+		 * accepting the request.
+ */
+ return 0;
+ case MMC_SIGNAL_VOLTAGE_330:
+ case MMC_SIGNAL_VOLTAGE_120:
+ /* We don't support 3V3 and 1V2 */
+ break;
+ }
+
+ return -EINVAL;
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_arasan_hs400_enhanced_strobe;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_arasan_voltage_switch;
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 72a1f1f5180a..1d9e00a00e9f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,6 +32,14 @@
#include "sdhci-pci.h"
#include "sdhci-pci-o2micro.h"
+static int sdhci_pci_enable_dma(struct sdhci_host *host);
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
+static void sdhci_pci_hw_reset(struct sdhci_host *host);
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
+#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ int cntr;
+ u8 reg;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (mode == MMC_POWER_OFF)
+ return;
+
+ /*
+ * Bus power might not enable after D3 -> D0 transition due to the
+ * present state not yet having propagated. Retry for up to 2ms.
+ */
+ for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ if (reg & SDHCI_POWER_ON)
+ break;
+ udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
+ reg |= SDHCI_POWER_ON;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ }
+}
+
+static const struct sdhci_ops sdhci_intel_byt_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .select_drive_strength = sdhci_pci_select_drive_strength,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
host->hw_name = "PCI";
- host->ops = &sdhci_pci_ops;
+ host->ops = chip->fixes && chip->fixes->ops ?
+ chip->fixes->ops :
+ &sdhci_pci_ops;
host->quirks = chip->quirks;
host->quirks2 = chip->quirks2;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c7c08b93223..6bccf56bc5ff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
+
+ const struct sdhci_ops *ops;
};
struct sdhci_pci_slot {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index dd1938d341f7..d0f5c05fbc19 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
struct mmc_host *mmc = host->mmc;
u8 pwr = host->pwr;
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_noreg(host, mode, vdd);
if (host->pwr == pwr)
return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 48055666c655..71654b90227f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* host->clock is in Hz. target_timeout is in us.
* Hence, us = 1000000 * cycles / Hz. Round up.
*/
- val = 1000000 * data->timeout_clks;
+ val = 1000000ULL * data->timeout_clks;
if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
/* Initially, a command has no error */
cmd->error = 0;
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd->flags |= MMC_RSP_BUSY;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
-void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
u8 pwr = 0;
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
mdelay(10);
}
}
-EXPORT_SYMBOL_GPL(sdhci_set_power);
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
-static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
- struct mmc_host *mmc = host->mmc;
-
- if (host->ops->set_power)
- host->ops->set_power(host, mode, vdd);
- else if (!IS_ERR(mmc->supply.vmmc))
- sdhci_set_power_reg(host, mode, vdd);
+ if (IS_ERR(host->mmc->supply.vmmc))
+ sdhci_set_power_noreg(host, mode, vdd);
else
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_reg(host, mode, vdd);
}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
* *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- __sdhci_set_power(host, ios->power_mode, ios->vdd);
+ if (host->ops->set_power)
+ host->ops->set_power(host, ios->power_mode, ios->vdd);
+ else
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
if (!host->cmd) {
/*
@@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION)
- *mask &= ~SDHCI_INT_DATA_END;
-
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
@@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
- &intmask);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c722cd23205c..766df17fb7eb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
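After the sdhci rework above, a platform driver that supplies its own .set_power callback is expected to do any vendor-specific work and then fall through to one of the exported helpers: sdhci_set_power() (which also drives the vmmc regulator) or sdhci_set_power_noreg() (register-only, as the pxav3 hunk now uses). A hedged sketch with an illustrative callback name, assuming the driver's usual #include "sdhci.h":

static void example_set_power(struct sdhci_host *host, unsigned char mode,
                              unsigned short vdd)
{
        /* vendor-specific quirk handling would go here */

        sdhci_set_power_noreg(host, mode, vdd);         /* program SDHCI_POWER_CONTROL only */
}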
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 95c4048a371e..388e46be6ad9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
goto out_put;
}
+ vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index d6384d965788..c1f5c29e458e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* new_aeb is newer */
if (cmp_res & 1) {
- victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
+ victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
@@ -707,11 +707,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
- if (!av)
- goto fail_bad;
- if (PTR_ERR(av) == -EINVAL) {
- ubi_err(ubi, "volume (ID %i) already exists",
- fmvhdr->vol_id);
+ if (IS_ERR(av)) {
+ if (PTR_ERR(av) == -EEXIST)
+ ubi_err(ubi, "volume (ID %i) already exists",
+ fmvhdr->vol_id);
+
goto fail_bad;
}
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 76fb8552c9d9..ef63d24fef81 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -256,6 +256,7 @@ static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm63xx-switch" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e218887f18b7..e3ee27ce13dd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1133,6 +1133,20 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
return 0;
}
+static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
+{
+ struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+
+ /* For a kernel about to be kexec'd we want to keep the GPHY on for a
+ * successful MDIO bus scan to occur. If we did turn off the GPHY
+	 * before (e.g. port_disable), this will also power it back on.
+ *
+ * Do not rely on kexec_in_progress, just power the PHY on.
+ */
+ if (priv->hw_params.num_gphy == 1)
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+}
+
#ifdef CONFIG_PM_SLEEP
static int bcm_sf2_suspend(struct device *dev)
{
@@ -1158,10 +1172,12 @@ static const struct of_device_id bcm_sf2_of_match[] = {
{ .compatible = "brcm,bcm7445-switch-v4.0" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
.remove = bcm_sf2_sw_remove,
+ .shutdown = bcm_sf2_sw_shutdown,
.driver = {
.name = "brcm-sf2",
.of_match_table = bcm_sf2_of_match,
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 99c40552ea90..b59aa3541270 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1359,6 +1359,7 @@ static const struct of_device_id nb8800_dt_ids[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
static int nb8800_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7e513cacb57a..5c7acef1de2e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1126,7 +1126,8 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- phy_disconnect(phydev);
+ if (priv->has_phy)
+ phy_disconnect(phydev);
return ret;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 856379cbb402..31ca204b38d2 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1449,7 +1449,7 @@ static int bgmac_phy_connect(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connecton failed\n");
+ dev_err(bgmac->dev, "PHY connection failed\n");
return PTR_ERR(phy_dev);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2a5df3f71e9f..eab49ff4eecd 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -271,22 +271,25 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
+ unsigned long flags;
u32 val;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
return val;
}
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
- spin_lock_bh(&bp->indirect_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->indirect_lock, flags);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
static void
@@ -304,8 +307,10 @@ bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
+ unsigned long flags;
+
offset += cid_addr;
- spin_lock_bh(&bp->indirect_lock);
+ spin_lock_irqsave(&bp->indirect_lock, flags);
if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
int i;
@@ -322,7 +327,7 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
BNX2_WR(bp, BNX2_CTX_DATA, val);
}
- spin_unlock_bh(&bp->indirect_lock);
+ spin_unlock_irqrestore(&bp->indirect_lock, flags);
}
#ifdef BCM_CNIC
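The bnx2 hunks above switch the indirect-register lock from the _bh variants to irqsave/irqrestore, which preserves whatever interrupt state the caller already has; that appears to be the point, since these helpers can be reached from paths that already run with interrupts disabled, where re-enabling bottom halves on unlock would be unsafe. A generic sketch of the pattern (register offsets are hypothetical):

#include <linux/spinlock.h>
#include <linux/io.h>

static DEFINE_SPINLOCK(example_lock);

static u32 example_read_indirect(void __iomem *base, u32 offset)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&example_lock, flags);        /* works with IRQs on or off */
        writel(offset, base + 0x78);                    /* hypothetical window-address register */
        val = readl(base + 0x80);                       /* hypothetical window-data register */
        spin_unlock_irqrestore(&example_lock, flags);

        return val;
}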
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 67b6180bdbf6..ab990da677d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15244,7 +15244,7 @@ static void bnx2x_init_cyclecounter(struct bnx2x *bp)
memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
bp->cyclecounter.read = bnx2x_cyclecounter_read;
bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
- bp->cyclecounter.shift = 1;
+ bp->cyclecounter.shift = 0;
bp->cyclecounter.mult = 1;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index b0bb23f95beb..c0cc2ee77be7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4055,7 +4055,7 @@ static void cfg_queues(struct adapter *adap)
* capped by the number of available cores.
*/
if (n10g) {
- i = num_online_cpus();
+ i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
s->ofldqsets = roundup(i, adap->params.nports);
} else {
s->ofldqsets = adap->params.nports;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 0945fa49a5dd..2471ff465d5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -135,15 +135,17 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
}
static int alloc_uld_rxqs(struct adapter *adap,
- struct sge_uld_rxq_info *rxq_info,
- unsigned int nq, unsigned int offset, bool lro)
+ struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
- struct sge_ofld_rxq *q = rxq_info->uldrxq + offset;
- unsigned short *ids = rxq_info->rspq_id + offset;
- unsigned int per_chan = nq / adap->params.nports;
+ unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+ struct sge_ofld_rxq *q = rxq_info->uldrxq;
+ unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
- int i, err, msi_idx;
+ unsigned int per_chan;
+ int i, err, msi_idx, que_idx = 0;
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
if (adap->flags & USING_MSIX)
msi_idx = 1;
@@ -151,12 +153,18 @@ static int alloc_uld_rxqs(struct adapter *adap,
msi_idx = -((int)s->intrq.abs_id + 1);
for (i = 0; i < nq; i++, q++) {
+ if (i == rxq_info->nrxq) {
+ /* start allocation of concentrator queues */
+ per_chan = rxq_info->nciq / adap->params.nports;
+ que_idx = 0;
+ }
+
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
+ adap->port[que_idx++ / per_chan],
msi_idx,
q->fl.size ? &q->fl : NULL,
uldrx_handler,
@@ -165,29 +173,19 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (err)
goto freeout;
if (msi_idx >= 0)
- rxq_info->msix_tbl[i + offset] = bmap_idx;
+ rxq_info->msix_tbl[i] = bmap_idx;
memset(&q->stats, 0, sizeof(q->stats));
if (ids)
ids[i] = q->rspq.abs_id;
}
return 0;
freeout:
- q = rxq_info->uldrxq + offset;
+ q = rxq_info->uldrxq;
for ( ; i; i--, q++) {
if (q->rspq.desc)
free_rspq_fl(adap, &q->rspq,
q->fl.size ? &q->fl : NULL);
}
-
- /* We need to free rxq also in case of ciq allocation failure */
- if (offset) {
- q = rxq_info->uldrxq + offset;
- for ( ; i; i--, q++) {
- if (q->rspq.desc)
- free_rspq_fl(adap, &q->rspq,
- q->fl.size ? &q->fl : NULL);
- }
- }
return err;
}
@@ -205,9 +203,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
return -ENOMEM;
}
- ret = !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
- !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
- rxq_info->nrxq, lro));
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
/* Tell uP to route control queue completions to rdma rspq */
if (adap->flags & FULL_INIT_DONE &&
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index 539de764bbd3..cbd68a8fe2e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -210,8 +210,10 @@ static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
/* Unbind queue from any existing class */
err = t4_sched_queue_unbind(pi, p);
- if (err)
+ if (err) {
+ t4_free_mem(qe);
goto out;
+ }
/* Bind queue to specified class */
memset(qe, 0, sizeof(*qe));
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index e572a527b18d..36bc2c71fba9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -169,19 +169,28 @@ int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
struct vnic_dev *vdev = rq->vdev;
+ int i;
- iowrite32(0, &rq->ctrl->enable);
+ /* Due to a race condition with clearing RQ "mini-cache" in hw, we need
+ * to disable the RQ twice to guarantee that stale descriptors are not
+ * used when this RQ is re-enabled.
+ */
+ for (i = 0; i < 2; i++) {
+ iowrite32(0, &rq->ctrl->enable);
- /* Wait for HW to ACK disable request */
- for (wait = 0; wait < 1000; wait++) {
- if (!(ioread32(&rq->ctrl->running)))
- return 0;
- udelay(10);
- }
+ /* Wait for HW to ACK disable request */
+ for (wait = 20000; wait > 0; wait--)
+ if (!ioread32(&rq->ctrl->running))
+ break;
+ if (!wait) {
+ vdev_neterr(vdev, "Failed to disable RQ[%d]\n",
+ rq->index);
- vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index);
+ return -ETIMEDOUT;
+ }
+ }
- return -ETIMEDOUT;
+ return 0;
}
void vnic_rq_clean(struct vnic_rq *rq,
@@ -212,6 +221,11 @@ void vnic_rq_clean(struct vnic_rq *rq,
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
iowrite32(fetch_index, &rq->ctrl->posted_index);
+ /* Anytime we write fetch_index, we need to re-write 0 to rq->enable
+ * to re-sync internal VIC state.
+ */
+ iowrite32(0, &rq->ctrl->enable);
+
vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f928e6f79c89..223f35cc034c 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -669,6 +669,7 @@ static const struct of_device_id nps_enet_dt_ids[] = {
{ .compatible = "ezchip,nps-mgt-enet" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);
static struct platform_driver nps_enet_driver = {
.probe = nps_enet_probe,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 43b2839a3d11..01aebc0084a9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1430,14 +1430,14 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_put(skb, pkt_len - 4);
data = skb->data;
+ if (!is_copybreak && need_swap)
+ swap_buffer(data, pkt_len);
+
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC)
data = skb_pull_inline(skb, 2);
#endif
- if (!is_copybreak && need_swap)
- swap_buffer(data, pkt_len);
-
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
if (fep->bufdesc_ex)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index e28d960997af..2d0cb609adc3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
int ret;
char *mac_addr = (char *)addr;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ u8 port_num;
assert(mac_cb);
@@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
return ret;
}
- ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
- mac_addr, true);
+ ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
if (ret)
dev_err(handle->owner_dev,
"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
@@ -678,9 +682,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
ret = -EINVAL;
}
- if (!ret)
- hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index d8e99416ab24..55cbb6ce733c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -141,9 +141,10 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
*@port_num:port number
*
*/
-static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
- u8 vmid, u8 *port_num)
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
{
+ int q_num_per_vf, vf_num_per_port;
+ int vm_queue_id;
u8 tmp_port;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
@@ -174,6 +175,12 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+ vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
+ vm_queue_id = vmid * q_num_per_vf +
+ vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
switch (mac_cb->dsaf_dev->dsaf_mode) {
case DSAF_MODE_ENABLE_FIX:
tmp_port = 0;
@@ -193,7 +200,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
case DSAF_MODE_DISABLE_6PORT_2VM:
case DSAF_MODE_DISABLE_6PORT_4VM:
case DSAF_MODE_DISABLE_6PORT_16VM:
- tmp_port = vmid;
+ tmp_port = vm_queue_id;
break;
default:
dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 4cbdf14f5c16..d3a1f72ece0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb);
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status);
void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num);
#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 8e5b3f51b47b..8ea3d95fa483 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -760,16 +760,6 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
DSAF_CFG_MIX_MODE_S, !!en);
}
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
-{
- if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
- return;
-
- dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
- DSAFV2_SERDES_LBK_EN_B, !!en);
-}
-
/**
* hns_dsaf_tbl_stat_en - tbl
* @dsaf_id: dsa fabric id
@@ -2761,6 +2751,7 @@ static const struct of_device_id g_dsaf_match[] = {
{.compatible = "hisilicon,hns-dsaf-v2"},
{}
};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 35df187e66f1..c494fc52be74 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -466,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 *en);
int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
u32 en);
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index ef1107777c08..f0ed80d6ef9c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs(
"error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
+
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (timeout == 0)
+			/* timeout is 0: disable the gap time */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 0);
+ else
+			/* timeout is non-zero: restore the gap time to 1 */
+ dsaf_set_reg_field(rcb_common->io_base,
+ RCB_INT_GAP_TIME_REG + port_idx * 4,
+ PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+ 1);
+ }
+
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 4b8b803822d1..878950a42e6c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -417,6 +417,7 @@
#define RCB_CFG_OVERTIME_REG 0x9300
#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_INT_GAP_TIME_REG 0x9400
#define RCB_PORT_CFG_OVERTIME_REG 0x9430
#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
@@ -898,6 +899,9 @@
#define PPE_CNT_CLR_CE_B 0
#define PPE_CNT_CLR_SNAP_EN_B 1
+#define PPE_INT_GAPTIME_B 0
+#define PPE_INT_GAPTIME_M 0x3ff
+
#define PPE_COMMON_CNT_CLR_CE_B 0
#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
#define RCB_COM_TSO_MODE_B 0
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a7208673116c..60831a2ac86b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -575,7 +575,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
struct sk_buff *skb;
struct hnae_desc *desc;
struct hnae_desc_cb *desc_cb;
- struct ethhdr *eh;
unsigned char *va;
int bnum, length, i;
int pull_len;
@@ -601,7 +600,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
- skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -683,14 +681,6 @@ out_bnum_err:
return -EFAULT;
}
- /* filter out multicast pkt with the same src mac as this port */
- eh = eth_hdr(skb);
- if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
- ether_addr_equal(ndev->dev_addr, eh->h_source))) {
- dev_kfree_skb_any(skb);
- return -EFAULT;
- }
-
ring->stats.rx_pkts++;
ring->stats.rx_bytes += skb->len;
@@ -748,25 +738,37 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
ndev->last_rx = jiffies;
}
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
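/*
 * Worked example (illustrative, not part of the patch) of the arithmetic
 * above, with a hypothetical ring of desc_num = 1024 descriptors:
 *   ntc = 500, ntu = 10   ->  ntc >= ntu, unused = 0    + 500 - 10  = 490
 *   ntc = 10,  ntu = 500  ->  ntc <  ntu, unused = 1024 + 10  - 500 = 534
 * i.e. the number of descriptors between next_to_use and next_to_clean,
 * wrapping around the ring when next_to_use is ahead of next_to_clean.
 */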
+
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
int budget, void *v)
{
struct hnae_ring *ring = ring_data->ring;
struct sk_buff *skb;
- int num, bnum, ex_num;
+ int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns_desc_unused(ring);
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
rmb(); /* make sure num taken effect before the other data is touched */
recv_pkts = 0, recv_bds = 0, clean_count = 0;
-recv:
+ num -= unused_count;
+
while (recv_pkts < budget && recv_bds < num) {
/* reuse or realloc buffers */
- if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
clean_count = 0;
+ unused_count = hns_desc_unused(ring);
}
/* poll one pkt */
@@ -787,21 +789,11 @@ recv:
recv_pkts++;
}
- /* make all data has been write before submit */
- if (recv_pkts < budget) {
- ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
-
- if (ex_num > clean_count) {
- num += ex_num - clean_count;
- rmb(); /*complete read rx ring bd number*/
- goto recv;
- }
- }
-
out:
/* make sure all data has been written before submit */
- if (clean_count > 0)
- hns_nic_alloc_rx_buffers(ring_data, clean_count);
+ if (clean_count + unused_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
return recv_pkts;
}
@@ -811,6 +803,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
struct hnae_ring *ring = ring_data->ring;
int num = 0;
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
/* for hardware bug fixed */
num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
@@ -822,6 +816,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num == 0)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
int *bytes, int *pkts)
{
@@ -923,7 +931,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
- int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ int head;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
if (head != ring->next_to_clean) {
ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -933,6 +945,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
}
}
+static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head == ring->next_to_clean)
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring, 0);
+ else
+ napi_schedule(&ring_data->napi);
+}
+
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
struct hnae_ring *ring = ring_data->ring;
@@ -964,10 +988,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
if (clean_complete >= 0 && clean_complete < budget) {
napi_complete(napi);
- ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
- ring_data->ring, 0);
- if (ring_data->fini_process)
- ring_data->fini_process(ring_data);
+ ring_data->fini_process(ring_data);
return 0;
}
@@ -1559,6 +1580,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
return stats;
}
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ /* work around hardware queue loopback of broadcast/multicast packets */
+ if (!AE_IS_VER1(priv->enet_ver) &&
+ is_multicast_ether_addr(eth_hdr->h_dest))
+ return 0;
+ else
+ return fallback(ndev, skb);
+}
+
static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_open = hns_nic_net_open,
.ndo_stop = hns_nic_net_stop,
@@ -1574,6 +1610,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_poll_controller = hns_nic_poll_controller,
#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
+ .ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1735,7 +1772,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->queue_index = i;
rd->ring = &h->qs[i]->tx_ring;
rd->poll_one = hns_nic_tx_poll_one;
- rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+ hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1747,7 +1785,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->ring = &h->qs[i - h->q_num]->rx_ring;
rd->poll_one = hns_nic_rx_poll_one;
rd->ex_process = hns_nic_rx_up_pro;
- rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+ hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
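A minimal userspace sketch of the descriptor accounting behind hns_desc_unused() above; struct ring and the sample values are illustrative, not the driver's types.

#include <assert.h>
#include <stdio.h>

struct ring {
        int desc_num;      /* total descriptors in the ring */
        int next_to_clean; /* first descriptor not yet cleaned by the driver */
        int next_to_use;   /* next descriptor the driver will hand to hardware */
};

static int desc_unused(const struct ring *r)
{
        int ntc = r->next_to_clean;
        int ntu = r->next_to_use;

        /* When ntc < ntu the free region wraps past the end of the ring. */
        return ((ntc >= ntu) ? 0 : r->desc_num) + ntc - ntu;
}

int main(void)
{
        struct ring r = { .desc_num = 1024, .next_to_clean = 10, .next_to_use = 900 };

        assert(desc_unused(&r) == 134); /* 10 + 1024 - 900 */
        printf("unused descriptors: %d\n", desc_unused(&r));
        return 0;
}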
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 47e59bbfd061..87d5c94b2810 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -352,6 +352,13 @@ static int __lb_setup(struct net_device *ndev,
break;
}
+ if (!ret) {
+ if (loop == MAC_LOOP_NONE)
+ h->dev->ops->set_promisc_mode(
+ h, ndev->flags & IFF_PROMISC);
+ else
+ h->dev->ops->set_promisc_mode(h, 1);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 33f4c483af0f..501eb2090ca6 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -563,6 +563,7 @@ static const struct of_device_id hns_mdio_match[] = {
{.compatible = "hisilicon,hns-mdio"},
{}
};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
static const struct acpi_device_id hns_mdio_acpi_match[] = {
{ "HISI0141", 0 },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 657206be7ba9..d54405b46109 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1178,7 +1178,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
if (!scrq)
return NULL;
- scrq->msgs = (union sub_crq *)__get_free_pages(GFP_KERNEL, 2);
+ scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
if (!scrq->msgs) {
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
@@ -1449,14 +1449,16 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
return rc;
req_rx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
i = adapter->req_tx_queues;
req_tx_irq_failed:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
release_sub_crqs_no_irqs(adapter);
return rc;
}
@@ -3222,6 +3224,27 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
+static void ibmvnic_xport_event(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ ibmvnic_xport);
+ struct device *dev = &adapter->vdev->dev;
+ long rc;
+
+ ibmvnic_free_inflight(adapter);
+ release_sub_crqs(adapter);
+ if (adapter->migrated) {
+ rc = ibmvnic_reenable_crq_queue(adapter);
+ if (rc)
+ dev_err(dev, "Error after enable rc=%ld\n", rc);
+ adapter->migrated = false;
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc)
+ dev_err(dev, "Error sending init rc=%ld\n", rc);
+ }
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -3257,15 +3280,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
dev_info(dev, "Re-enabling adapter\n");
adapter->migrated = true;
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
- rc = ibmvnic_reenable_crq_queue(adapter);
- if (rc)
- dev_err(dev, "Error after enable rc=%ld\n", rc);
- adapter->migrated = false;
- rc = ibmvnic_send_crq_init(adapter);
- if (rc)
- dev_err(dev, "Error sending init rc=%ld\n", rc);
+ schedule_work(&adapter->ibmvnic_xport);
} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
dev_info(dev, "Backing device failover detected\n");
netif_carrier_off(netdev);
@@ -3274,8 +3289,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
/* The adapter lost the connection */
dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
gen_crq->cmd);
- ibmvnic_free_inflight(adapter);
- release_sub_crqs(adapter);
+ schedule_work(&adapter->ibmvnic_xport);
}
return;
case IBMVNIC_CRQ_CMD_RSP:
@@ -3644,6 +3658,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
netdev->min_mtu = adapter->min_mtu;
netdev->max_mtu = adapter->max_mtu;
@@ -3717,6 +3732,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
SET_NETDEV_DEV(netdev, &dev->dev);
INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+ INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
spin_lock_init(&adapter->stats_lock);
@@ -3784,6 +3800,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
+ netdev->mtu = adapter->req_mtu;
rc = register_netdev(netdev);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index bfc84c7d0e11..dd775d951b73 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -27,7 +27,7 @@
/**************************************************************************/
#define IBMVNIC_NAME "ibmvnic"
-#define IBMVNIC_DRIVER_VERSION "1.0"
+#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
#define IBMVNIC_STATS_TIMEOUT 1
/* basic structures plus 100 2k buffers */
@@ -1048,5 +1048,6 @@ struct ibmvnic_adapter {
u8 map_id;
struct work_struct vnic_crq_init;
+ struct work_struct ibmvnic_xport;
bool failover;
};
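A standalone sketch of the container_of() recovery used by the new ibmvnic_xport_event() worker; the struct names below are placeholders, only the pointer arithmetic matches the pattern in the patch.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item { int pending; };

struct adapter {
        int id;
        struct work_item xport_work; /* embedded work item, as in the patch */
};

static void xport_event(struct work_item *work)
{
        /* The callback only receives the embedded work item; recover the
         * owning adapter by subtracting the member offset. */
        struct adapter *a = container_of(work, struct adapter, xport_work);

        printf("recovering adapter %d\n", a->id);
}

int main(void)
{
        struct adapter a = { .id = 3 };

        xport_event(&a.xport_work);
        return 0;
}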
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 01cce5bab861..5a6f8518b4e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -92,6 +92,7 @@
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 7fa535f57820..d78a4dc7b00b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4641,29 +4641,6 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
}
/**
- * i40e_pf_get_default_tc - Get bitmap for first enabled TC
- * @pf: PF being queried
- *
- * Return a bitmap for first enabled traffic class for this PF.
- **/
-static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
-{
- u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
- u8 i = 0;
-
- if (!enabled_tc)
- return 0x1; /* TC0 */
-
- /* Find the first enabled TC */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- if (enabled_tc & BIT(i))
- break;
- }
-
- return BIT(i);
-}
-
-/**
* i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
* @pf: PF being queried
*
@@ -4673,7 +4650,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
/* If DCB is not enabled for this PF then just return default TC */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
/* SFP mode we want PF to be enabled for all TCs */
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
@@ -4683,7 +4660,7 @@ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
if (pf->hw.func_caps.iscsi)
return i40e_get_iscsi_tc_map(pf);
else
- return i40e_pf_get_default_tc(pf);
+ return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
@@ -5029,7 +5006,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (v == pf->lan_vsi)
tc_map = i40e_pf_get_tc_map(pf);
else
- tc_map = i40e_pf_get_default_tc(pf);
+ tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
if (pf->vsi[v]->type == I40E_VSI_FCOE)
tc_map = i40e_get_fcoe_tc_map(pf);
@@ -5717,7 +5694,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7707,6 +7684,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
pf->msix_entries = NULL;
+ pci_disable_msix(pf->pdev);
return -ENODEV;
} else if (v_actual == I40E_MIN_MSIX) {
@@ -9047,7 +9025,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
- nlflags, 0, 0, filter_mask, NULL);
+ 0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbd2cfa1b154..5e1f57c7ee1b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -9162,10 +9162,14 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
goto fwd_add_err;
fwd_adapter->pool = pool;
fwd_adapter->real_adapter = adapter;
- err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
- if (err)
- goto fwd_add_err;
- netif_tx_start_all_queues(vdev);
+
+ if (netif_running(pdev)) {
+ err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
+ if (err)
+ goto fwd_add_err;
+ netif_tx_start_all_queues(vdev);
+ }
+
return fwd_adapter;
fwd_add_err:
/* unwind counter and free adapter struct */
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 68675d83bdc5..8302c7e660ce 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2981,6 +2981,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static int get_phy_mode(struct mv643xx_eth_private *mp)
+{
+ struct device *dev = mp->dev->dev.parent;
+ int iface = -1;
+
+ if (dev->of_node)
+ iface = of_get_phy_mode(dev->of_node);
+
+ /* Historical default if unspecified. We could also read/write
+ * the interface state in the PSC1
+ */
+ if (iface < 0)
+ iface = PHY_INTERFACE_MODE_GMII;
+ return iface;
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -3007,7 +3023,7 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
"orion-mdio-mii", addr);
phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!IS_ERR(phydev)) {
phy_addr_set(mp, addr);
break;
@@ -3106,6 +3122,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ SET_NETDEV_DEV(dev, &pdev->dev);
mp = netdev_priv(dev);
platform_set_drvdata(pdev, mp);
@@ -3145,7 +3162,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (pd->phy_node) {
phydev = of_phy_connect(mp->dev, pd->phy_node,
mv643xx_eth_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ get_phy_mode(mp));
if (!phydev)
err = -ENODEV;
else
@@ -3203,12 +3220,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
dev->max_mtu = 9500;
- SET_NETDEV_DEV(dev, &pdev->dev);
if (mp->shared->win_protect)
wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index b1cef7a0f7ca..e36bebcab3f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2469,6 +2469,7 @@ err_comm_admin:
kfree(priv->mfunc.master.slave_state);
err_comm:
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
err_vhcr:
dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
priv->mfunc.vhcr,
@@ -2537,6 +2538,13 @@ void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
int slave;
u32 slave_read;
+ /* If the comm channel has not yet been initialized,
+ * skip reporting the internal error event to all
+ * the communication channels.
+ */
+ if (!priv->mfunc.comm)
+ return;
+
/* Report an internal error event to all
* communication channels.
*/
@@ -2571,6 +2579,7 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
}
iounmap(priv->mfunc.comm);
+ priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 08fc5fc56d43..a5fc46bbcbe2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -245,8 +245,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
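A userspace sketch of the overflow guard added to freq_to_shift(); roundup_pow_of_two64() is a local stand-in for the kernel helper and the sample inputs are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two; returns 0 when the result overflows. */
static uint64_t roundup_pow_of_two64(uint64_t x)
{
        uint64_t p = 1;

        while (p && p < x)
                p <<= 1;
        return p;
}

static int is_pow2(uint64_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors the new max_val_cycles_rounded computation: prefer 2^n - 1, but
 * fall back to a saturated value when the round-up overflows. */
static uint64_t rounded_mask(uint64_t max_val_cycles)
{
        uint64_t up = roundup_pow_of_two64(max_val_cycles);
        uint64_t tmp = up > max_val_cycles ? up - 1 : UINT32_MAX;

        return is_pow2(max_val_cycles + 1) ? max_val_cycles : tmp;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)rounded_mask(1000));             /* 1023 */
        printf("%llu\n", (unsigned long long)rounded_mask((1ULL << 63) + 5)); /* saturates */
        return 0;
}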
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 132cea655920..e3be7e44ff51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -127,7 +127,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
/* For TX we use the same irq per
ring we assigned for the RX */
struct mlx4_en_cq *rx_cq;
-
+ int xdp_index;
+
+ /* The xdp tx irq must align with the rx ring that forwards to
+ * it, so reindex these from 0. This should only happen when
+ * tx_ring_num is not a multiple of rx_ring_num.
+ */
+ xdp_index = (priv->xdp_ring_num - priv->tx_ring_num) + cq_idx;
+ if (xdp_index >= 0)
+ cq_idx = xdp_index;
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
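A sketch of the XDP TX to RX CQ index remapping above, assuming tx_ring_num counts both regular and XDP TX rings; the ring counts in main() are made up for the example.

#include <stdio.h>

/* XDP TX rings sit at the tail of the TX range, one per RX ring, and must
 * share the IRQ of the RX ring that feeds them, so they are rebased to
 * start at 0 before the modulo by rx_ring_num. */
static int rx_cq_index(int cq_idx, int tx_ring_num, int xdp_ring_num,
                       int rx_ring_num)
{
        int xdp_index = (xdp_ring_num - tx_ring_num) + cq_idx;

        if (xdp_index >= 0)
                cq_idx = xdp_index;
        return cq_idx % rx_ring_num;
}

int main(void)
{
        /* 8 regular TX rings + 4 XDP TX rings, 4 RX rings. */
        printf("TX CQ 9 -> RX CQ %d\n", rx_cq_index(9, 12, 4, 4)); /* 1 */
        printf("TX CQ 6 -> RX CQ %d\n", rx_cq_index(6, 12, 4, 4)); /* 2 */
        return 0;
}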
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bf35ac4c1c61..58b749dd6125 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1733,6 +1733,13 @@ int mlx4_en_start_port(struct net_device *dev)
udp_tunnel_get_rx_info(dev);
priv->port_up = true;
+
+ /* Process any outstanding completions to prevent
+ * the queues from freezing if they are full
+ */
+ for (i = 0; i < priv->rx_ring_num; i++)
+ napi_schedule(&priv->rx_cq[i]->napi);
+
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1910,8 +1917,9 @@ static void mlx4_en_clear_stats(struct net_device *dev)
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
- en_dbg(HW, priv, "Failed dumping statistics\n");
+ if (!mlx4_is_slave(mdev->dev))
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
@@ -2194,6 +2202,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (!shutdown)
free_netdev(dev);
+ dev->ethtool_ops = NULL;
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5aa8b751f417..59473a0ebcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -166,7 +166,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
+ MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -322,7 +322,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma,
in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL,
0, MLX4_CMD_DUMP_ETH_STATS,
- MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index b66e03d9711f..c06346a82496 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -118,6 +118,29 @@ mlx4_en_test_loopback_exit:
return !loopback_ok;
}
+static int mlx4_en_test_interrupts(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+ int i = 0;
+
+ err = mlx4_test_async(mdev->dev);
+ /* When not in MSI_X or slave, test only async */
+ if (!(mdev->dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(mdev->dev))
+ return err;
+
+ /* Loop over all completion vectors of the current port; for each
+ * vector, check that it works by mapping command completions to it
+ * and performing a NOP command
+ */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ err = mlx4_test_interrupt(mdev->dev, priv->rx_cq[i]->vector);
+ if (err)
+ break;
+ }
+
+ return err;
+}
static int mlx4_en_test_link(struct mlx4_en_priv *priv)
{
@@ -151,7 +174,6 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_dev *mdev = priv->mdev;
int i, carrier_ok;
memset(buf, 0, sizeof(u64) * MLX4_EN_NUM_SELF_TEST);
@@ -177,7 +199,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_on(dev);
}
- buf[0] = mlx4_test_interrupts(mdev->dev);
+ buf[0] = mlx4_en_test_interrupts(priv);
buf[1] = mlx4_en_test_link(priv);
buf[2] = mlx4_en_test_speed(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cf8f8a72a801..cd3638e6fe25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1361,53 +1361,49 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
kfree(priv->eq_table.uar_map);
}
-/* A test that verifies that we can accept interrupts on all
- * the irq vectors of the device.
+/* A test that verifies that we can accept interrupts
+ * on the vector allocated for asynchronous events
+ */
+int mlx4_test_async(struct mlx4_dev *dev)
+{
+ return mlx4_NOP(dev);
+}
+EXPORT_SYMBOL(mlx4_test_async);
+
+/* A test that verifies that we can accept interrupts
+ * on the given irq vector of the tested port.
* Interrupts are checked using the NOP command.
*/
-int mlx4_test_interrupts(struct mlx4_dev *dev)
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- int i;
int err;
- err = mlx4_NOP(dev);
- /* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
- return err;
-
- /* A loop over all completion vectors, for each vector we will check
- * whether it works by mapping command completions to that vector
- * and performing a NOP command
- */
- for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
- /* Make sure request_irq was called */
- if (!priv->eq_table.eq[i].have_irq)
- continue;
-
- /* Temporary use polling for command completions */
- mlx4_cmd_use_polling(dev);
-
- /* Map the new eq to handle all asynchronous events */
- err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[i].eqn);
- if (err) {
- mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
- mlx4_cmd_use_events(dev);
- break;
- }
+ /* Temporary use polling for command completions */
+ mlx4_cmd_use_polling(dev);
- /* Go back to using events */
- mlx4_cmd_use_events(dev);
- err = mlx4_NOP(dev);
+ /* Map the new eq to handle all asynchronous events */
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
+ priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
+ if (err) {
+ mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+ goto out;
}
+ /* Go back to using events */
+ mlx4_cmd_use_events(dev);
+ err = mlx4_NOP(dev);
+
/* Return to default */
+ mlx4_cmd_use_polling(dev);
+out:
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+ mlx4_cmd_use_events(dev);
+
return err;
}
-EXPORT_SYMBOL(mlx4_test_interrupts);
+EXPORT_SYMBOL(mlx4_test_interrupt);
bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index c41ab31a39f8..84bab9f0732e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -49,9 +49,9 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static bool enable_qos = true;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
-MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
+MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7183ac4135d2..6f4e67bc3538 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1102,6 +1102,14 @@ static int __set_port_type(struct mlx4_port_info *info,
int i;
int err = 0;
+ if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
+ mlx4_err(mdev,
+ "Requested port type for port %d is not supported on this HCA\n",
+ info->port);
+ err = -EINVAL;
+ goto err_sup;
+ }
+
mlx4_stop_sense(mdev);
mutex_lock(&priv->port_mutex);
info->tmp_type = port_type;
@@ -1147,7 +1155,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-
+err_sup:
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e4878f31e45d..88ee7d8a5923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -145,9 +145,10 @@ enum mlx4_resource {
RES_MTT,
RES_MAC,
RES_VLAN,
- RES_EQ,
+ RES_NPORT_ID,
RES_COUNTER,
RES_FS_RULE,
+ RES_EQ,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -1329,8 +1330,6 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
int port, void *buf);
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
- struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index c5b2064297a1..b656dd5772e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1728,24 +1728,13 @@ int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
-int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
- u32 in_mod, struct mlx4_cmd_mailbox *outbox)
-{
- return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_NATIVE);
-}
-
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
{
- if (slave != dev->caps.function)
- return 0;
- return mlx4_common_dump_eth_stats(dev, slave,
- vhcr->in_modifier, outbox);
+ return 0;
}
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 84d7857ccc27..c548beaaf910 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1605,13 +1605,14 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;
- if (eq)
- *eq = r;
}
}
spin_unlock_irq(mlx4_tlock(dev));
+ if (!err && eq)
+ *eq = r;
+
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 6cb38304669f..2c6e3c7b7417 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -41,6 +41,13 @@
#include "mlx5_core.h"
+struct mlx5_db_pgdir {
+ struct list_head list;
+ unsigned long *bitmap;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
/* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0.
*/
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
- bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+
+ if (!pgdir->bitmap) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ bitmap_fill(pgdir->bitmap, db_per_page);
pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
&pgdir->db_dma, node);
if (!pgdir->db_page) {
+ kfree(pgdir->bitmap);
kfree(pgdir);
return NULL;
}
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
int offset;
int i;
- i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
- if (i >= MLX5_DB_PER_PAGE)
+ i = find_first_bit(pgdir->bitmap, db_per_page);
+ if (i >= db_per_page)
return -ENOMEM;
__clear_bit(i, pgdir->bitmap);
db->u.pgdir = pgdir;
db->index = i;
- offset = db->index * L1_CACHE_BYTES;
+ offset = db->index * cache_line_size();
db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
db->dma = pgdir->db_dma + offset;
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
+ u32 db_per_page = PAGE_SIZE / cache_line_size();
mutex_lock(&dev->priv.pgdir_mutex);
__set_bit(db->index, db->u.pgdir->bitmap);
- if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir->bitmap);
kfree(db->u.pgdir);
}
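A sketch of the doorbell bookkeeping that now sizes its bitmap at runtime; PAGE_SIZE and cache_line_size() are replaced by illustrative constants.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        const size_t page_size = 4096;
        const size_t cache_line = 64; /* stand-in for cache_line_size() */
        size_t db_per_page = page_size / cache_line; /* 64 doorbell slots */
        unsigned long *bitmap;

        /* One bit per cache-line-sized doorbell slot in the page. */
        bitmap = calloc(BITS_TO_LONGS(db_per_page), sizeof(unsigned long));
        if (!bitmap)
                return 1;

        /* Set every bit so all slots start out free (the patch uses bitmap_fill()). */
        memset(bitmap, 0xff, BITS_TO_LONGS(db_per_page) * sizeof(unsigned long));

        printf("%zu doorbells per page, %zu longs of bitmap\n",
               db_per_page, BITS_TO_LONGS(db_per_page));
        free(bitmap);
        return 0;
}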
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 460363b66cb1..7a43502a89cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -85,6 +85,9 @@
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT 32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -221,6 +224,7 @@ struct mlx5e_params {
struct ieee_ets ets;
#endif
bool rx_am_enabled;
+ u32 lro_timeout;
};
struct mlx5e_tstamp {
@@ -888,5 +892,6 @@ int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 03183eba7003..d3f13b5db5b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1971,9 +1971,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
@@ -3383,6 +3381,18 @@ static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
}
}
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
const struct mlx5e_profile *profile,
@@ -3401,6 +3411,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->params.lro_timeout =
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
@@ -4023,7 +4036,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- unregister_netdev(netdev);
destroy_workqueue(priv->wq);
if (profile->cleanup)
profile->cleanup(priv);
@@ -4040,6 +4052,7 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
for (vport = 1; vport < total_vfs; vport++)
mlx5_eswitch_unregister_vport_rep(esw, vport);
+ unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(mdev, priv);
}
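A sketch of the period selection done by mlx5e_choose_lro_timeout(); the array contents are illustrative, not the values reported by real hardware.

#include <stdio.h>

#define LRO_TIMEOUT_ARR_SIZE 4

/* Walk the (ascending) supported periods and take the first one that is at
 * least the wanted timeout, falling back to the largest entry. */
static unsigned int choose_lro_timeout(const unsigned int *supported,
                                       unsigned int wanted)
{
        int i;

        for (i = 0; i < LRO_TIMEOUT_ARR_SIZE - 1; i++)
                if (supported[i] >= wanted)
                        break;
        return supported[i];
}

int main(void)
{
        const unsigned int periods[LRO_TIMEOUT_ARR_SIZE] = { 8, 16, 32, 1024 };

        printf("%u\n", choose_lro_timeout(periods, 32));   /* -> 32 */
        printf("%u\n", choose_lro_timeout(periods, 4000)); /* -> 1024 (largest) */
        return 0;
}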
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3c97da103d30..7fe6559e4ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -457,6 +457,7 @@ void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
struct mlx5e_priv *priv = rep->priv_data;
struct net_device *netdev = priv->netdev;
+ unregister_netdev(netdev);
mlx5e_detach_netdev(esw->dev, netdev);
mlx5e_destroy_netdev(esw->dev, priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index abbf2c369923..be1f7333ab7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -931,8 +931,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -949,9 +949,11 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
int table_size = 2;
int err = 0;
- if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->egress.acl))
- return;
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->egress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
@@ -959,12 +961,12 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1009,6 +1011,7 @@ out:
mlx5_destroy_flow_group(vlan_grp);
if (err && !IS_ERR_OR_NULL(acl))
mlx5_destroy_flow_table(acl);
+ return err;
}
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
@@ -1041,8 +1044,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1063,9 +1066,11 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
int table_size = 4;
int err = 0;
- if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
- !IS_ERR_OR_NULL(vport->ingress.acl))
- return;
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ return 0;
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
@@ -1073,12 +1078,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return;
+ return -EIO;
}
flow_group_in = mlx5_vzalloc(inlen);
if (!flow_group_in)
- return;
+ return -ENOMEM;
acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
if (IS_ERR(acl)) {
@@ -1167,6 +1172,7 @@ out:
}
kvfree(flow_group_in);
+ return err;
}
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1225,7 +1231,13 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_ingress_acl(esw, vport);
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
@@ -1299,7 +1311,13 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return 0;
}
- esw_vport_enable_egress_acl(esw, vport);
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5da2cc878582..89696048b045 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -436,6 +436,9 @@ static void del_flow_group(struct fs_node *node)
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
+
if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
@@ -879,7 +882,7 @@ static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *
tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
tree_add_node(&fg->node, &ft->node);
/* Add node to group list */
- list_add(&fg->node.list, ft->node.children.prev);
+ list_add(&fg->node.list, prev_fg);
return fg;
}
@@ -893,7 +896,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return ERR_PTR(-EPERM);
lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, &ft->node.children, false);
+ fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
unlock_ref_node(&ft->node);
return fg;
@@ -1012,7 +1015,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = &ft->node.children;
+ struct list_head *prev = ft->node.children.prev;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 3a9195b4169d..3b026c151cf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -218,6 +218,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out;
if (aging) {
+ counter->cache.lastuse = jiffies;
counter->aging = true;
spin_lock(&fc_stats->addlist_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 1a05fb965c8d..5bcf93422ee0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -61,10 +61,15 @@ enum {
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
- MLX5_NIC_IFC_NO_DRAM_NIC = 2
+ MLX5_NIC_IFC_NO_DRAM_NIC = 2,
+ MLX5_NIC_IFC_INVALID = 3
};
-static u8 get_nic_interface(struct mlx5_core_dev *dev)
+enum {
+ MLX5_DROP_NEW_HEALTH_WORK,
+};
+
+static u8 get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
}
@@ -97,7 +102,7 @@ static int in_fatal(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- if (get_nic_interface(dev) == MLX5_NIC_IFC_DISABLED)
+ if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
return 1;
if (ioread32be(&h->fw_ver) == 0xffffffff)
@@ -127,7 +132,7 @@ unlock:
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
- u8 nic_interface = get_nic_interface(dev);
+ u8 nic_interface = get_nic_state(dev);
switch (nic_interface) {
case MLX5_NIC_IFC_FULL:
@@ -149,8 +154,34 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
+static void health_recover(struct work_struct *work)
+{
+ struct mlx5_core_health *health;
+ struct delayed_work *dwork;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u8 nic_state;
+
+ dwork = container_of(work, struct delayed_work, work);
+ health = container_of(dwork, struct mlx5_core_health, recover_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ nic_state = get_nic_state(dev);
+ if (nic_state == MLX5_NIC_IFC_INVALID) {
+ dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ return;
+ }
+
+ dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_recover_device(dev);
+}
+
+/* How long to wait before health recovery resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
static void health_care(struct work_struct *work)
{
+ unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
struct mlx5_core_health *health;
struct mlx5_core_dev *dev;
struct mlx5_priv *priv;
@@ -160,6 +191,14 @@ static void health_care(struct work_struct *work)
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
mlx5_handle_bad_state(dev);
+
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ schedule_delayed_work(&health->recover_work, recover_delay);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
static const char *hsynd_str(u8 synd)
@@ -272,7 +311,13 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- schedule_work(&health->work);
+ spin_lock(&health->wq_lock);
+ if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+ queue_work(health->wq, &health->work);
+ else
+ dev_err(&dev->pdev->dev,
+ "new health works are not permitted at this stage\n");
+ spin_unlock(&health->wq_lock);
}
}
@@ -281,6 +326,8 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
struct mlx5_core_health *health = &dev->priv.health;
init_timer(&health->timer);
+ health->sick = 0;
+ clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -297,11 +344,22 @@ void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
del_timer_sync(&health->timer);
}
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ spin_lock(&health->wq_lock);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ spin_unlock(&health->wq_lock);
+ cancel_delayed_work_sync(&health->recover_work);
+ cancel_work_sync(&health->work);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- flush_work(&health->work);
+ destroy_workqueue(health->wq);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -316,9 +374,13 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
+ health->wq = create_singlethread_workqueue(name);
kfree(name);
-
+ if (!health->wq)
+ return -ENOMEM;
+ spin_lock_init(&health->wq_lock);
INIT_WORK(&health->work, health_care);
+ INIT_DELAYED_WORK(&health->recover_work, health_recover);
return 0;
}
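A sketch of the drain-vs-queue handshake introduced above, with a pthread mutex standing in for the spinlock and a counter standing in for the workqueue.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool drop_new_work;
static int queued;

static void report_fatal_event(void)
{
        pthread_mutex_lock(&wq_lock);
        if (!drop_new_work)
                queued++; /* queue_work(health->wq, &health->work) */
        else
                puts("new health work is not permitted at this stage");
        pthread_mutex_unlock(&wq_lock);
}

static void drain_health_wq(void)
{
        pthread_mutex_lock(&wq_lock);
        drop_new_work = true; /* set_bit(MLX5_DROP_NEW_HEALTH_WORK, ...) */
        pthread_mutex_unlock(&wq_lock);
        queued = 0;           /* cancel_work_sync() on the pending work */
}

int main(void)
{
        report_fatal_event(); /* queued */
        drain_health_wq();
        report_fatal_event(); /* rejected */
        printf("pending after drain: %d\n", queued);
        return 0;
}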
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d9c3c70b29e4..d5433c49b2b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -844,12 +844,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
- err = mlx5_query_hca_caps(dev);
- if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto out;
- }
-
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
@@ -1023,6 +1017,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_start_health_poll(dev);
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ dev_err(&pdev->dev, "query hca failed\n");
+ goto err_stop_poll;
+ }
+
if (boot && mlx5_init_once(dev, priv)) {
dev_err(&pdev->dev, "sw objs init failed\n");
goto err_stop_poll;
@@ -1313,10 +1313,16 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
struct mlx5_priv *priv = &dev->priv;
dev_info(&pdev->dev, "%s was called\n", __func__);
+
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- pci_save_state(pdev);
- mlx5_pci_disable_device(dev);
+ /* In the case of a kernel call, save the PCI state and drain the health wq */
+ if (state) {
+ pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
+ mlx5_pci_disable_device(dev);
+ }
+
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
@@ -1373,11 +1379,6 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1427,6 +1428,18 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_disable_device(dev);
+ if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+ mlx5_pci_resume(dev->pdev);
+}
+
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 3d0cfb9f18f9..187662c8ea96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -83,6 +83,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index cc4fd61914d3..a57d5a81eb05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -209,6 +209,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct page *page;
+ u64 zero_addr = 1;
u64 addr;
int err;
int nid = dev_to_node(&dev->pdev->dev);
@@ -218,26 +219,35 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
mlx5_core_warn(dev, "failed to allocate page\n");
return -ENOMEM;
}
+map:
addr = dma_map_page(&dev->pdev->dev, page, 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&dev->pdev->dev, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
- goto out_alloc;
+ goto err_mapping;
}
+
+ /* Firmware doesn't support a page with physical address 0 */
+ if (addr == 0) {
+ zero_addr = addr;
+ goto map;
+ }
+
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- goto out_mapping;
+ dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
- return 0;
-
-out_mapping:
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+err_mapping:
+ if (err)
+ __free_page(page);
-out_alloc:
- __free_page(page);
+ if (zero_addr == 0)
+ dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
return err;
}
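A sketch of the address-zero retry added to alloc_system_page(); fake_dma_map() is a stand-in for dma_map_page() and simply returns 0 on its first call.

#include <stdint.h>
#include <stdio.h>

static uint64_t next_addr;

static uint64_t fake_dma_map(void)
{
        uint64_t a = next_addr;

        next_addr += 0x1000;
        return a; /* first call returns 0, the address firmware cannot use */
}

int main(void)
{
        uint64_t zero_addr = 1; /* sentinel: 1 means no zero mapping is held */
        uint64_t addr;

        /* Keep the zero mapping so it is not handed out again, then retry. */
        do {
                addr = fake_dma_map();
                if (addr == 0)
                        zero_addr = addr;
        } while (addr == 0);

        if (zero_addr == 0)
                printf("releasing the address-0 mapping\n");
        printf("using bus address 0x%llx\n", (unsigned long long)addr);
        return 0;
}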
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 63d89f787ad7..d5cf1ea1c89a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1757,11 +1757,17 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.cmd_exec = mlxsw_pci_cmd_exec,
};
-static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
+ const struct pci_device_id *id)
{
unsigned long end;
mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+ if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
+ msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+ return 0;
+ }
+
wmb(); /* reset needs to be written before we read control register */
end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
@@ -1829,7 +1835,7 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
- err = mlxsw_pci_sw_reset(mlxsw_pci);
+ err = mlxsw_pci_sw_reset(mlxsw_pci, id);
if (err) {
dev_err(&pdev->dev, "Software reset failed\n");
goto err_sw_reset;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 107934fe06ce..348c77339d88 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -320,6 +320,8 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
lpm_tree);
if (err)
goto err_left_struct_set;
+ memcpy(&lpm_tree->prefix_usage, prefix_usage,
+ sizeof(lpm_tree->prefix_usage));
return lpm_tree;
err_left_struct_set:
@@ -343,7 +345,8 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
lpm_tree = &mlxsw_sp->router.lpm_trees[i];
- if (lpm_tree->proto == proto &&
+ if (lpm_tree->ref_count != 0 &&
+ lpm_tree->proto == proto &&
mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
prefix_usage))
goto inc_ref_count;
@@ -1815,19 +1818,17 @@ err_fib_entry_insert:
return err;
}
-static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib_entry *fib_entry;
if (mlxsw_sp->router.aborted)
- return 0;
+ return;
fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
- if (!fib_entry) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
- return -ENOENT;
- }
+ if (!fib_entry)
+ return;
if (fib_entry->ref_count == 1) {
mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
@@ -1835,7 +1836,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
}
mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
- return 0;
}
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1857,7 +1857,8 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
if (err)
return err;
- mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4, 0);
+ mlxsw_reg_raltb_pack(raltb_pl, 0, MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_SP_LPM_TREE_MIN);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
if (err)
return err;
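A sketch of the reuse guard added to the LPM tree lookup above; the structure layout and sizes are illustrative.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define TREE_COUNT 4

struct lpm_tree {
        int ref_count;
        int proto;
        unsigned char prefix_usage[8];
};

/* A cached tree may only be reused if it is actually in use (ref_count != 0);
 * otherwise its stale prefix_usage could match by accident. */
static struct lpm_tree *lpm_tree_get(struct lpm_tree *trees, int proto,
                                     const unsigned char *usage)
{
        int i;

        for (i = 0; i < TREE_COUNT; i++) {
                struct lpm_tree *t = &trees[i];

                if (t->ref_count != 0 && t->proto == proto &&
                    memcmp(t->prefix_usage, usage, sizeof(t->prefix_usage)) == 0) {
                        t->ref_count++;
                        return t;
                }
        }
        return NULL; /* the caller would create a new tree here */
}

int main(void)
{
        struct lpm_tree trees[TREE_COUNT] = {
                { .ref_count = 0, .proto = 4, .prefix_usage = { 1 } }, /* stale */
                { .ref_count = 2, .proto = 4, .prefix_usage = { 1 } },
        };
        unsigned char wanted[8] = { 1 };
        struct lpm_tree *t = lpm_tree_get(trees, 4, wanted);

        printf("reused tree index: %td\n", t ? t - trees : -1);
        return 0;
}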
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index dc3c5ed73ecd..8c8f5d8a2113 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -1094,6 +1094,7 @@ err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
+ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0df1391f9663..32f2a45f4ab2 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,15 +107,7 @@ config QEDE
---help---
This enables the support for ...
-config INFINIBAND_QEDR
- tristate "QLogic qede RoCE sources [debug]"
- depends on QEDE && 64BIT
- select QED_LL2
- default n
- ---help---
- This provides a temporary node that allows the compilation
- and logical testing of the InfiniBand over Ethernet support
- for QLogic QED. This would be replaced by the 'real' option
- once the QEDR driver is added [+relocated].
+config QED_RDMA
+ bool
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_selftest.o qed_dcbx.o qed_debug.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
+qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..0c42c240b5cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
#define TM_ALIGN BIT(TM_SHIFT)
#define TM_ELEM_SIZE 4
-/* ILT constants */
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE 4
-#else
-#define ILT_DEFAULT_HW_P_SIZE 3
-#endif
+#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -349,14 +344,14 @@ static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
return NULL;
}
-void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
p_mgr->srq_count = num_srqs;
}
-u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1804,8 +1799,8 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
return 0;
}
-void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
- struct qed_rdma_pf_params *p_params)
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
enum protocol_type proto;
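A sketch of how the new ILT_DEFAULT_HW_P_SIZE definition folds at compile time; IS_ENABLED() is re-implemented trivially here for a standalone build.

#include <stdio.h>

#define CONFIG_QED_RDMA 1     /* pretend the option is set */
#define IS_ENABLED(cfg) (cfg) /* simplified stand-in for the kernel macro */

/* The conditional replaces the old #if/#else pair: 4 (64K pages) with RDMA,
 * 3 (32K pages) without. */
#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))

int main(void)
{
        printf("hw_p_size=%d -> %u bytes per ILT page\n",
               ILT_DEFAULT_HW_P_SIZE, ILT_PAGE_IN_BYTES(ILT_DEFAULT_HW_P_SIZE));
        return 0;
}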
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 130da1c0490b..a4789a93b692 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1190,6 +1190,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
if (!dcbx_info)
return -ENOMEM;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
if (rc) {
kfree(dcbx_info);
@@ -1225,6 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
if (!dcbx_info)
return NULL;
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
kfree(dcbx_info);
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 88e7d5bef909..68f19ca57f96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -405,7 +405,7 @@ struct phy_defs {
/***************************** Constant Arrays *******************************/
/* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
@@ -4028,10 +4028,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
}
/* Dump MCP Trace */
-enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
@@ -4130,10 +4130,10 @@ enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
}
/* Dump GRC FIFO */
-enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4192,10 +4192,10 @@ enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Dump IGU FIFO */
-enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
{
u32 offset = 0, dwords_read, size_param_offset;
bool fifo_has_data;
@@ -4255,10 +4255,11 @@ enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
}
/* Protection Override dump */
-enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- bool dump, u32 *num_dumped_dwords)
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
{
u32 offset = 0, size_param_offset, override_window_dwords;
@@ -6339,10 +6340,11 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
}
/* Wrapper for unifying the idle_chk and mcp_trace api */
-enum dbg_status qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf)
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
{
u32 num_errors, num_warnnings;
@@ -6413,8 +6415,8 @@ static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
#define QED_RESULTS_BUF_MIN_SIZE 16
/* Generic function for decoding debug feature info */
-enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
- enum qed_dbg_features feature_idx)
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
@@ -6480,8 +6482,9 @@ enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
}
/* Generic function for performing the dump of a debug feature. */
-enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
- enum qed_dbg_features feature_idx)
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_params.features[feature_idx];
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..edae5fc5fccd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -497,12 +497,13 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
PROTOCOLID_ROCE,
- 0) * 2;
+ NULL) * 2;
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ NULL);
n_eqes += 2 * num_cons;
}
@@ -1422,19 +1423,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
u32 *feat_num = p_hwfn->hw_info.feat_num;
int num_features = 1;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
- * status blocks equally between L2 / RoCE but with consideration as
- * to how many l2 queues / cnqs we have
- */
- if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 / RoCE but with
+ * consideration as to how many l2 queues / cnqs we have.
+ */
num_features++;
feat_num[QED_RDMA_CNQ] =
min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
}
-#endif
+
feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
num_features,
RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..63e1a1b0ef8e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_roce.h"
#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
@@ -140,11 +141,11 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev)
qed_ll2_dealloc_buffer(cdev, buffer);
}
-void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- struct qed_ll2_rx_packet *p_pkt,
- struct core_rx_fast_path_cqe *p_cqe,
- bool b_last_packet)
+static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ struct qed_ll2_rx_packet *p_pkt,
+ struct core_rx_fast_path_cqe *p_cqe,
+ bool b_last_packet)
{
u16 packet_length = le16_to_cpu(p_cqe->packet_length);
struct qed_ll2_buffer *buffer = p_pkt->cookie;
@@ -515,7 +516,7 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
return rc;
}
-void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
struct qed_ll2_info *p_ll2_conn = NULL;
struct qed_ll2_rx_packet *p_pkt = NULL;
@@ -537,8 +538,7 @@ void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
rx_buf_addr = p_pkt->rx_buf_addr;
cookie = p_pkt->cookie;
@@ -992,9 +992,8 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
p_posting_packet = list_first_entry(&p_rx->posting_descq,
struct qed_ll2_rx_packet,
list_entry);
- list_del(&p_posting_packet->list_entry);
- list_add_tail(&p_posting_packet->list_entry,
- &p_rx->active_descq);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
b_notify_fw = true;
}
@@ -1123,9 +1122,6 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(start_bd->addr, first_frag);
start_bd->nbytes = cpu_to_le16(first_frag_len);
- SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
- type);
-
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
@@ -1188,8 +1184,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
if (!p_pkt)
break;
- list_del(&p_pkt->list_entry);
- list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_connections);
-void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t rx_buf_addr,
- u16 data_length,
- u8 data_length_error,
- u16 parse_flags,
- u16 vlan,
- u32 src_mac_addr_hi,
- u16 src_mac_addr_lo, bool b_last_packet);
-void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
-void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
- u8 connection_handle,
- void *cookie,
- dma_addr_t first_frag_addr,
- bool b_last_fragment, bool b_last_packet);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..c418360ba02a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
#include "qed_hw.h"
#include "qed_selftest.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
-#endif
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
enum qed_int_mode int_mode)
{
struct qed_sb_cnt_info sb_cnt_info;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- int num_l2_queues;
-#endif
+ int num_l2_queues = 0;
int rc;
int i;
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
cdev->num_hwfns;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- num_l2_queues = 0;
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return 0;
+
for_each_hwfn(cdev, i)
num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
cdev->int_params.rdma_msix_cnt,
cdev->int_params.rdma_msix_base);
-#endif
return 0;
}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
{
int i;
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
- params->rdma_pf_params.num_qps = QED_ROCE_QPS;
- params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
- /* divide by 3 the MRs to avoid MF ILT overflow */
- params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
- params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
-#endif
for (i = 0; i < cdev->num_hwfns; i++) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
p_hwfn->pf_params = *params;
}
+
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ return;
+
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
}
static int qed_slowpath_start(struct qed_dev *cdev,
@@ -880,6 +878,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
}
}
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
rc = qed_nic_setup(cdev);
if (rc)
goto err;
@@ -1432,7 +1431,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-struct qed_selftest_ops qed_selftest_ops_pass = {
+static struct qed_selftest_ops qed_selftest_ops_pass = {
.selftest_memory = &qed_selftest_memory,
.selftest_interrupt = &qed_selftest_interrupt,
.selftest_register = &qed_selftest_register,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index b11beb559981..6a353ffb87a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
}
}
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
{
/* First sb id for RoCE is after all the l2 sb */
return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
}
-u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
-{
- return QED_CAU_DEF_RX_TIMER_RES;
-}
-
static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_rdma_start_in_params *params)
@@ -162,7 +157,8 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
p_hwfn->p_rdma_info = p_rdma_info;
p_rdma_info->proto = PROTOCOLID_ROCE;
- num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
p_rdma_info->num_qps = num_cons / 2;
@@ -275,7 +271,7 @@ free_rdma_info:
return rc;
}
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -527,6 +523,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
{
struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +589,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
return qed_rdma_start_fw(p_hwfn, params, p_ptt);
}
-int qed_rdma_stop(void *rdma_cxt)
+static int qed_rdma_stop(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +645,8 @@ out:
return rc;
}
-int qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params)
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 dpi_start_offset;
@@ -664,7 +680,7 @@ int qed_rdma_add_user(void *rdma_cxt,
return rc;
}
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +696,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
return p_port;
}
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -690,7 +706,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
return p_hwfn->p_rdma_info->dev;
}
-void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -701,27 +717,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- int rc;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
-
- spin_lock_bh(&p_hwfn->p_rdma_info->lock);
- rc = qed_rdma_bmap_alloc_id(p_hwfn,
- &p_hwfn->p_rdma_info->tid_map, itid);
- spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
- if (rc)
- goto out;
-
- rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
-out:
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
- return rc;
-}
-
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
{
struct qed_hwfn *p_hwfn;
u16 qz_num;
@@ -816,7 +812,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
return 0;
}
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
u32 returned_id;
@@ -836,7 +832,7 @@ int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
return rc;
}
-void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -873,8 +869,9 @@ qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
return toggle_bit;
}
-int qed_rdma_create_cq(void *rdma_cxt,
- struct qed_rdma_create_cq_in_params *params, u16 *icid)
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
@@ -957,98 +954,10 @@ err:
return rc;
}
-int qed_rdma_resize_cq(void *rdma_cxt,
- struct qed_rdma_resize_cq_in_params *in_params,
- struct qed_rdma_resize_cq_out_params *out_params)
-{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
- struct rdma_resize_cq_output_params *p_ramrod_res;
- struct rdma_resize_cq_ramrod_data *p_ramrod;
- enum qed_rdma_toggle_bit toggle_bit;
- struct qed_sp_init_data init_data;
- struct qed_spq_entry *p_ent;
- dma_addr_t ramrod_res_phys;
- u8 fw_return_code;
- int rc = -ENOMEM;
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
-
- p_ramrod_res =
- (struct rdma_resize_cq_output_params *)
- dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- &ramrod_res_phys, GFP_KERNEL);
- if (!p_ramrod_res) {
- DP_NOTICE(p_hwfn,
- "qed resize cq failed: cannot allocate memory (ramrod)\n");
- return rc;
- }
-
- /* Get SPQ entry */
- memset(&init_data, 0, sizeof(init_data));
- init_data.cid = in_params->icid;
- init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
- init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
-
- rc = qed_sp_init_request(p_hwfn, &p_ent,
- RDMA_RAMROD_RESIZE_CQ,
- p_hwfn->p_rdma_info->proto, &init_data);
- if (rc)
- goto err;
-
- p_ramrod = &p_ent->ramrod.rdma_resize_cq;
-
- p_ramrod->flags = 0;
-
- /* toggle the bit for every resize or create cq for a given icid */
- toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
- in_params->icid);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
-
- SET_FIELD(p_ramrod->flags,
- RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
- in_params->pbl_two_level);
-
- p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
- p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
- p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
- DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
- DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
-
- rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
- if (rc)
- goto err;
-
- if (fw_return_code != RDMA_RETURN_OK) {
- DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
- rc = -EINVAL;
- goto err;
- }
-
- out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
- out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
-
- dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
-
- DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
-
- return rc;
-
-err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- sizeof(struct rdma_resize_cq_output_params),
- p_ramrod_res, ramrod_res_phys);
- DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
-
- return rc;
-}
-
-int qed_rdma_destroy_cq(void *rdma_cxt,
- struct qed_rdma_destroy_cq_in_params *in_params,
- struct qed_rdma_destroy_cq_out_params *out_params)
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_destroy_cq_output_params *p_ramrod_res;
@@ -1169,7 +1078,7 @@ static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
return flavor;
}
-int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
u32 responder_icid;
@@ -1793,9 +1702,9 @@ err:
return rc;
}
-int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
struct roce_query_qp_req_output_params *p_req_ramrod_res;
@@ -1936,7 +1845,7 @@ err_resp:
return rc;
}
-int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
u32 num_invalidated_mw = 0;
u32 num_bound_mw = 0;
@@ -1985,9 +1894,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
return 0;
}
-int qed_rdma_query_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params)
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc;
@@ -2022,7 +1931,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
int rc = 0;
@@ -2038,7 +1947,7 @@ int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
return rc;
}
-struct qed_rdma_qp *
+static struct qed_rdma_qp *
qed_rdma_create_qp(void *rdma_cxt,
struct qed_rdma_create_qp_in_params *in_params,
struct qed_rdma_create_qp_out_params *out_params)
@@ -2215,9 +2124,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_rdma_modify_qp(void *rdma_cxt,
- struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params)
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
enum qed_roce_qp_state prev_state;
@@ -2312,8 +2221,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
return rc;
}
-int qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params)
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2360,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
return rc;
}
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2471,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_conf(p_hwfn, p_ptt);
}
-int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
struct qed_ptt *p_ptt;
@@ -2601,7 +2512,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
}
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
{
struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -2808,11 +2719,6 @@ static int qed_roce_ll2_stop(struct qed_dev *cdev)
struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
int rc;
- if (!cdev) {
- DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
- return -EINVAL;
- }
-
if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
return -EINVAL;
@@ -2849,7 +2755,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev,
int rc;
int i;
- if (!cdev || !pkt || !params) {
+ if (!pkt || !params) {
DP_ERR(cdev,
"roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
cdev, pkt, params);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..279f342af8db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -95,26 +95,6 @@ struct qed_rdma_info {
enum protocol_type proto;
};
-struct qed_rdma_resize_cq_in_params {
- u16 icid;
- u32 cq_size;
- bool pbl_two_level;
- u64 pbl_ptr;
- u16 pbl_num_pages;
- u8 pbl_page_size_log;
-};
-
-struct qed_rdma_resize_cq_out_params {
- u32 prod;
- u32 cons;
-};
-
-struct qed_rdma_resize_cnq_in_params {
- u32 cnq_id;
- u32 pbl_page_size_log;
- u64 pbl_ptr;
-};
-
struct qed_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
@@ -181,36 +161,55 @@ struct qed_rdma_qp {
dma_addr_t shared_queue_phys_addr;
};
-int
-qed_rdma_add_user(void *rdma_cxt,
- struct qed_rdma_add_user_out_params *out_params);
-int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
-int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
-int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
-void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
-struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
-struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
-int
-qed_rdma_register_tid(void *rdma_cxt,
- struct qed_rdma_register_tid_in_params *params);
-void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
-int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
-int qed_rdma_stop(void *rdma_cxt);
-u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
-u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
-void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
-void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_async_roce_event(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe);
-int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
-int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_modify_qp_in_params *params);
-int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
- struct qed_rdma_query_qp_out_params *out_params);
-
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet);
+void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo, bool b_last_packet);
#else
-void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet) {}
+static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ u16 data_length,
+ u8 data_length_error,
+ u16 parse_flags,
+ u16 vlan,
+ u32 src_mac_addr_hi,
+ u16 src_mac_addr_lo,
+ bool b_last_packet) {}
#endif
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 27c450fd2193..9c897bc68d05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -80,7 +80,6 @@ union ramrod_data {
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct rdma_create_cq_ramrod_data rdma_create_cq;
- struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 6c05402ea4dc..019960b7855a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
#include "qed_roce.h"
-#endif
/***************************************************************************
* Structures & Definitions
@@ -272,11 +270,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
case PROTOCOLID_ROCE:
qed_async_roce_event(p_hwfn, p_eqe);
return 0;
-#endif
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 9135b9d37dfa..cf8d3547aecf 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -349,12 +349,13 @@ bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN 128
-#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
#define TX_RING_SIZE_POW 13
#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b7dbb4493a64..0100f5c0a4ec 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -748,6 +748,8 @@ static void qede_get_channels(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
edev->fp_num_rx;
channels->tx_count = edev->fp_num_tx;
@@ -812,6 +814,13 @@ static int qede_set_channels(struct net_device *dev,
edev->req_queues = count;
edev->req_num_tx = channels->tx_count;
edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(&edev->rss_params.rss_ind_table, 0,
+ sizeof(edev->rss_params.rss_ind_table));
+ }
+
if (netif_running(dev))
qede_reload(edev, NULL, NULL);
@@ -1045,6 +1054,12 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
struct qede_dev *edev = netdev_priv(dev);
int i;
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -1176,8 +1191,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
}
first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
txq->sw_tx_cons++;
txq->sw_tx_ring[idx].skb = NULL;
@@ -1191,8 +1206,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
struct qede_rx_queue *rxq = NULL;
struct sw_rx_data *sw_rx_data;
union eth_rx_cqe *cqe;
+ int i, rc = 0;
u8 *data_ptr;
- int i;
for_each_queue(i) {
if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
@@ -1211,46 +1226,60 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev)
* queue and that the loopback traffic is not IP.
*/
for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
- if (qede_has_rx_work(rxq))
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative
+ * reads of CQE/BD before reading hw_comp_cons. If the CQE is
+ * read before it is written by FW, then FW writes CQE and SB,
+ * and then the CPU reads the hw_comp_cons, it will use an old
+ * CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
break;
- usleep_range(100, 200);
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
}
- if (!qede_has_rx_work(rxq)) {
+ if (i == QEDE_SELFTEST_POLL_COUNT) {
DP_NOTICE(edev, "Failed to receive the traffic\n");
return -1;
}
- hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
- sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ qede_update_rx_prod(edev, rxq);
- /* Memory barrier to prevent the CPU from doing speculative reads of CQE
- * / BD before reading hw_comp_cons. If the CQE is read before it is
- * written by FW, then FW writes CQE and SB, and then the CPU reads the
- * hw_comp_cons, it will use an old CQE.
- */
- rmb();
-
- /* Get the CQE from the completion ring */
- cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-
- /* Get the data from the SW ring */
- sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
- sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
- fp_cqe = &cqe->fast_path_regular;
- len = le16_to_cpu(fp_cqe->len_on_first_bd);
- data_ptr = (u8 *)(page_address(sw_rx_data->data) +
- fp_cqe->placement_offset + sw_rx_data->page_offset);
- for (i = ETH_HLEN; i < len; i++)
- if (data_ptr[i] != (unsigned char)(i & 0xff)) {
- DP_NOTICE(edev, "Loopback test failed\n");
- qede_recycle_rx_bd_ring(rxq, edev, 1);
- return -1;
- }
-
- qede_recycle_rx_bd_ring(rxq, edev, 1);
-
- return 0;
+ return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 4f298656bf47..8488ad36a2b8 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -317,8 +317,8 @@ static int qede_free_tx_pkt(struct qede_dev *edev,
split_bd_len = BD_UNMAP_LEN(split);
bds_consumed++;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
@@ -363,8 +363,8 @@ static void qede_free_failed_tx_pkt(struct qede_dev *edev,
nbd--;
}
- dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
- BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
/* Unmap the data of the skb frags */
for (i = 0; i < nbd; i++) {
@@ -964,8 +964,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
return 0;
}
-static inline void qede_update_rx_prod(struct qede_dev *edev,
- struct qede_rx_queue *rxq)
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -3006,7 +3005,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
txq->num_tx_buffers = edev->q_num_tx_buffers;
/* Allocate the parallel driver ring for Tx buffers */
- size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
+ size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
if (!txq->sw_tx_ring) {
DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
@@ -3017,7 +3016,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
QED_CHAIN_CNT_TYPE_U16,
- NUM_TX_BDS_MAX,
+ TX_RING_SIZE,
sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index e97968ed4b8f..6fb3bee904d3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1021,14 +1021,18 @@ void emac_mac_down(struct emac_adapter *adpt)
napi_disable(&adpt->rx_q.napi);
phy_stop(adpt->phydev);
- phy_disconnect(adpt->phydev);
- /* disable mac irq */
+ /* Interrupts must be disabled before the PHY is disconnected, to
+ * avoid a race condition where adjust_link is null when we get
+ * an interrupt.
+ */
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
free_irq(adpt->irq.irq, &adpt->irq);
+ phy_disconnect(adpt->phydev);
+
emac_mac_reset(adpt);
emac_tx_q_descs_free(adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index e4e1925d18a4..8be526af659a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -568,6 +568,7 @@ static const struct of_device_id emac_dt_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, emac_dt_match);
#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id emac_acpi_match[] = {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b698ea544bfc..2830190aaace 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8269,7 +8269,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((sizeof(dma_addr_t) > 4) &&
(use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
if (!pci_is_pcie(pdev))
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 55b2ab9dc320..67df4cf93362 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1471,7 +1471,7 @@ static int rocker_world_check_init(struct rocker_port *rocker_port)
if (rocker->wops) {
if (rocker->wops->mode != mode) {
dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n");
- return err;
+ return -EINVAL;
}
return 0;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 431a60804272..4ca461322d60 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1493,8 +1493,6 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags);
found = ofdpa_neigh_tbl_find(ofdpa, ip_addr);
- if (found)
- *index = found->index;
updating = found && adding;
removing = found && !adding;
@@ -1508,9 +1506,11 @@ static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port,
resolved = false;
} else if (removing) {
ofdpa_neigh_del(trans, found);
+ *index = found->index;
} else if (updating) {
ofdpa_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
+ *index = found->index;
} else {
err = -ENOENT;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 4ec7397e7fb3..a1b17cd7886b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -347,10 +347,9 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
for (i = 0; i < size; i++) {
- if (p->des0)
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(p),
- p->des0, p->des1, p->des2, p->des3);
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f94e0282451b..758b4e2c783c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -144,7 +144,7 @@ int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
-int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fa4a82f4656f..10909c9c0033 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -678,7 +678,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- return stmmac_ptp_register(priv);
+ stmmac_ptp_register(priv);
+
+ return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
@@ -1710,7 +1712,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (init_ptp) {
ret = stmmac_init_ptp(priv);
if (ret)
- netdev_warn(priv->dev, "PTP support cannot init.\n");
+ netdev_warn(priv->dev, "fail to init PTP.\n");
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 289d52725a6c..1477471f8d44 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -177,7 +177,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
* Description: this function will register the ptp clock driver
* to kernel. It also does some house keeping work.
*/
-int stmmac_ptp_register(struct stmmac_priv *priv)
+void stmmac_ptp_register(struct stmmac_priv *priv)
{
spin_lock_init(&priv->ptp_lock);
priv->ptp_clock_ops = stmmac_ptp_clock_ops;
@@ -185,15 +185,10 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
priv->device);
if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
priv->ptp_clock = NULL;
- return PTR_ERR(priv->ptp_clock);
- }
-
- spin_lock_init(&priv->ptp_lock);
-
- netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
-
- return 0;
+ } else if (priv->ptp_clock)
+ netdev_info(priv->dev, "registered PTP clock\n");
}
/**
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index eaa51ce8bd6d..70533017aaa7 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -982,11 +982,13 @@ static int dwceqos_mii_probe(struct net_device *ndev)
if (netif_msg_probe(lp))
phy_attached_info(phydev);
- phydev->supported &= PHY_GBIT_FEATURES;
+ phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
+ lp->flowcontrol.autoneg = AUTONEG_ENABLE;
return 0;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 752bcaa852e4..85a423a66478 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -58,9 +58,9 @@ struct geneve_dev {
struct hlist_node hlist; /* vni hash table */
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
- struct geneve_sock *sock4; /* IPv4 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */
#if IS_ENABLED(CONFIG_IPV6)
- struct geneve_sock *sock6; /* IPv6 socket used for geneve tunnel */
+ struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */
#endif
u8 vni[3]; /* virtual network ID for tunnel */
u8 ttl; /* TTL override */
@@ -453,7 +453,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
skb_gro_pull(skb, gh_len);
skb_gro_postpull_rcsum(skb, gh, gh_len);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
@@ -543,9 +543,19 @@ static void __geneve_sock_release(struct geneve_sock *gs)
static void geneve_sock_release(struct geneve_dev *geneve)
{
- __geneve_sock_release(geneve->sock4);
+ struct geneve_sock *gs4 = rtnl_dereference(geneve->sock4);
#if IS_ENABLED(CONFIG_IPV6)
- __geneve_sock_release(geneve->sock6);
+ struct geneve_sock *gs6 = rtnl_dereference(geneve->sock6);
+
+ rcu_assign_pointer(geneve->sock6, NULL);
+#endif
+
+ rcu_assign_pointer(geneve->sock4, NULL);
+ synchronize_net();
+
+ __geneve_sock_release(gs4);
+#if IS_ENABLED(CONFIG_IPV6)
+ __geneve_sock_release(gs6);
#endif
}
@@ -586,10 +596,10 @@ out:
gs->flags = geneve->flags;
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- geneve->sock6 = gs;
+ rcu_assign_pointer(geneve->sock6, gs);
else
#endif
- geneve->sock4 = gs;
+ rcu_assign_pointer(geneve->sock4, gs);
hash = geneve_net_vni_hash(geneve->vni);
hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
@@ -603,9 +613,7 @@ static int geneve_open(struct net_device *dev)
bool metadata = geneve->collect_md;
int ret = 0;
- geneve->sock4 = NULL;
#if IS_ENABLED(CONFIG_IPV6)
- geneve->sock6 = NULL;
if (ipv6 || metadata)
ret = geneve_sock_add(geneve, true);
#endif
@@ -720,6 +728,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
struct rtable *rt = NULL;
__u8 tos;
+ if (!rcu_dereference(geneve->sock4))
+ return ERR_PTR(-EIO);
+
memset(fl4, 0, sizeof(*fl4));
fl4->flowi4_mark = skb->mark;
fl4->flowi4_proto = IPPROTO_UDP;
@@ -772,11 +783,15 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
{
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
struct dst_cache *dst_cache;
+ struct geneve_sock *gs6;
__u8 prio;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ return ERR_PTR(-EIO);
+
memset(fl6, 0, sizeof(*fl6));
fl6->flowi6_mark = skb->mark;
fl6->flowi6_proto = IPPROTO_UDP;
@@ -842,7 +857,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs4 = geneve->sock4;
+ struct geneve_sock *gs4;
struct rtable *rt = NULL;
const struct iphdr *iip; /* interior IP header */
int err = -EINVAL;
@@ -853,6 +868,10 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs4 = rcu_dereference(geneve->sock4);
+ if (!gs4)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
@@ -932,9 +951,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_info *info)
{
struct geneve_dev *geneve = netdev_priv(dev);
- struct geneve_sock *gs6 = geneve->sock6;
struct dst_entry *dst = NULL;
const struct iphdr *iip; /* interior IP header */
+ struct geneve_sock *gs6;
int err = -EINVAL;
struct flowi6 fl6;
__u8 prio, ttl;
@@ -943,6 +962,10 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
u32 flags = geneve->flags;
+ gs6 = rcu_dereference(geneve->sock6);
+ if (!gs6)
+ goto tx_error;
+
if (geneve->collect_md) {
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
netdev_dbg(dev, "no tunnel metadata\n");
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3b28cf127f9d..9522763c8faf 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -447,7 +447,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
- if (skb_is_gso(skb)) {
+ if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -607,15 +607,18 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
- if (csum_info) {
- /* We only look at the IP checksum here.
- * Should we be dropping the packet if checksum
- * failed? How do we deal with other checksums - TCP/UDP?
- */
- if (csum_info->receive.ip_checksum_succeeded)
+
+ /* skb is already created with CHECKSUM_NONE */
+ skb_checksum_none_assert(skb);
+
+ /*
+ * In Linux, the IP checksum is always checked.
+ * Do L4 checksum offload if enabled and present.
+ */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb->ip_summed = CHECKSUM_NONE;
}
if (vlan_tci & VLAN_TAG_PRESENT)
@@ -696,12 +699,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
-
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, vmbus_dev_name(dev), sizeof(info->bus_info));
}
static void netvsc_get_channels(struct net_device *net,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0a715ab9d9cc..cc00eb0db5d2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -397,6 +397,14 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
+static bool send_sci(const struct macsec_secy *secy)
+{
+ const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+
+ return tx_sc->send_sci ||
+ (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+}
+
static sci_t make_sci(u8 *addr, __be16 port)
{
sci_t sci;
@@ -437,15 +445,15 @@ static unsigned int macsec_extra_len(bool sci_present)
/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
- const struct macsec_secy *secy, u32 pn)
+ const struct macsec_secy *secy, u32 pn,
+ bool sci_present)
{
const struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci));
+ memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
h->eth.h_proto = htons(ETH_P_MACSEC);
- if (tx_sc->send_sci ||
- (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) {
+ if (sci_present) {
h->tci_an |= MACSEC_TCI_SC;
memcpy(&h->secure_channel_id, &secy->sci,
sizeof(h->secure_channel_id));
@@ -650,6 +658,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct macsec_tx_sc *tx_sc;
struct macsec_tx_sa *tx_sa;
struct macsec_dev *macsec = macsec_priv(dev);
+ bool sci_present;
u32 pn;
secy = &macsec->secy;
@@ -687,7 +696,8 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
unprotected_len = skb->len;
eth = eth_hdr(skb);
- hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci));
+ sci_present = send_sci(secy);
+ hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(sci_present));
memmove(hh, eth, 2 * ETH_ALEN);
pn = tx_sa_update_pn(tx_sa, secy);
@@ -696,7 +706,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
kfree_skb(skb);
return ERR_PTR(-ENOLINK);
}
- macsec_fill_sectag(hh, secy, pn);
+ macsec_fill_sectag(hh, secy, pn, sci_present);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
skb_put(skb, secy->icv_len);
@@ -726,10 +736,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
skb_to_sgvec(skb, sg, 0, skb->len);
if (tx_sc->encrypt) {
- int len = skb->len - macsec_hdr_len(tx_sc->send_sci) -
+ int len = skb->len - macsec_hdr_len(sci_present) -
secy->icv_len;
aead_request_set_crypt(req, sg, sg, len, iv);
- aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci));
+ aead_request_set_ad(req, macsec_hdr_len(sci_present));
} else {
aead_request_set_crypt(req, sg, sg, 0, iv);
aead_request_set_ad(req, skb->len - secy->icv_len);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index dd47b69ddcff..c1e52b9dc58d 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -42,19 +42,24 @@
#define AT803X_MMD_ACCESS_CONTROL 0x0D
#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define AT803X_FUNC_DATA 0x4003
+#define AT803X_REG_CHIP_CONFIG 0x1f
+#define AT803X_BT_BX_REG_SEL 0x8000
#define AT803X_DEBUG_ADDR 0x1D
#define AT803X_DEBUG_DATA 0x1E
+#define AT803X_MODE_CFG_MASK 0x0F
+#define AT803X_MODE_CFG_SGMII 0x01
+
+#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
+#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
+
#define AT803X_DEBUG_REG_0 0x00
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
#define AT803X_DEBUG_REG_5 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
-#define AT803X_REG_CHIP_CONFIG 0x1f
-#define AT803X_BT_BX_REG_SEL 0x8000
-
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
@@ -210,7 +215,6 @@ static int at803x_suspend(struct phy_device *phydev)
{
int value;
int wol_enabled;
- int ccr;
mutex_lock(&phydev->lock);
@@ -226,16 +230,6 @@ static int at803x_suspend(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-down SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -244,7 +238,6 @@ done:
static int at803x_resume(struct phy_device *phydev)
{
int value;
- int ccr;
mutex_lock(&phydev->lock);
@@ -252,17 +245,6 @@ static int at803x_resume(struct phy_device *phydev)
value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
phy_write(phydev, MII_BMCR, value);
- if (phydev->interface != PHY_INTERFACE_MODE_SGMII)
- goto done;
-
- /* also power-up SGMII interface */
- ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
- value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE);
- phy_write(phydev, MII_BMCR, value);
- phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
-
-done:
mutex_unlock(&phydev->lock);
return 0;
@@ -382,6 +364,36 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
+static int at803x_aneg_done(struct phy_device *phydev)
+{
+ int ccr;
+
+ int aneg_done = genphy_aneg_done(phydev);
+ if (aneg_done != BMSR_ANEGCOMPLETE)
+ return aneg_done;
+
+ /*
+ * in SGMII mode, if copper side autoneg is successful,
+ * also check SGMII side autoneg result
+ */
+ ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ if ((ccr & AT803X_MODE_CFG_MASK) != AT803X_MODE_CFG_SGMII)
+ return aneg_done;
+
+ /* switch to SGMII/fiber page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
+
+ /* check if the SGMII link is OK. */
+ if (!(phy_read(phydev, AT803X_PSSR) & AT803X_PSSR_MR_AN_COMPLETE)) {
+ pr_warn("803x_aneg_done: SGMII link is not ok\n");
+ aneg_done = 0;
+ }
+ /* switch back to copper page */
+ phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
+
+ return aneg_done;
+}
+
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@@ -433,6 +445,7 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_HAS_INTERRUPT,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
+ .aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
} };
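The new at803x_aneg_done() above only reports completion when both the copper side and, in SGMII mode, the fiber/SGMII page agree. A minimal stand-alone rendering of that and-condition; the register reads are mocked and the helper names are illustrative, not the driver's.

/* mocked-up sketch, not the driver code */
#include <stdbool.h>
#include <stdio.h>

static bool copper_aneg_complete(void) { return true; }   /* stands in for BMSR_ANEGCOMPLETE */
static bool sgmii_aneg_complete(void)  { return false; }  /* stands in for the PSSR bit on the fiber page */

int main(void)
{
    bool done = copper_aneg_complete();

    /* as in the driver: only look at the SGMII page once copper autoneg is done */
    if (done && !sgmii_aneg_complete()) {
        fprintf(stderr, "SGMII link is not ok\n");
        done = false;
    }
    printf("aneg_done = %d\n", done);
    return 0;
}
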
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 03d54c4adc88..800b39f06279 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -19,6 +19,7 @@
#define TI_DP83848C_PHY_ID 0x20005ca0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
+#define TI_DP83822_PHY_ID 0x2000a240
/* Registers */
#define DP83848_MICR 0x11 /* MII Interrupt Control Register */
@@ -77,6 +78,7 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
+ { TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
};
MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
@@ -105,6 +107,7 @@ static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index f79eb12c326a..125cff57c759 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -433,13 +433,13 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 0);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
@@ -497,13 +497,13 @@ int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
mutex_lock(&dev->phy_mutex);
do {
ret = asix_set_sw_mii(dev, 1);
- if (ret == -ENODEV)
+ if (ret == -ENODEV || ret == -ETIMEDOUT)
break;
usleep_range(1000, 1100);
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 1);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
+ if (ret == -ENODEV || ret == -ETIMEDOUT) {
mutex_unlock(&dev->phy_mutex);
return ret;
}
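The asix change treats -ETIMEDOUT the same way as -ENODEV so the MII polling loop stops on a dead device instead of spinning on a stale status byte. A small userspace sketch of that loop shape, with the USB accessors faked and the status bit value made up for illustration.

/* loop-shape sketch with faked I/O, not the driver code */
#include <errno.h>
#include <stdio.h>

static int attempts;

static int fake_switch_to_sw_mii(void)
{
    /* pretend the device stops answering on the third try */
    return (++attempts >= 3) ? -ETIMEDOUT : 0;
}

int main(void)
{
    int ret, i = 0;
    unsigned char smsr = 0;

    do {
        ret = fake_switch_to_sw_mii();
        if (ret == -ENODEV || ret == -ETIMEDOUT)
            break;                        /* fatal: stop polling immediately */
        /* a real driver would sleep and re-read the status register here */
    } while (!(smsr & 0x01) && i++ < 30 && ret != -ENODEV);

    if (ret == -ENODEV || ret == -ETIMEDOUT)
        printf("bailing out early with %d\n", ret);
    else
        printf("host mode confirmed\n");
    return 0;
}
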
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5662babf0583..3e37724d30ae 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -151,7 +151,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
- if (status < 0) {
+ if (status) {
usb_set_intfdata(intf, NULL);
usb_driver_release_interface(driver_of(intf), intf);
return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0c36de121eb0..e34b1297c96a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2279,6 +2279,7 @@ vmxnet3_set_mc(struct net_device *netdev)
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
dma_addr_t new_table_pa = 0;
+ bool new_table_pa_valid = false;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2307,13 +2308,15 @@ vmxnet3_set_mc(struct net_device *netdev)
new_table,
sz,
PCI_DMA_TODEVICE);
+ if (!dma_mapping_error(&adapter->pdev->dev,
+ new_table_pa)) {
+ new_mode |= VMXNET3_RXM_MCAST;
+ new_table_pa_valid = true;
+ rxConf->mfTablePA = cpu_to_le64(
+ new_table_pa);
+ }
}
-
- if (!dma_mapping_error(&adapter->pdev->dev,
- new_table_pa)) {
- new_mode |= VMXNET3_RXM_MCAST;
- rxConf->mfTablePA = cpu_to_le64(new_table_pa);
- } else {
+ if (!new_table_pa_valid) {
netdev_info(netdev,
"failed to copy mcast list, setting ALL_MULTI\n");
new_mode |= VMXNET3_RXM_ALL_MULTI;
@@ -2338,7 +2341,7 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- if (new_table_pa)
+ if (new_table_pa_valid)
dma_unmap_single(&adapter->pdev->dev, new_table_pa,
rxConf->mfTableLen, PCI_DMA_TODEVICE);
kfree(new_table);
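The vmxnet3 hunks stop using a non-zero new_table_pa as an "is mapped" test, since 0 can be a legitimate DMA address, and instead record validity in an explicit flag set right after the dma_mapping_error() check. A compact userspace sketch of that pattern with the DMA API mocked out.

/* pattern sketch, DMA API mocked */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

static dma_addr_t fake_dma_map(const void *buf)      { (void)buf; return 0; /* 0 is legal */ }
static bool       fake_dma_mapping_error(dma_addr_t a) { (void)a; return false; }

int main(void)
{
    char table[64];
    dma_addr_t pa = fake_dma_map(table);
    bool pa_valid = false;

    if (!fake_dma_mapping_error(pa))
        pa_valid = true;              /* record success explicitly */

    /* ... later, on the unmap path ... */
    if (pa_valid)                     /* NOT "if (pa)": address 0 may be valid */
        printf("would dma_unmap_single(pa=%llu)\n", (unsigned long long)pa);
    else
        printf("mapping failed, nothing to unmap\n");
    return 0;
}
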
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 85c271c70d42..820de6a9ddde 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -956,6 +956,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
if (skb->pkt_type == PACKET_LOOPBACK) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
skb->pkt_type = PACKET_HOST;
goto out;
}
@@ -996,6 +997,7 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
{
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
+ IPCB(skb)->flags |= IPSKB_L3SLAVE;
/* loopback traffic; do not push through packet taps again.
* Reset pkt_type for upper layers to process skb
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c0170b6956bb..cb5cc7c03160 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -583,7 +583,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
}
}
- pp = eth_gro_receive(head, skb);
+ pp = call_gro_receive(eth_gro_receive, head, skb);
flush = 0;
out:
@@ -943,17 +943,20 @@ static bool vxlan_snoop(struct net_device *dev,
static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
{
struct vxlan_dev *vxlan;
+ struct vxlan_sock *sock4;
+ struct vxlan_sock *sock6 = NULL;
unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
+ sock4 = rtnl_dereference(dev->vn4_sock);
+
/* The vxlan_sock is only used by dev, leaving group has
* no effect on other vxlan devices.
*/
- if (family == AF_INET && dev->vn4_sock &&
- atomic_read(&dev->vn4_sock->refcnt) == 1)
+ if (family == AF_INET && sock4 && atomic_read(&sock4->refcnt) == 1)
return false;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && dev->vn6_sock &&
- atomic_read(&dev->vn6_sock->refcnt) == 1)
+ sock6 = rtnl_dereference(dev->vn6_sock);
+ if (family == AF_INET6 && sock6 && atomic_read(&sock6->refcnt) == 1)
return false;
#endif
@@ -961,10 +964,12 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
if (!netif_running(vxlan->dev) || vxlan == dev)
continue;
- if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
+ if (family == AF_INET &&
+ rtnl_dereference(vxlan->vn4_sock) != sock4)
continue;
#if IS_ENABLED(CONFIG_IPV6)
- if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
+ if (family == AF_INET6 &&
+ rtnl_dereference(vxlan->vn6_sock) != sock6)
continue;
#endif
@@ -1005,22 +1010,25 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ rcu_assign_pointer(vxlan->vn6_sock, NULL);
#endif
+ rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net();
- if (ipv4) {
- udp_tunnel_sock_release(vxlan->vn4_sock->sock);
- kfree(vxlan->vn4_sock);
+ if (__vxlan_sock_release_prep(sock4)) {
+ udp_tunnel_sock_release(sock4->sock);
+ kfree(sock4);
}
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6) {
- udp_tunnel_sock_release(vxlan->vn6_sock->sock);
- kfree(vxlan->vn6_sock);
+ if (__vxlan_sock_release_prep(sock6)) {
+ udp_tunnel_sock_release(sock6->sock);
+ kfree(sock6);
}
#endif
}
@@ -1036,18 +1044,21 @@ static int vxlan_igmp_join(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_join_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1067,18 +1078,21 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
int ret = -EINVAL;
if (ip->sa.sa_family == AF_INET) {
+ struct vxlan_sock *sock4 = rtnl_dereference(vxlan->vn4_sock);
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
.imr_ifindex = ifindex,
};
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
lock_sock(sk);
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- sk = vxlan->vn6_sock->sock->sk;
+ struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
+
+ sk = sock6->sock->sk;
lock_sock(sk);
ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
&ip->sin6.sin6_addr);
@@ -1828,11 +1842,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
+ if (!sock6)
+ return ERR_PTR(-EIO);
+
if (tos && !info)
use_cache = false;
if (use_cache) {
@@ -1850,7 +1868,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowi6_proto = IPPROTO_UDP;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
- vxlan->vn6_sock->sock->sk,
+ sock6->sock->sk,
&ndst, &fl6);
if (err < 0)
return ERR_PTR(err);
@@ -1995,9 +2013,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (dst->sa.sa_family == AF_INET) {
- if (!vxlan->vn4_sock)
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
+
+ if (!sock4)
goto drop;
- sk = vxlan->vn4_sock->sock->sk;
+ sk = sock4->sock->sk;
rt = vxlan_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2050,12 +2070,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
src_port, dst_port, xnet, !udp_sum);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
struct dst_entry *ndst;
u32 rt6i_flags;
- if (!vxlan->vn6_sock)
+ if (!sock6)
goto drop;
- sk = vxlan->vn6_sock->sock->sk;
+ sk = sock6->sock->sk;
ndst = vxlan6_get_route(vxlan, skb,
rdst ? rdst->remote_ifindex : 0, tos,
@@ -2403,9 +2424,10 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
if (ip_tunnel_info_af(info) == AF_INET) {
+ struct vxlan_sock *sock4 = rcu_dereference(vxlan->vn4_sock);
struct rtable *rt;
- if (!vxlan->vn4_sock)
+ if (!sock4)
return -EINVAL;
rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
@@ -2417,8 +2439,6 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
#if IS_ENABLED(CONFIG_IPV6)
struct dst_entry *ndst;
- if (!vxlan->vn6_sock)
- return -EINVAL;
ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
&info->key.u.ipv6.src, NULL, info);
@@ -2728,10 +2748,10 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
if (ipv6)
- vxlan->vn6_sock = vs;
+ rcu_assign_pointer(vxlan->vn6_sock, vs);
else
#endif
- vxlan->vn4_sock = vs;
+ rcu_assign_pointer(vxlan->vn4_sock, vs);
vxlan_vs_add_dev(vs, vxlan);
return 0;
}
@@ -2742,9 +2762,9 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan)
bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
int ret = 0;
- vxlan->vn4_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn4_sock, NULL);
#if IS_ENABLED(CONFIG_IPV6)
- vxlan->vn6_sock = NULL;
+ RCU_INIT_POINTER(vxlan->vn6_sock, NULL);
if (ipv6 || metadata)
ret = __vxlan_sock_add(vxlan, true);
#endif
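The vxlan changes convert vn4_sock/vn6_sock to RCU-style access: the writer publishes or clears the pointer with rcu_assign_pointer()/RCU_INIT_POINTER(), and readers load it once via rcu_dereference()/rtnl_dereference() and NULL-check before dereferencing. A rough userspace analogy using C11 release/acquire atomics; the real code additionally waits for readers with synchronize_net(), which this sketch does not model.

/* userspace analogy only, not kernel RCU */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sock_wrap { int refcnt; };

static _Atomic(struct sock_wrap *) vn4_sock;

static void publish(struct sock_wrap *s)
{
    atomic_store_explicit(&vn4_sock, s, memory_order_release);   /* ~rcu_assign_pointer */
}

static void reader(void)
{
    struct sock_wrap *s = atomic_load_explicit(&vn4_sock, memory_order_acquire);

    if (!s) {                 /* the pointer can be NULL at any time: check first */
        puts("no socket, dropping");
        return;
    }
    printf("using socket, refcnt=%d\n", s->refcnt);   /* single load, then use */
}

int main(void)
{
    struct sock_wrap *s = malloc(sizeof(*s));
    s->refcnt = 1;

    reader();                 /* before publication: NULL, handled gracefully */
    publish(s);
    reader();                 /* after publication: safe dereference */

    publish(NULL);            /* teardown: clear first; real code then waits for readers */
    free(s);
    return 0;
}
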
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 33ab3345d333..4e9fe75d7067 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -294,7 +294,7 @@ config FSL_UCC_HDLC
config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
- depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index d06a887a2352..b776a0ab106c 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -223,12 +223,19 @@ static int slic_ds26522_probe(struct spi_device *spi)
return ret;
}
+static const struct spi_device_id slic_ds26522_id[] = {
+ { .name = "ds26522" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, slic_ds26522_id);
+
static const struct of_device_id slic_ds26522_match[] = {
{
.compatible = "maxim,ds26522",
},
{},
};
+MODULE_DEVICE_TABLE(of, slic_ds26522_match);
static struct spi_driver slic_ds26522_driver = {
.driver = {
@@ -239,6 +246,7 @@ static struct spi_driver slic_ds26522_driver = {
},
.probe = slic_ds26522_probe,
.remove = slic_ds26522_remove,
+ .id_table = slic_ds26522_id,
};
static int __init slic_ds26522_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index dda49af1eb74..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -450,6 +450,7 @@ struct ath10k_debug {
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 832da6ed9f13..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -30,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -1451,56 +1453,51 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(ar->hw_params.cal_data_len);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
@@ -1508,18 +1505,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- void *buf = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, ar->hw_params.cal_data_len);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1580,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1932,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2344,6 +2340,10 @@ int ath10k_debug_create(struct ath10k *ar)
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2357,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
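After the ath10k change, cal_data is a buffer preallocated in ath10k_debug_create(), refreshed under conf_mutex, and the debugfs read handler simply copies from that snapshot at the current file offset. Roughly the semantics of that copy, reimplemented in userspace for illustration (names and bounds handling are a sketch, not the kernel helper).

/* approximate simple_read_from_buffer() behaviour, userspace sketch */
#include <stdio.h>
#include <string.h>

static long read_from_buffer(char *to, size_t count, long *ppos,
                             const char *from, size_t available)
{
    if (*ppos < 0 || (size_t)*ppos >= available)
        return 0;
    if (count > available - (size_t)*ppos)
        count = available - (size_t)*ppos;
    memcpy(to, from + *ppos, count);
    *ppos += (long)count;               /* advance the file position */
    return (long)count;
}

int main(void)
{
    const char cal_data[] = "0123456789abcdef";   /* stands in for the snapshot */
    char chunk[8];
    long pos = 0, n;

    while ((n = read_from_buffer(chunk, sizeof(chunk) - 1, &pos,
                                 cal_data, sizeof(cal_data) - 1)) > 0) {
        chunk[n] = '\0';
        printf("read %ld bytes: %s\n", n, chunk);
    }
    return 0;
}
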
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index eab0ab976af2..76eb33679d4b 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1401,6 +1401,7 @@ static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x2))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x18))},
{},
};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index b6f064a8d264..7e27a06e5df1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -33,7 +33,6 @@ struct coeff {
enum ar9003_cal_types {
IQ_MISMATCH_CAL = BIT(0),
- TEMP_COMP_CAL = BIT(1),
};
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
@@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
/* Kick-off cal */
REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
break;
- case TEMP_COMP_CAL:
- ath_dbg(common, CALIBRATE,
- "starting Temperature Compensation Calibration\n");
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
- REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
- break;
default:
ath_err(common, "Invalid calibration type\n");
break;
@@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Accumulate cal measures for active chains
*/
- if (cur_caldata->calCollect)
- cur_caldata->calCollect(ah);
+ cur_caldata->calCollect(ah);
ah->cal_samples++;
if (ah->cal_samples >= cur_caldata->calNumSamples) {
@@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
/*
* Process accumulated data
*/
- if (cur_caldata->calPostProc)
- cur_caldata->calPostProc(ah, numChains);
+ cur_caldata->calPostProc(ah, numChains);
/* Calibration has finished. */
caldata->CalValid |= cur_caldata->calType;
@@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
ar9003_hw_iqcalibrate
};
-static const struct ath9k_percal_data temp_cal_single_sample = {
- TEMP_COMP_CAL,
- MIN_CAL_SAMPLES,
- PER_MAX_LOG_COUNT,
-};
-
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
ah->iq_caldata.calData = &iq_cal_single_sample;
- ah->temp_caldata.calData = &temp_cal_single_sample;
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
@@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
- ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
+ ah->supp_cals = IQ_MISMATCH_CAL;
}
#define OFF_UPPER_LT 24
@@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- INIT_CAL(&ah->temp_caldata);
- INSERT_CAL(ah, &ah->temp_caldata);
-
/* Initialize current pointer to first element in list */
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 2a5d3ad1169c..9cbca1229bac 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -830,7 +830,6 @@ struct ath_hw {
/* Calibration */
u32 supp_cals;
struct ath9k_cal_list iq_caldata;
- struct ath9k_cal_list temp_caldata;
struct ath9k_cal_list adcgain_caldata;
struct ath9k_cal_list adcdc_caldata;
struct ath9k_cal_list *cal_list;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 94480123efa3..274dd5a1574a 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -45,7 +45,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, false);
+ priv->wdev.iftype, 0, NULL, NULL);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 1016628926d2..08d587a342d3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -238,7 +238,7 @@ struct rtl8xxxu_rxdesc16 {
u32 pattern1match:1;
u32 pattern0match:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
#if 0
u32 bassn:12;
u32 bavld:1;
@@ -368,7 +368,7 @@ struct rtl8xxxu_rxdesc24 {
u32 ldcp:1;
u32 splcp:1;
#endif
- __le32 tsfl;
+ u32 tsfl;
};
struct rtl8xxxu_txdesc32 {
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index df54d27e7851..a793fedc3654 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -1461,7 +1461,9 @@ static int rtl8192eu_active_to_emu(struct rtl8xxxu_priv *priv)
int count, ret = 0;
/* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
/* Switch DPDT_SEL_P output from register 0x65[2] */
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
@@ -1593,6 +1595,10 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 |= BIT(5);
rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 6c086b5657e9..02b8ddd98a95 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
u32 val32;
u8 val8;
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 |= (BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+
/*
* No indication anywhere as to what 0x0790 does. The 2 antenna
* vendor code preserves bits 6-7 here.
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index b2d7f6e69667..a5e6ec2152bf 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5197,7 +5197,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
sizeof(struct rtl8xxxu_rxdesc16), 128);
- if (pkt_cnt > 1)
+ /*
+ * Only clone the skb if there's enough data at the end to
+ * at least cover the rx descriptor
+ */
+ if (pkt_cnt > 1 &&
+ urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
next_skb = skb_clone(skb, GFP_ATOMIC);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -5215,7 +5220,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
@@ -5285,7 +5290,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
rx_desc->rxmcs);
- rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
+ rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
if (!rx_desc->swdec)
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index f95760c13c56..8e7f23c11680 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -111,7 +111,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
if (!err)
goto found_alt;
}
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index e7b11b40e68d..f361808def47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -86,6 +86,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 tid;
+ char *fw_name;
rtl8188ee_bt_reg_init(hw);
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -169,10 +170,10 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8188efw.bin";
+ fw_name = "rtlwifi/rtl8188efw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -284,7 +285,6 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl88e_pci",
- .fw_name = "rtlwifi/rtl8188efw.bin",
.ops = &rtl8188ee_hal_ops,
.mod_params = &rtl88ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index 87aa209ae325..8b6e37ce3f66 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -96,6 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8192cfwU.bin";
rtl8192ce_bt_reg_init(hw);
@@ -167,15 +168,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
- !IS_92C_SERIAL(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
- else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+ if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8192cfwU_B.bin";
rtlpriv->max_fw_size = 0x4000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -262,7 +260,6 @@ static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92c_pci",
- .fw_name = "rtlwifi/rtl8192cfw.bin",
.ops = &rtl8192ce_hal_ops,
.mod_params = &rtl92ce_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 7c6f7f0d18c6..f953320f0e23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -59,6 +59,7 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
+ char *fw_name;
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -77,18 +78,18 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
}
if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
!IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ fw_name = "rtlwifi/rtl8192cufw_A.bin";
} else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ fw_name = "rtlwifi/rtl8192cufw_B.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
/* provide name of alternative file */
rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
- pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->fw_name, rtlpriv->io.dev,
+ fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
return err;
}
@@ -187,7 +188,6 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
- .fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index 0538a4d09568..1ebfee18882f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -92,6 +92,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
u8 tid;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ char *fw_name = "rtlwifi/rtl8192defw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -181,10 +182,10 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = 0x8000;
pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
+ pr_info("Loading firmware file %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8192de",
- .fw_name = "rtlwifi/rtl8192defw.bin",
.ops = &rtl8192de_hal_ops,
.mod_params = &rtl92de_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index ac299cbe59b0..46b605de36e7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -91,6 +91,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
+ char *fw_name;
rtl92ee_bt_reg_init(hw);
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
@@ -170,11 +171,11 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
}
/* request fw */
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8192eefw.bin";
+ fw_name = "rtlwifi/rtl8192eefw.bin";
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -266,7 +267,6 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl92ee_pci",
- .fw_name = "rtlwifi/rtl8192eefw.bin",
.ops = &rtl8192ee_hal_ops,
.mod_params = &rtl92ee_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 5e8e02d5de8a..3e1eaeac4fdc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -89,12 +89,13 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct ieee80211_hw *hw = context;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rt_firmware *pfirmware = NULL;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
- pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
+ pr_err("Firmware %s not available\n", fw_name);
rtlpriv->max_fw_size = 0;
return;
}
@@ -117,6 +118,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
int err = 0;
u16 earlyrxthreshold = 7;
+ char *fw_name = "rtlwifi/rtl8192sefw.bin";
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
@@ -214,9 +216,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
sizeof(struct fw_hdr);
pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
- "Loading firmware %s\n", rtlpriv->cfg->fw_name);
+ "Loading firmware %s\n", fw_name);
/* request fw */
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl92se_fw_cb);
if (err) {
@@ -310,7 +312,6 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = {
.bar_id = 1,
.write_readback = false,
.name = "rtl92s_pci",
- .fw_name = "rtlwifi/rtl8192sefw.bin",
.ops = &rtl8192se_hal_ops,
.mod_params = &rtl92se_mod_params,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 89c828ad89f4..c51a9e8234e9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -94,6 +94,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
int err = 0;
+ char *fw_name = "rtlwifi/rtl8723fw.bin";
rtl8723e_bt_reg_init(hw);
@@ -176,14 +177,12 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
- if (IS_VENDOR_8723_A_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
- else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
+ if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
+ fw_name = "rtlwifi/rtl8723fw_B.bin";
rtlpriv->max_fw_size = 0x6000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +279,6 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723e_pci",
- .fw_name = "rtlwifi/rtl8723efw.bin",
.ops = &rtl8723e_hal_ops,
.mod_params = &rtl8723e_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 20b53f035483..847644d1f5f5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -91,6 +91,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ char *fw_name = "rtlwifi/rtl8723befw.bin";
rtl8723be_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -184,8 +185,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
}
rtlpriv->max_fw_size = 0x8000;
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -280,7 +281,6 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8723be_pci",
- .fw_name = "rtlwifi/rtl8723befw.bin",
.ops = &rtl8723be_hal_ops,
.mod_params = &rtl8723be_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 22f687b1f133..297938e0effd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -93,6 +93,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ char *fw_name, *wowlan_fw_name;
rtl8821ae_bt_reg_init(hw);
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
@@ -203,17 +204,17 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
}
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8812aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8812aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8812aefw_wowlan.bin";
} else {
- rtlpriv->cfg->fw_name = "rtlwifi/rtl8821aefw.bin";
- rtlpriv->cfg->wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
+ fw_name = "rtlwifi/rtl8821aefw.bin";
+ wowlan_fw_name = "rtlwifi/rtl8821aefw_wowlan.bin";
}
rtlpriv->max_fw_size = 0x8000;
/*load normal firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
- err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
+ pr_info("Using firmware %s\n", fw_name);
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_fw_cb);
if (err) {
@@ -222,9 +223,9 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
return 1;
}
/*load wowlan firmware*/
- pr_info("Using firmware %s\n", rtlpriv->cfg->wowlan_fw_name);
+ pr_info("Using firmware %s\n", wowlan_fw_name);
err = request_firmware_nowait(THIS_MODULE, 1,
- rtlpriv->cfg->wowlan_fw_name,
+ wowlan_fw_name,
rtlpriv->io.dev, GFP_KERNEL, hw,
rtl_wowlan_fw_cb);
if (err) {
@@ -320,7 +321,6 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = {
.bar_id = 2,
.write_readback = true,
.name = "rtl8821ae_pci",
- .fw_name = "rtlwifi/rtl8821aefw.bin",
.ops = &rtl8821ae_hal_ops,
.mod_params = &rtl8821ae_mod_params,
.maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 595f7d5d091a..dafe486f8448 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2278,9 +2278,7 @@ struct rtl_hal_cfg {
u8 bar_id;
bool write_readback;
char *name;
- char *fw_name;
char *alt_fw_name;
- char *wowlan_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
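The rtlwifi hunks above share one theme: stop writing the chosen firmware file name into the shared, driver-wide rtl_hal_cfg and use a function-local fw_name instead, after which wifi.h can drop the fw_name/wowlan_fw_name fields. A toy illustration of why the shared-field pattern is fragile once more than one device probes; the file names reuse the ones visible in the diff, everything else is made up.

/* toy illustration, not driver code */
#include <stdio.h>

struct hal_cfg { const char *fw_name; };
static struct hal_cfg shared_cfg;            /* one instance shared by all devices */

static void probe_shared(int b_cut)          /* old pattern: clobbers shared state */
{
    shared_cfg.fw_name = b_cut ? "rtlwifi/rtl8723fw_B.bin" : "rtlwifi/rtl8723fw.bin";
    printf("shared: requesting %s\n", shared_cfg.fw_name);
}

static void probe_local(int b_cut)           /* new pattern: per-call choice */
{
    const char *fw_name = b_cut ? "rtlwifi/rtl8723fw_B.bin" : "rtlwifi/rtl8723fw.bin";
    printf("local:  requesting %s\n", fw_name);
}

int main(void)
{
    probe_shared(0);
    probe_shared(1);   /* second device overwrites the first one's choice */
    probe_local(0);
    probe_local(1);    /* each probe keeps its own name */
    return 0;
}
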
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index a6e94b1a12cb..47fe7f96a242 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -391,7 +391,6 @@ static void wl1271_remove(struct sdio_func *func)
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
- kfree(glue);
}
#ifdef CONFIG_PM
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 8b2b740d6679..124c2432ac9c 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -89,7 +89,7 @@ config NVDIMM_PFN
Select Y if unsure
config NVDIMM_DAX
- tristate "NVDIMM DAX: Raw access to persistent memory"
+ bool "NVDIMM DAX: Raw access to persistent memory"
default LIBNVDIMM
depends on NVDIMM_PFN
help
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 3509cff68ef9..abe5c6bc756c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2176,12 +2176,14 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
err:
- for (i = 0; devs[i]; i++)
- if (is_nd_blk(&nd_region->dev))
- namespace_blk_release(devs[i]);
- else
- namespace_pmem_release(devs[i]);
- kfree(devs);
+ if (devs) {
+ for (i = 0; devs[i]; i++)
+ if (is_nd_blk(&nd_region->dev))
+ namespace_blk_release(devs[i]);
+ else
+ namespace_pmem_release(devs[i]);
+ kfree(devs);
+ }
return NULL;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 42b3a8217073..24618431a14b 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -47,7 +47,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}
-static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
+static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
struct device *dev = to_dev(pmem);
@@ -62,8 +62,12 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
__func__, (unsigned long long) sector,
cleared / 512, cleared / 512 > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared / 512);
+ } else {
+ return -EIO;
}
+
invalidate_pmem(pmem->virt_addr + offset, len);
+ return 0;
}
static void write_pmem(void *pmem_addr, struct page *page,
@@ -123,7 +127,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
flush_dcache_page(page);
write_pmem(pmem_addr, page, off, len);
if (unlikely(bad_pmem)) {
- pmem_clear_poison(pmem, pmem_off, len);
+ rc = pmem_clear_poison(pmem, pmem_off, len);
write_pmem(pmem_addr, page, off, len);
}
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(1);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
struct nvme_command c = { };
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(2);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
return -ENODEV;
}
- if (ns->ctrl->vs >= NVME_VS(1, 1))
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
- if (ns->ctrl->vs >= NVME_VS(1, 2))
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
int ret;
while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
if ((csts & NVME_CSTS_RDY) == bit)
break;
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
- if (ctrl->vs >= NVME_VS(1, 1))
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(cap);
ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
return;
nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1) &&
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
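NVME_VS() is used above in a three-argument form (major, minor, tertiary); the encoding is spelled out further down, where nvmet's (1 << 16) | (2 << 8) | 1 becomes NVME_VS(1, 2, 1). A stand-alone macro reproducing that layout, so the >= comparisons in these hunks read naturally; the macro name here is a local stand-in, not the kernel's header.

/* illustrative reimplementation of the version layout */
#include <stdint.h>
#include <stdio.h>

#define MY_NVME_VS(major, minor, tertiary) \
    (((uint32_t)(major) << 16) | ((uint32_t)(minor) << 8) | (uint32_t)(tertiary))

int main(void)
{
    uint32_t v121 = MY_NVME_VS(1, 2, 1);
    uint32_t v110 = MY_NVME_VS(1, 1, 0);

    printf("1.2.1 -> 0x%06x\n", v121);            /* 0x010201 */
    printf("1.1.0 -> 0x%06x\n", v110);            /* 0x010100 */
    printf("1.2.1 >= 1.1.0: %d\n", v121 >= v110); /* ordered comparisons still work */
    return 0;
}
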
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
+ u32 cmbloc;
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
/*
* Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
- dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0;
if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
- if (queue_work(nvme_workq, &dev->reset_work))
+ if (!nvme_reset(dev))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
return ret >= 0 ? 0 : ret;
}
+static ssize_t nvme_cmb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
u64 szu, size, offset;
- u32 cmbloc;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
dma_addr_t dma_addr;
- if (!use_cmb_sqes)
- return NULL;
-
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
return NULL;
+ dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+ if (!use_cmb_sqes)
+ return NULL;
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+ offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
if (offset > bar_size)
return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
cmb = ioremap_wc(dma_addr, size);
if (!cmb)
return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
{
- int pass, queues = dev->online_queues - 1;
+ int pass;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth);
}
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+ /*
+ * CMBs can currently only exist on >=1.2 PCIe devices. We only
+ * populate sysfs if a CMB is implemented. Note that we add the
+ * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+ * it on exit. Since nvme_dev_attrs_group has no name we can pass
+ * NULL as final argument to sysfs_add_file_to_group.
+ */
+
+ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
+ if (dev->cmbsz) {
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->dev,
+ "failed to add sysfs attribute for CMB\n");
+ }
+ }
+
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
+ int i, queues;
u32 csts = -1;
del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
csts = readl(dev->bar + NVME_REG_CSTS);
}
+ queues = dev->online_queues - 1;
for (i = dev->queue_count - 1; i > 0; i--)
nvme_suspend_queue(dev->queues[i]);
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (dev->queue_count)
nvme_suspend_queue(dev->queues[0]);
} else {
- nvme_disable_io_queues(dev);
+ nvme_disable_io_queues(dev, queues);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
-
+ if (work_busy(&dev->reset_work))
+ return -ENODEV;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
-
- flush_work(&dev->reset_work);
return 0;
}
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
- return nvme_reset(to_nvme_dev(ctrl));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = nvme_reset(dev);
+
+ if (!ret)
+ flush_work(&dev->reset_work);
+ return ret;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_disable(dev, false);
else
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- queue_work(nvme_workq, &ndev->reset_work);
+ nvme_reset(ndev);
return 0;
}
#endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
eui = id_ns->eui64;
len = sizeof(id_ns->eui64);
- if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid;
len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
- if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
if (res != -EOPNOTSUPP)
return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
*/
/* we support multiple ports and multiples hosts: */
- id->mic = (1 << 0) | (1 << 1);
+ id->cmic = (1 << 0) | (1 << 1);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x00:
+ case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
return 0;
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute = nvmet_execute_identify_ctrl;
return 0;
- case 0x02:
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
req->execute = nvmet_execute_identify_nslist;
return 0;
}
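The identify handlers above replace the raw CNS values 0x00/0x01/0x02 with named constants. The same mapping as a small self-contained enum; the constant names follow the ones used in the hunk but are local stand-ins here.

/* illustrative mapping of the CNS values replaced above */
#include <stdio.h>

enum id_cns {
    ID_CNS_NS             = 0x00,   /* identify namespace */
    ID_CNS_CTRL           = 0x01,   /* identify controller */
    ID_CNS_NS_ACTIVE_LIST = 0x02,   /* active namespace ID list */
};

static const char *cns_name(unsigned int cns)
{
    switch (cns) {
    case ID_CNS_NS:             return "identify namespace";
    case ID_CNS_CTRL:           return "identify controller";
    case ID_CNS_NS_ACTIVE_LIST: return "active namespace list";
    default:                    return "unsupported";
    }
}

int main(void)
{
    for (unsigned int c = 0; c <= 3; c++)
        printf("cns %#04x -> %s\n", c, cns_name(c));
    return 0;
}
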
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return NULL;
- subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+ subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
switch (type) {
case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
/* we support only dynamic controllers */
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
- e->nqntype = type;
+ e->subtype = type;
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute =
nvmet_execute_identify_disc_ctrl;
return 0;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index f811d2796437..e4bf07d20f9b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -29,6 +29,7 @@
const struct of_device_id of_default_bus_match_table[] = {
{ .compatible = "simple-bus", },
{ .compatible = "simple-mfd", },
+ { .compatible = "isa", },
#ifdef CONFIG_ARM_AMBA
{ .compatible = "arm,amba-bus", },
#endif /* CONFIG_ARM_AMBA */
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 2cb7315e26d0..653707996342 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
+ pcie->drvdata = match->data;
pp->ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
@@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->pp.dbi_base);
}
- pcie->drvdata = match->data;
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 537f58a664fa..8df6312ed300 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpinto@synopsys.com>
+ * Authors: Joao Pinto <jpmpinto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index bfdd0744b686..ad70507cfb56 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -610,6 +610,7 @@ static int msi_verify_entries(struct pci_dev *dev)
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
* @nvec: number of interrupts to allocate
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI capability structure of the device with the requested
* number of interrupts. A return value of zero indicates the successful
@@ -752,6 +753,7 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
+ * @affinity: flag to indicate cpu irq affinity mask should be set
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X irq. A return of zero indicates the successful setup of
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index c2ac7646b99f..a8ac4bcef2c0 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
rc = acpi_dev_get_resources(adev, &resource_list,
acpi_pmu_dev_add_resource, &res);
acpi_dev_free_resource_list(&resource_list);
- if (rc < 0 || IS_ERR(&res)) {
+ if (rc < 0) {
dev_err(dev, "PMU type %d: No resource address found\n", type);
goto err;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864e1a7f..c8c72e8259d3 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
-#define D20 26
+#define F20 26
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
-#define D21 27
+#define D20 27
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
#define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C18, GPIOI0, SYSCS);
#define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(E15, GPIOI1, SYSCK);
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
#define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C16, GPIOI3, SYSMISO);
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+ SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
#define L2 73
SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
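The new SPI1 strap descriptors above move from a single-bit SIG_DESC_SET() to explicit { reg, mask, enable, disable } initializers so a two-bit field can select between three mux states. A hedged sketch of what those initializers express (the field names are an assumption; the values come straight from the hunk):

	/* assumed layout of the descriptor used by the initializers above */
	struct aspeed_sig_desc_sketch {
		unsigned int reg;	/* e.g. HW_STRAP1                       */
		u32 mask;		/* e.g. GENMASK(13, 12)                 */
		u32 enable;		/* field value that selects the signal  */
		u32 disable;		/* field value written when muxing away */
	};

	/* HW_STRAP1[13:12] == 1 -> SPI1 master       (SPI1_DESC)         */
	/* HW_STRAP1[13:12] == 2 -> SPI1 debug        (SPI1DEBUG_DESC)    */
	/* HW_STRAP1[13:12] == 3 -> SPI1 pass-through (SPI1PASSTHRU_DESC) */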
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
+ ASPEED_PINCTRL_PIN(A15),
ASPEED_PINCTRL_PIN(A2),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B12),
ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C12),
ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
- ASPEED_PINCTRL_PIN(D21),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F9),
ASPEED_PINCTRL_PIN(H20),
ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(VGABIOSROM),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(VGABIOSROM),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f13f3e..49aeba912531 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
bool enable, struct regmap *map)
{
int i;
- bool ret;
-
- ret = aspeed_sig_expr_eval(expr, enable, map);
- if (ret)
- return ret;
for (i = 0; i < expr->ndescs; i++) {
+ bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, true, map);
}
static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (!aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, false, map);
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e6df..71bbeb9321ba 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
+ raw_spin_lock_init(&vg->lock);
+
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, vg);
- raw_spin_lock_init(&vg->lock);
pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a40b973..01443762e570 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
#include "pinctrl-intel.h"
/* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+ if (!pd || !intel_pad_usable(pctrl, pin))
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+ * by userspace). It is possible that some pins are used by the
+ * BIOS during resume and those are not always locked down so leave
+ * them alone.
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&pctrl->chip, pin))
+ return true;
+
+ return false;
+}
+
int intel_pinctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* much memory to the process.
*/
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+ ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+ &page, NULL);
up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
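The get_user_pages() updates in this hunk and in the rio_mport_cdev hunk below follow the interface change that folded the separate write/force int arguments into a single gup_flags word, with FOLL_WRITE meaning the caller intends to write into the pinned pages. A minimal sketch of the mapping, assuming the 4.9-era prototypes:

	/* old: long get_user_pages(unsigned long start, unsigned long nr_pages,
	 *                          int write, int force,
	 *                          struct page **pages, struct vm_area_struct **vmas);
	 * new: long get_user_pages(unsigned long start, unsigned long nr_pages,
	 *                          unsigned int gup_flags,
	 *                          struct page **pages, struct vm_area_struct **vmas);
	 */

	/* a DMA_FROM_DEVICE transfer writes into the user buffer, so: */
	unsigned int gup_flags = (dir == DMA_FROM_DEVICE) ? FOLL_WRITE : 0;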
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcca8891..b8a21d7b25d4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@ config ASUS_WMI
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
+ depends on SERIO_I8042 || SERIO_I8042 = n
---help---
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b93192..a2323941e677 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
},
},
+ {
+ .ident = "Lenovo YOGA 910-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
down_read(&current->mm->mmap_sem);
pinned = get_user_pages(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, 0,
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
page_list, NULL);
up_read(&current->mm->mmap_sem);
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 8b2558e7363e..968c3ae4535c 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -154,7 +154,7 @@ const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = {
UNIPHIER_RESET_END,
};
-const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = {
+const struct uniphier_reset_data uniphier_pro5_sd_reset_data[] = {
UNIPHIER_MIO_RESET_SD(0, 0),
UNIPHIER_MIO_RESET_SD(1, 1),
UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1),
@@ -360,7 +360,7 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-ld20-reset",
.data = uniphier_ld20_sys_reset_data,
},
- /* Media I/O reset */
+ /* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-sld3-mio-reset",
.data = uniphier_sld3_mio_reset_data,
@@ -378,20 +378,20 @@ static const struct of_device_id uniphier_reset_match[] = {
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-pro5-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pro5-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
- .compatible = "socionext,uniphier-pxs2-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-pxs2-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
{
.compatible = "socionext,uniphier-ld11-mio-reset",
.data = uniphier_sld3_mio_reset_data,
},
{
- .compatible = "socionext,uniphier-ld20-mio-reset",
- .data = uniphier_pro5_mio_reset_data,
+ .compatible = "socionext,uniphier-ld20-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
},
/* Peripheral reset */
{
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index d1e080701264..e859d148aba9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -208,14 +208,14 @@ config RTC_DRV_AS3722
will be called rtc-as3722.
config RTC_DRV_DS1307
- tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
+ tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025, ISL12057"
help
If you say yes here you get support for various compatible RTC
chips (often with battery backup) connected with I2C. This driver
should handle DS1307, DS1337, DS1338, DS1339, DS1340, ST M41T00,
- EPSON RX-8025 and probably other chips. In some cases the RTC
- must already have been initialized (by manufacturing or a
- bootloader).
+ EPSON RX-8025, Intersil ISL12057 and probably other chips. In some
+ cases the RTC must already have been initialized (by manufacturing or
+ a bootloader).
The first seven registers on these chips hold an RTC, and other
registers may add features such as NVRAM, a trickle charger for
@@ -234,6 +234,20 @@ config RTC_DRV_DS1307_HWMON
Say Y here if you want to expose temperature sensor data on
rtc-ds1307 (only DS3231)
+config RTC_DRV_DS1307_CENTURY
+ bool "Century bit support for rtc-ds1307"
+ depends on RTC_DRV_DS1307
+ default n
+ help
+ The DS1307 driver suffered from a bug where it was enabling the
+ century bit unconditionally but never used it when reading the time.
+ It made the driver unable to support dates beyond 2099.
+ Setting this option will add proper support for the century bit but if
+ the time was previously set using a kernel predating this option,
+ reading the date will return a date in the next century.
+ To solve that, you could boot a kernel without this option set, set
+ the RTC date and then boot a kernel with this option set.
+
config RTC_DRV_DS1374
tristate "Dallas/Maxim DS1374"
help
@@ -374,16 +388,6 @@ config RTC_DRV_ISL12022
This driver can also be built as a module. If so, the module
will be called rtc-isl12022.
-config RTC_DRV_ISL12057
- select REGMAP_I2C
- tristate "Intersil ISL12057"
- help
- If you say yes here you get support for the Intersil ISL12057
- I2C RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-isl12057.
-
config RTC_DRV_X1205
tristate "Xicor/Intersil X1205"
help
@@ -661,6 +665,7 @@ config RTC_DRV_DS1343
will be called rtc-ds1343.
config RTC_DRV_DS1347
+ select REGMAP_SPI
tristate "Dallas/Maxim DS1347"
help
If you say yes here you get support for the
@@ -1201,7 +1206,7 @@ comment "on-CPU RTC drivers"
config RTC_DRV_ASM9260
tristate "Alphascale asm9260 RTC"
- depends on MACH_ASM9260
+ depends on MACH_ASM9260 || COMPILE_TEST
help
If you say yes here you get support for the RTC on the
Alphascale asm9260 SoC.
@@ -1241,6 +1246,9 @@ config RTC_DRV_IMXDI
config RTC_DRV_OMAP
tristate "TI OMAP Real Time Clock"
depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
+ depends on OF
+ depends on PINCTRL
+ select GENERIC_PINCONF
help
Say "yes" here to support the on chip real time clock
present on TI OMAP1, AM33xx, DA8xx/OMAP-L13x, AM43xx and DRA7xx.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 8fb994bacdf7..1ac694a330c8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o
-obj-$(CONFIG_RTC_DRV_ISL12057) += rtc-isl12057.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o
obj-$(CONFIG_RTC_DRV_LP8788) += rtc-lp8788.o
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 70b4fd0f6122..9e336184491c 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -327,6 +327,8 @@ static int ac100_rtc_register_clks(struct ac100_rtc_dev *chip)
.flags = 0,
};
+ of_property_read_string_index(np, "clock-output-names",
+ i, &init.name);
clk->regmap = chip->regmap;
clk->offset = AC100_CLKOUT_CTRL1 + i;
clk->hw.init = &init;
@@ -552,6 +554,9 @@ static int ac100_rtc_probe(struct platform_device *pdev)
int ret;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, chip);
chip->dev = &pdev->dev;
chip->regmap = ac100->regmap;
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c
index 5219916ce11d..18a93d3e3f93 100644
--- a/drivers/rtc/rtc-asm9260.c
+++ b/drivers/rtc/rtc-asm9260.c
@@ -112,8 +112,6 @@ struct asm9260_rtc_priv {
void __iomem *iobase;
struct rtc_device *rtc;
struct clk *clk;
- /* io lock */
- spinlock_t lock;
};
static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
@@ -122,11 +120,15 @@ static irqreturn_t asm9260_rtc_irq(int irq, void *dev_id)
u32 isr;
unsigned long events = 0;
+ mutex_lock(&priv->rtc->ops_lock);
isr = ioread32(priv->iobase + HW_CIIR);
- if (!isr)
+ if (!isr) {
+ mutex_unlock(&priv->rtc->ops_lock);
return IRQ_NONE;
+ }
iowrite32(0, priv->iobase + HW_CIIR);
+ mutex_unlock(&priv->rtc->ops_lock);
events |= RTC_AF | RTC_IRQF;
@@ -139,9 +141,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
u32 ctime0, ctime1, ctime2;
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
ctime0 = ioread32(priv->iobase + HW_CTIME0);
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
@@ -155,7 +155,6 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
ctime1 = ioread32(priv->iobase + HW_CTIME1);
ctime2 = ioread32(priv->iobase + HW_CTIME2);
}
- spin_unlock_irqrestore(&priv->lock, irq_flags);
tm->tm_sec = (ctime0 >> BM_CTIME0_SEC_S) & BM_CTIME0_SEC_M;
tm->tm_min = (ctime0 >> BM_CTIME0_MIN_S) & BM_CTIME0_MIN_M;
@@ -174,9 +173,7 @@ static int asm9260_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
/*
* make sure the SEC counter will not flip the other counters while the time
* is written; the real value will be written at the end of the sequence.
@@ -191,7 +188,6 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
iowrite32(tm->tm_hour, priv->iobase + HW_HOUR);
iowrite32(tm->tm_min, priv->iobase + HW_MIN);
iowrite32(tm->tm_sec, priv->iobase + HW_SEC);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
@@ -199,9 +195,7 @@ static int asm9260_rtc_set_time(struct device *dev, struct rtc_time *tm)
static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
alrm->time.tm_year = ioread32(priv->iobase + HW_ALYEAR);
alrm->time.tm_mon = ioread32(priv->iobase + HW_ALMON);
alrm->time.tm_mday = ioread32(priv->iobase + HW_ALDOM);
@@ -213,7 +207,6 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->enabled = ioread32(priv->iobase + HW_AMR) ? 1 : 0;
alrm->pending = ioread32(priv->iobase + HW_CIIR) ? 1 : 0;
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return rtc_valid_tm(&alrm->time);
}
@@ -221,9 +214,7 @@ static int asm9260_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct asm9260_rtc_priv *priv = dev_get_drvdata(dev);
- unsigned long irq_flags;
- spin_lock_irqsave(&priv->lock, irq_flags);
iowrite32(alrm->time.tm_year, priv->iobase + HW_ALYEAR);
iowrite32(alrm->time.tm_mon, priv->iobase + HW_ALMON);
iowrite32(alrm->time.tm_mday, priv->iobase + HW_ALDOM);
@@ -234,7 +225,6 @@ static int asm9260_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
iowrite32(alrm->time.tm_sec, priv->iobase + HW_ALSEC);
iowrite32(alrm->enabled ? 0 : BM_AMR_OFF, priv->iobase + HW_AMR);
- spin_unlock_irqrestore(&priv->lock, irq_flags);
return 0;
}
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index 83ac2337c0f7..de8bf56a41e7 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -187,7 +187,7 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
return ret;
}
-static struct rtc_class_ops at32_rtc_ops = {
+static const struct rtc_class_ops at32_rtc_ops = {
.read_time = at32_rtc_readtime,
.set_time = at32_rtc_settime,
.read_alarm = at32_rtc_readalarm,
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 0299988b4f13..397742446007 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -93,8 +93,15 @@ static int bq32k_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (error)
return error;
+ /*
+ * In case of oscillator failure, the register contents should be
+ * considered invalid. The flag is cleared the next time the RTC is set.
+ */
+ if (regs.minutes & BQ32K_OF)
+ return -EINVAL;
+
tm->tm_sec = bcd2bin(regs.seconds & BQ32K_SECONDS_MASK);
- tm->tm_min = bcd2bin(regs.minutes & BQ32K_SECONDS_MASK);
+ tm->tm_min = bcd2bin(regs.minutes & BQ32K_MINUTES_MASK);
tm->tm_hour = bcd2bin(regs.cent_hours & BQ32K_HOURS_MASK);
tm->tm_mday = bcd2bin(regs.date);
tm->tm_wday = bcd2bin(regs.day) - 1;
@@ -204,13 +211,10 @@ static int bq32k_probe(struct i2c_client *client,
/* Check Oscillator Failure flag */
error = bq32k_read(dev, &reg, BQ32K_MINUTES, 1);
- if (!error && (reg & BQ32K_OF)) {
- dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
- reg &= ~BQ32K_OF;
- error = bq32k_write(dev, &reg, BQ32K_MINUTES, 1);
- }
if (error)
return error;
+ if (reg & BQ32K_OF)
+ dev_warn(dev, "Oscillator Failure. Check RTC battery.\n");
if (client->dev.of_node)
trickle_charger_of_init(dev, client->dev.of_node);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 43745cac0141..dd3d59806ffa 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -62,6 +62,8 @@ struct cmos_rtc {
u8 day_alrm;
u8 mon_alrm;
u8 century;
+
+ struct rtc_wkalrm saved_wkalrm;
};
/* both platform and pnp busses use negative numbers for invalid irqs */
@@ -707,6 +709,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
+ hpet_rtc_timer_init();
+
if (is_valid_irq(rtc_irq)) {
irq_handler_t rtc_cmos_int_handler;
@@ -714,6 +718,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rtc_cmos_int_handler = hpet_rtc_interrupt;
retval = hpet_register_irq_handler(cmos_interrupt);
if (retval) {
+ hpet_mask_rtc_irq_bit(RTC_IRQMASK);
dev_warn(dev, "hpet_register_irq_handler "
" failed in rtc_init().");
goto cleanup1;
@@ -729,7 +734,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
}
- hpet_rtc_timer_init();
/* export at least the first block of NVRAM */
nvram.size = address_space - NVRAM_OFFSET;
@@ -844,8 +848,6 @@ static int cmos_aie_poweroff(struct device *dev)
return retval;
}
-#ifdef CONFIG_PM
-
static int cmos_suspend(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -877,6 +879,8 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
+ cmos_read_alarm(dev, &cmos->saved_wkalrm);
+
dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -892,12 +896,32 @@ static int cmos_suspend(struct device *dev)
*/
static inline int cmos_poweroff(struct device *dev)
{
+ if (!IS_ENABLED(CONFIG_PM))
+ return -ENOSYS;
+
return cmos_suspend(dev);
}
-#ifdef CONFIG_PM_SLEEP
+static void cmos_check_wkalrm(struct device *dev)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_wkalrm current_alarm;
+ time64_t t_current_expires;
+ time64_t t_saved_expires;
+
+ cmos_read_alarm(dev, &current_alarm);
+ t_current_expires = rtc_tm_to_time64(&current_alarm.time);
+ t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
+ if (t_current_expires != t_saved_expires ||
+ cmos->saved_wkalrm.enabled != current_alarm.enabled) {
+ cmos_set_alarm(dev, &cmos->saved_wkalrm);
+ }
+}
+
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control);
-static int cmos_resume(struct device *dev)
+static int __maybe_unused cmos_resume(struct device *dev)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
@@ -910,6 +934,9 @@ static int cmos_resume(struct device *dev)
cmos->enabled_wake = 0;
}
+ /* The BIOS might have changed the alarm, restore it */
+ cmos_check_wkalrm(dev);
+
spin_lock_irq(&rtc_lock);
tmp = cmos->suspend_ctrl;
cmos->suspend_ctrl = 0;
@@ -936,6 +963,9 @@ static int cmos_resume(struct device *dev)
tmp &= ~RTC_AIE;
hpet_mask_rtc_irq_bit(RTC_AIE);
} while (mask & RTC_AIE);
+
+ if (tmp & RTC_AIE)
+ cmos_check_acpi_rtc_status(dev, &tmp);
}
spin_unlock_irq(&rtc_lock);
@@ -944,16 +974,6 @@ static int cmos_resume(struct device *dev)
return 0;
}
-#endif
-#else
-
-static inline int cmos_poweroff(struct device *dev)
-{
- return -ENOSYS;
-}
-
-#endif
-
static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
/*----------------------------------------------------------------*/
@@ -973,6 +993,20 @@ static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
static u32 rtc_handler(void *context)
{
struct device *dev = context;
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ unsigned char rtc_control = 0;
+ unsigned char rtc_intr;
+
+ spin_lock_irq(&rtc_lock);
+ if (cmos_rtc.suspend_ctrl)
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (rtc_control & RTC_AIE) {
+ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+ }
+ spin_unlock_irq(&rtc_lock);
pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1039,12 +1073,39 @@ static void cmos_wake_setup(struct device *dev)
device_init_wakeup(dev, 1);
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ acpi_event_status rtc_status;
+ acpi_status status;
+
+ if (acpi_gbl_FADT.flags & ACPI_FADT_FIXED_RTC)
+ return;
+
+ status = acpi_get_event_status(ACPI_EVENT_RTC, &rtc_status);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Could not get RTC status\n");
+ } else if (rtc_status & ACPI_EVENT_FLAG_SET) {
+ unsigned char mask;
+ *rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(*rtc_control, RTC_CONTROL);
+ mask = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, mask);
+ }
+}
+
#else
static void cmos_wake_setup(struct device *dev)
{
}
+static void cmos_check_acpi_rtc_status(struct device *dev,
+ unsigned char *rtc_control)
+{
+}
+
#endif
#ifdef CONFIG_PNP
@@ -1206,9 +1267,7 @@ static struct platform_driver cmos_platform_driver = {
.shutdown = cmos_platform_shutdown,
.driver = {
.name = driver_name,
-#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
-#endif
.of_match_table = of_match_ptr(of_cmos_match),
}
};
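The rtc-cmos changes above also drop the nested #ifdef CONFIG_PM / CONFIG_PM_SLEEP blocks in favour of IS_ENABLED() and __maybe_unused, so the suspend/resume paths are always compiled and only discarded by the optimiser when power management is off. A small illustrative sketch of that idiom (the function names here are hypothetical, not the driver's):

	#include <linux/device.h>
	#include <linux/errno.h>

	static int example_suspend(struct device *dev)
	{
		/* real suspend work would go here */
		return 0;
	}

	static int __maybe_unused example_poweroff(struct device *dev)
	{
		/* always parsed and type-checked; folds to -ENOSYS when PM is off */
		if (!IS_ENABLED(CONFIG_PM))
			return -ENOSYS;

		return example_suspend(dev);
	}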
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 101b7a240e0f..cfc4141d99cd 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -140,7 +140,7 @@ static int coh901331_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops coh901331_ops = {
+static const struct rtc_class_ops coh901331_ops = {
.read_time = coh901331_read_time,
.set_mmss = coh901331_set_mmss,
.read_alarm = coh901331_read_alarm,
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index dba60c1dfce2..caf35567e14c 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -469,7 +469,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops davinci_rtc_ops = {
+static const struct rtc_class_ops davinci_rtc_ops = {
.ioctl = davinci_rtc_ioctl,
.read_time = davinci_rtc_read_time,
.set_time = davinci_rtc_set_time,
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 8d05596a6765..b253bf1b3531 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -159,7 +159,7 @@ static int dc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops dc_rtc_ops = {
+static const struct rtc_class_ops dc_rtc_ops = {
.read_time = dc_rtc_read_time,
.set_mmss = dc_rtc_set_mmss,
.read_alarm = dc_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index f5dd09fe5add..0ec4be62322b 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -102,7 +102,7 @@ static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time)
return rtc_valid_tm(time);
}
-static struct rtc_class_ops ds1302_rtc_ops = {
+static const struct rtc_class_ops ds1302_rtc_ops = {
.read_time = ds1302_rtc_get_time,
.set_time = ds1302_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 8e1c5cb6ece6..4e31036ee259 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -186,6 +186,7 @@ static const struct i2c_device_id ds1307_id[] = {
{ "mcp7941x", mcp794xx },
{ "pt7c4338", ds_1307 },
{ "rx8025", rx_8025 },
+ { "isl12057", ds_1337 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
@@ -382,10 +383,25 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
t->tm_mday = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
tmp = ds1307->regs[DS1307_REG_MONTH] & 0x1f;
t->tm_mon = bcd2bin(tmp) - 1;
-
- /* assume 20YY not 19YY, and ignore DS1337_BIT_CENTURY */
t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100;
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ case ds_1340:
+ if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY)
+ t->tm_year += 100;
+ break;
+ default:
+ break;
+ }
+#endif
+
dev_dbg(dev, "%s secs=%d, mins=%d, "
"hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
"read", t->tm_sec, t->tm_min,
@@ -409,6 +425,27 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
t->tm_hour, t->tm_mday,
t->tm_mon, t->tm_year, t->tm_wday);
+#ifdef CONFIG_RTC_DRV_DS1307_CENTURY
+ if (t->tm_year < 100)
+ return -EINVAL;
+
+ switch (ds1307->type) {
+ case ds_1337:
+ case ds_1339:
+ case ds_3231:
+ case ds_1340:
+ if (t->tm_year > 299)
+ return -EINVAL;
+ default:
+ if (t->tm_year > 199)
+ return -EINVAL;
+ break;
+ }
+#else
+ if (t->tm_year < 100 || t->tm_year > 199)
+ return -EINVAL;
+#endif
+
buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
buf[DS1307_REG_MIN] = bin2bcd(t->tm_min);
buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
@@ -424,11 +461,13 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
case ds_1337:
case ds_1339:
case ds_3231:
- buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
break;
case ds_1340:
- buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
- | DS1340_BIT_CENTURY;
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN;
+ if (t->tm_year > 199)
+ buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY;
break;
case mcp794xx:
/*
@@ -1295,6 +1334,11 @@ static int ds1307_probe(struct i2c_client *client,
if (of_property_read_bool(client->dev.of_node, "wakeup-source")) {
ds1307_can_wakeup_device = true;
}
+ /* Intersil ISL12057 DT backward compatibility */
+ if (of_property_read_bool(client->dev.of_node,
+ "isil,irq2-can-wakeup-machine")) {
+ ds1307_can_wakeup_device = true;
+ }
#endif
switch (ds1307->type) {
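With the century handling added above, tm_year stays an offset from 1900; the century bit on DS1337/DS1339/DS3231 (month register) and DS1340 (hour register) just extends the accepted range by a hundred years. A small sketch of the resulting mapping, assuming the option semantics described in the Kconfig hunk earlier and using the hunk's struct rtc_time *t:

	/* without CONFIG_RTC_DRV_DS1307_CENTURY:
	 *	tm_year 100..199  ->  2000..2099   (century bit ignored)
	 * with it, on chips that have the bit:
	 *	tm_year 100..199  ->  2000..2099   (century bit clear)
	 *	tm_year 200..299  ->  2100..2199   (century bit set)
	 */
	bool century = t->tm_year > 199;	/* e.g. tm_year == 216 -> year 2116 */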
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 641e8e8a0dd7..ccfc9d43eb1e 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -18,6 +18,7 @@
#include <linux/rtc.h>
#include <linux/spi/spi.h>
#include <linux/bcd.h>
+#include <linux/regmap.h>
/* Registers in ds1347 rtc */
@@ -32,37 +33,28 @@
#define DS1347_STATUS_REG 0x17
#define DS1347_CLOCK_BURST 0x3F
-static int ds1347_read_reg(struct device *dev, unsigned char address,
- unsigned char *data)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- *data = address | 0x80;
-
- return spi_write_then_read(spi, data, 1, data, 1);
-}
-
-static int ds1347_write_reg(struct device *dev, unsigned char address,
- unsigned char data)
-{
- struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[2];
-
- buf[0] = address & 0x7F;
- buf[1] = data;
+static const struct regmap_range ds1347_ranges[] = {
+ {
+ .range_min = DS1347_SECONDS_REG,
+ .range_max = DS1347_STATUS_REG,
+ },
+};
- return spi_write_then_read(spi, buf, 2, NULL, 0);
-}
+static const struct regmap_access_table ds1347_access_table = {
+ .yes_ranges = ds1347_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ds1347_ranges),
+};
static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
+ struct regmap *map;
int err;
unsigned char buf[8];
- buf[0] = DS1347_CLOCK_BURST | 0x80;
+ map = spi_get_drvdata(spi);
- err = spi_write_then_read(spi, buf, 1, buf, 8);
+ err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8);
if (err)
return err;
@@ -80,25 +72,27 @@ static int ds1347_read_time(struct device *dev, struct rtc_time *dt)
static int ds1347_set_time(struct device *dev, struct rtc_time *dt)
{
struct spi_device *spi = to_spi_device(dev);
- unsigned char buf[9];
+ struct regmap *map;
+ unsigned char buf[8];
+
+ map = spi_get_drvdata(spi);
- buf[0] = DS1347_CLOCK_BURST & 0x7F;
- buf[1] = bin2bcd(dt->tm_sec);
- buf[2] = bin2bcd(dt->tm_min);
- buf[3] = (bin2bcd(dt->tm_hour) & 0x3F);
- buf[4] = bin2bcd(dt->tm_mday);
- buf[5] = bin2bcd(dt->tm_mon + 1);
- buf[6] = bin2bcd(dt->tm_wday + 1);
+ buf[0] = bin2bcd(dt->tm_sec);
+ buf[1] = bin2bcd(dt->tm_min);
+ buf[2] = (bin2bcd(dt->tm_hour) & 0x3F);
+ buf[3] = bin2bcd(dt->tm_mday);
+ buf[4] = bin2bcd(dt->tm_mon + 1);
+ buf[5] = bin2bcd(dt->tm_wday + 1);
/* the year in Linux is counted from 1900, i.e. it is in the range of 100;
in the RTC it is stored as 00 to 99 */
dt->tm_year = dt->tm_year % 100;
- buf[7] = bin2bcd(dt->tm_year);
- buf[8] = bin2bcd(0x00);
+ buf[6] = bin2bcd(dt->tm_year);
+ buf[7] = bin2bcd(0x00);
/* write the rtc settings */
- return spi_write_then_read(spi, buf, 9, NULL, 0);
+ return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8);
}
static const struct rtc_class_ops ds1347_rtc_ops = {
@@ -109,35 +103,53 @@ static const struct rtc_class_ops ds1347_rtc_ops = {
static int ds1347_probe(struct spi_device *spi)
{
struct rtc_device *rtc;
- unsigned char data;
+ struct regmap_config config;
+ struct regmap *map;
+ unsigned int data;
int res;
+ memset(&config, 0, sizeof(config));
+ config.reg_bits = 8;
+ config.val_bits = 8;
+ config.read_flag_mask = 0x80;
+ config.max_register = 0x3F;
+ config.wr_table = &ds1347_access_table;
+
/* spi setup with ds1347 in mode 3 and bits per word as 8 */
spi->mode = SPI_MODE_3;
spi->bits_per_word = 8;
spi_setup(spi);
+ map = devm_regmap_init_spi(spi, &config);
+
+ if (IS_ERR(map)) {
+ dev_err(&spi->dev, "ds1347 regmap init spi failed\n");
+ return PTR_ERR(map);
+ }
+
+ spi_set_drvdata(spi, map);
+
/* RTC Settings */
- res = ds1347_read_reg(&spi->dev, DS1347_SECONDS_REG, &data);
+ res = regmap_read(map, DS1347_SECONDS_REG, &data);
if (res)
return res;
/* Disable the write protect of rtc */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
data = data & ~(1<<7);
- ds1347_write_reg(&spi->dev, DS1347_CONTROL_REG, data);
+ regmap_write(map, DS1347_CONTROL_REG, data);
/* Enable the oscillator, disable the oscillator stop flag,
and the glitch filter to reduce current consumption */
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
data = data & 0x1B;
- ds1347_write_reg(&spi->dev, DS1347_STATUS_REG, data);
+ regmap_write(map, DS1347_STATUS_REG, data);
/* display the settings */
- ds1347_read_reg(&spi->dev, DS1347_CONTROL_REG, &data);
+ regmap_read(map, DS1347_CONTROL_REG, &data);
dev_info(&spi->dev, "DS1347 RTC CTRL Reg = 0x%02x\n", data);
- ds1347_read_reg(&spi->dev, DS1347_STATUS_REG, &data);
+ regmap_read(map, DS1347_STATUS_REG, &data);
dev_info(&spi->dev, "DS1347 RTC Status Reg = 0x%02x\n", data);
rtc = devm_rtc_device_register(&spi->dev, "ds1347",
@@ -146,8 +158,6 @@ static int ds1347_probe(struct spi_device *spi)
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- spi_set_drvdata(spi, rtc);
-
return 0;
}
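The rtc-ds1347 conversion above replaces the hand-rolled SPI helpers, which OR'd 0x80 into the register address for reads and masked it off for writes, with regmap's read_flag_mask doing the same job. A minimal sketch of the idiom (a condensed illustration with a hypothetical helper name, not a verbatim copy of the probe code):

	#include <linux/err.h>
	#include <linux/regmap.h>
	#include <linux/spi/spi.h>

	static int ds1347_regmap_setup(struct spi_device *spi, struct regmap **map)
	{
		static const struct regmap_config config = {
			.reg_bits	= 8,
			.val_bits	= 8,
			.read_flag_mask	= 0x80,	/* bit 7 in the address selects a read */
			.max_register	= 0x3F,
		};

		*map = devm_regmap_init_spi(spi, &config);
		return PTR_ERR_OR_ZERO(*map);
	}

With that in place, regmap_read()/regmap_write()/regmap_bulk_read() apply the read/write address bit automatically, which is why the burst reads and writes in the hunk shrink to single regmap calls.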
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index b57505efadbc..688debc14348 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -110,7 +110,7 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm)
return 0;
}
-static struct rtc_class_ops gemini_rtc_ops = {
+static const struct rtc_class_ops gemini_rtc_ops = {
.read_time = gemini_rtc_read_time,
.set_time = gemini_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c
deleted file mode 100644
index 0e7f0f52bfe4..000000000000
--- a/drivers/rtc/rtc-isl12057.c
+++ /dev/null
@@ -1,643 +0,0 @@
-/*
- * rtc-isl12057 - Driver for Intersil ISL12057 I2C Real Time Clock
- *
- * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
- *
- * This work is largely based on Intersil ISL1208 driver developed by
- * Hebert Valerio Riedel <hvr@gnu.org>.
- *
- * Detailed datasheet on which this development is based is available here:
- *
- * http://natisbad.org/NAS2/refs/ISL12057.pdf
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/rtc.h>
-#include <linux/i2c.h>
-#include <linux/bcd.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define DRV_NAME "rtc-isl12057"
-
-/* RTC section */
-#define ISL12057_REG_RTC_SC 0x00 /* Seconds */
-#define ISL12057_REG_RTC_MN 0x01 /* Minutes */
-#define ISL12057_REG_RTC_HR 0x02 /* Hours */
-#define ISL12057_REG_RTC_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_RTC_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_RTC_DW 0x03 /* Day of the Week */
-#define ISL12057_REG_RTC_DT 0x04 /* Date */
-#define ISL12057_REG_RTC_MO 0x05 /* Month */
-#define ISL12057_REG_RTC_MO_CEN BIT(7) /* Century bit */
-#define ISL12057_REG_RTC_YR 0x06 /* Year */
-#define ISL12057_RTC_SEC_LEN 7
-
-/* Alarm 1 section */
-#define ISL12057_REG_A1_SC 0x07 /* Alarm 1 Seconds */
-#define ISL12057_REG_A1_MN 0x08 /* Alarm 1 Minutes */
-#define ISL12057_REG_A1_HR 0x09 /* Alarm 1 Hours */
-#define ISL12057_REG_A1_HR_PM BIT(5) /* AM/PM bit in 12h format */
-#define ISL12057_REG_A1_HR_MIL BIT(6) /* 24h/12h format */
-#define ISL12057_REG_A1_DWDT 0x0A /* Alarm 1 Date / Day of the week */
-#define ISL12057_REG_A1_DWDT_B BIT(6) /* DW / DT selection bit */
-#define ISL12057_A1_SEC_LEN 4
-
-/* Alarm 2 section */
-#define ISL12057_REG_A2_MN 0x0B /* Alarm 2 Minutes */
-#define ISL12057_REG_A2_HR 0x0C /* Alarm 2 Hours */
-#define ISL12057_REG_A2_DWDT 0x0D /* Alarm 2 Date / Day of the week */
-#define ISL12057_A2_SEC_LEN 3
-
-/* Control/Status registers */
-#define ISL12057_REG_INT 0x0E
-#define ISL12057_REG_INT_A1IE BIT(0) /* Alarm 1 interrupt enable bit */
-#define ISL12057_REG_INT_A2IE BIT(1) /* Alarm 2 interrupt enable bit */
-#define ISL12057_REG_INT_INTCN BIT(2) /* Interrupt control enable bit */
-#define ISL12057_REG_INT_RS1 BIT(3) /* Freq out control bit 1 */
-#define ISL12057_REG_INT_RS2 BIT(4) /* Freq out control bit 2 */
-#define ISL12057_REG_INT_EOSC BIT(7) /* Oscillator enable bit */
-
-#define ISL12057_REG_SR 0x0F
-#define ISL12057_REG_SR_A1F BIT(0) /* Alarm 1 interrupt bit */
-#define ISL12057_REG_SR_A2F BIT(1) /* Alarm 2 interrupt bit */
-#define ISL12057_REG_SR_OSF BIT(7) /* Oscillator failure bit */
-
-/* Register memory map length */
-#define ISL12057_MEM_MAP_LEN 0x10
-
-struct isl12057_rtc_data {
- struct rtc_device *rtc;
- struct regmap *regmap;
- struct mutex lock;
- int irq;
-};
-
-static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs)
-{
- tm->tm_sec = bcd2bin(regs[ISL12057_REG_RTC_SC]);
- tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]);
-
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f);
- if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM)
- tm->tm_hour += 12;
- } else { /* 24 hour mode */
- tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x3f);
- }
-
- tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]);
- tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */
- tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */
- tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100;
-
- /* Check if years register has overflown from 99 to 00 */
- if (regs[ISL12057_REG_RTC_MO] & ISL12057_REG_RTC_MO_CEN)
- tm->tm_year += 100;
-}
-
-static int isl12057_rtc_tm_to_regs(u8 *regs, struct rtc_time *tm)
-{
- u8 century_bit;
-
- /*
- * The clock has an 8 bit wide bcd-coded register for the year.
- * It also has a century bit encoded in MO flag which provides
- * information about overflow of year register from 99 to 00.
- * tm_year is an offset from 1900 and we are interested in the
- * 2000-2199 range, so any value less than 100 or larger than
- * 299 is invalid.
- */
- if (tm->tm_year < 100 || tm->tm_year > 299)
- return -EINVAL;
-
- century_bit = (tm->tm_year > 199) ? ISL12057_REG_RTC_MO_CEN : 0;
-
- regs[ISL12057_REG_RTC_SC] = bin2bcd(tm->tm_sec);
- regs[ISL12057_REG_RTC_MN] = bin2bcd(tm->tm_min);
- regs[ISL12057_REG_RTC_HR] = bin2bcd(tm->tm_hour); /* 24-hour format */
- regs[ISL12057_REG_RTC_DT] = bin2bcd(tm->tm_mday);
- regs[ISL12057_REG_RTC_MO] = bin2bcd(tm->tm_mon + 1) | century_bit;
- regs[ISL12057_REG_RTC_YR] = bin2bcd(tm->tm_year % 100);
- regs[ISL12057_REG_RTC_DW] = bin2bcd(tm->tm_wday + 1);
-
- return 0;
-}
-
-/*
- * Try and match register bits w/ fixed null values to see whether we
- * are dealing with an ISL12057. Note: this function is called early
- * during init and hence does need mutex protection.
- */
-static int isl12057_i2c_validate_chip(struct regmap *regmap)
-{
- u8 regs[ISL12057_MEM_MAP_LEN];
- static const u8 mask[ISL12057_MEM_MAP_LEN] = { 0x80, 0x80, 0x80, 0xf8,
- 0xc0, 0x60, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x7c };
- int ret, i;
-
- ret = regmap_bulk_read(regmap, 0, regs, ISL12057_MEM_MAP_LEN);
- if (ret)
- return ret;
-
- for (i = 0; i < ISL12057_MEM_MAP_LEN; ++i) {
- if (regs[i] & mask[i]) /* check if bits are cleared */
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int _isl12057_rtc_clear_alarm(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret)
- dev_err(dev, "%s: clearing alarm failed (%d)\n", __func__, ret);
-
- return ret;
-}
-
-static int _isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- ret = regmap_update_bits(data->regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_A1IE,
- enable ? ISL12057_REG_INT_A1IE : 0);
- if (ret)
- dev_err(dev, "%s: changing alarm interrupt flag failed (%d)\n",
- __func__, ret);
-
- return ret;
-}
-
-/*
- * Note: as we only read from device and do not perform any update, there is
- * no need for an equivalent function which would try and get driver's main
- * lock. Here, it is safe for everyone if we just use regmap internal lock
- * on the device when reading.
- */
-static int _isl12057_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- unsigned int sr;
- int ret;
-
- ret = regmap_read(data->regmap, ISL12057_REG_SR, &sr);
- if (ret) {
- dev_err(dev, "%s: unable to read oscillator status flag (%d)\n",
- __func__, ret);
- goto out;
- } else {
- if (sr & ISL12057_REG_SR_OSF) {
- ret = -ENODATA;
- goto out;
- }
- }
-
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret)
- dev_err(dev, "%s: unable to read RTC time section (%d)\n",
- __func__, ret);
-
-out:
- if (ret)
- return ret;
-
- isl12057_rtc_regs_to_tm(tm, regs);
-
- return rtc_valid_tm(tm);
-}
-
-static int isl12057_rtc_update_alarm(struct device *dev, int enable)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_update_alarm(dev, enable);
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- u8 regs[ISL12057_A1_SEC_LEN];
- unsigned int ir;
- int ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_read(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: reading alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm_tm->tm_sec = bcd2bin(regs[0] & 0x7f);
- alarm_tm->tm_min = bcd2bin(regs[1] & 0x7f);
- alarm_tm->tm_hour = bcd2bin(regs[2] & 0x3f);
- alarm_tm->tm_mday = bcd2bin(regs[3] & 0x3f);
-
- ret = regmap_read(data->regmap, ISL12057_REG_INT, &ir);
- if (ret) {
- dev_err(dev, "%s: reading alarm interrupt flag failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- alarm->enabled = !!(ir & ISL12057_REG_INT_A1IE);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- struct rtc_time *alarm_tm = &alarm->time;
- unsigned long rtc_secs, alarm_secs;
- u8 regs[ISL12057_A1_SEC_LEN];
- struct rtc_time rtc_tm;
- int ret, enable = 1;
-
- mutex_lock(&data->lock);
- ret = _isl12057_rtc_read_time(dev, &rtc_tm);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- ret = rtc_tm_to_time(alarm_tm, &alarm_secs);
- if (ret)
- goto err_unlock;
-
- /* If alarm time is before current time, disable the alarm */
- if (!alarm->enabled || alarm_secs <= rtc_secs) {
- enable = 0;
- } else {
- /*
- * Chip only support alarms up to one month in the future. Let's
- * return an error if we get something after that limit.
- * Comparison is done by incrementing rtc_tm month field by one
- * and checking alarm value is still below.
- */
- if (rtc_tm.tm_mon == 11) { /* handle year wrapping */
- rtc_tm.tm_mon = 0;
- rtc_tm.tm_year += 1;
- } else {
- rtc_tm.tm_mon += 1;
- }
-
- ret = rtc_tm_to_time(&rtc_tm, &rtc_secs);
- if (ret)
- goto err_unlock;
-
- if (alarm_secs > rtc_secs) {
- dev_err(dev, "%s: max for alarm is one month (%d)\n",
- __func__, ret);
- ret = -EINVAL;
- goto err_unlock;
- }
- }
-
- /* Disable the alarm before modifying it */
- ret = _isl12057_rtc_update_alarm(dev, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to disable the alarm (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Program alarm registers */
- regs[0] = bin2bcd(alarm_tm->tm_sec) & 0x7f;
- regs[1] = bin2bcd(alarm_tm->tm_min) & 0x7f;
- regs[2] = bin2bcd(alarm_tm->tm_hour) & 0x3f;
- regs[3] = bin2bcd(alarm_tm->tm_mday) & 0x3f;
-
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_A1_SC, regs,
- ISL12057_A1_SEC_LEN);
- if (ret < 0) {
- dev_err(dev, "%s: writing alarm section failed (%d)\n",
- __func__, ret);
- goto err_unlock;
- }
-
- /* Enable or disable alarm */
- ret = _isl12057_rtc_update_alarm(dev, enable);
-
-err_unlock:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-static int isl12057_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
- u8 regs[ISL12057_RTC_SEC_LEN];
- int ret;
-
- ret = isl12057_rtc_tm_to_regs(regs, tm);
- if (ret)
- return ret;
-
- mutex_lock(&data->lock);
- ret = regmap_bulk_write(data->regmap, ISL12057_REG_RTC_SC, regs,
- ISL12057_RTC_SEC_LEN);
- if (ret) {
- dev_err(dev, "%s: unable to write RTC time section (%d)\n",
- __func__, ret);
- goto out;
- }
-
- /*
- * Now that RTC time has been updated, let's clear oscillator
- * failure flag, if needed.
- */
- ret = regmap_update_bits(data->regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_OSF, 0);
- if (ret < 0)
- dev_err(dev, "%s: unable to clear osc. failure bit (%d)\n",
- __func__, ret);
-
-out:
- mutex_unlock(&data->lock);
-
- return ret;
-}
-
-/*
- * Check current RTC status and enable/disable what needs to be. Return 0 if
- * everything went ok and a negative value upon error. Note: this function
- * is called early during init and hence does need mutex protection.
- */
-static int isl12057_check_rtc_status(struct device *dev, struct regmap *regmap)
-{
- int ret;
-
- /* Enable oscillator if not already running */
- ret = regmap_update_bits(regmap, ISL12057_REG_INT,
- ISL12057_REG_INT_EOSC, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to enable oscillator (%d)\n",
- __func__, ret);
- return ret;
- }
-
- /* Clear alarm bit if needed */
- ret = regmap_update_bits(regmap, ISL12057_REG_SR,
- ISL12057_REG_SR_A1F, 0);
- if (ret < 0) {
- dev_err(dev, "%s: unable to clear alarm bit (%d)\n",
- __func__, ret);
- return ret;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_OF
-/*
- * One would expect the device to be marked as a wakeup source only
- * when an IRQ pin of the RTC is routed to an interrupt line of the
- * CPU. In practice, such an IRQ pin can be connected to a PMIC and
- * this allows the device to be powered up when RTC alarm rings. This
- * is for instance the case on ReadyNAS 102, 104 and 2120. On those
- * devices with no IRQ driectly connected to the SoC, the RTC chip
- * can be forced as a wakeup source by stating that explicitly in
- * the device's .dts file using the "wakeup-source" boolean property.
- * This will guarantee 'wakealarm' sysfs entry is available on the device.
- *
- * The function below returns 1, i.e. the capability of the chip to
- * wakeup the device, based on IRQ availability or if the boolean
- * property has been set in the .dts file. Otherwise, it returns 0.
- */
-
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return data->irq || of_property_read_bool(dev->of_node, "wakeup-source")
- || of_property_read_bool(dev->of_node, /* legacy */
- "isil,irq2-can-wakeup-machine");
-}
-#else
-static bool isl12057_can_wakeup_machine(struct device *dev)
-{
- struct isl12057_rtc_data *data = dev_get_drvdata(dev);
-
- return !!data->irq;
-}
-#endif
-
-static int isl12057_rtc_alarm_irq_enable(struct device *dev,
- unsigned int enable)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
- int ret = -ENOTTY;
-
- if (rtc_data->irq)
- ret = isl12057_rtc_update_alarm(dev, enable);
-
- return ret;
-}
-
-static irqreturn_t isl12057_rtc_interrupt(int irq, void *data)
-{
- struct i2c_client *client = data;
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(&client->dev);
- struct rtc_device *rtc = rtc_data->rtc;
- int ret, handled = IRQ_NONE;
- unsigned int sr;
-
- ret = regmap_read(rtc_data->regmap, ISL12057_REG_SR, &sr);
- if (!ret && (sr & ISL12057_REG_SR_A1F)) {
- dev_dbg(&client->dev, "RTC alarm!\n");
-
- rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);
-
- /* Acknowledge and disable the alarm */
- _isl12057_rtc_clear_alarm(&client->dev);
- _isl12057_rtc_update_alarm(&client->dev, 0);
-
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static const struct rtc_class_ops rtc_ops = {
- .read_time = _isl12057_rtc_read_time,
- .set_time = isl12057_rtc_set_time,
- .read_alarm = isl12057_rtc_read_alarm,
- .set_alarm = isl12057_rtc_set_alarm,
- .alarm_irq_enable = isl12057_rtc_alarm_irq_enable,
-};
-
-static const struct regmap_config isl12057_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
-static int isl12057_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct device *dev = &client->dev;
- struct isl12057_rtc_data *data;
- struct regmap *regmap;
- int ret;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK))
- return -ENODEV;
-
- regmap = devm_regmap_init_i2c(client, &isl12057_rtc_regmap_config);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- dev_err(dev, "%s: regmap allocation failed (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ret = isl12057_i2c_validate_chip(regmap);
- if (ret)
- return ret;
-
- ret = isl12057_check_rtc_status(dev, regmap);
- if (ret)
- return ret;
-
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_init(&data->lock);
- data->regmap = regmap;
- dev_set_drvdata(dev, data);
-
- if (client->irq > 0) {
- ret = devm_request_threaded_irq(dev, client->irq, NULL,
- isl12057_rtc_interrupt,
- IRQF_SHARED|IRQF_ONESHOT,
- DRV_NAME, client);
- if (!ret)
- data->irq = client->irq;
- else
- dev_err(dev, "%s: irq %d unavailable (%d)\n", __func__,
- client->irq, ret);
- }
-
- if (isl12057_can_wakeup_machine(dev))
- device_init_wakeup(dev, true);
-
- data->rtc = devm_rtc_device_register(dev, DRV_NAME, &rtc_ops,
- THIS_MODULE);
- ret = PTR_ERR_OR_ZERO(data->rtc);
- if (ret) {
- dev_err(dev, "%s: unable to register RTC device (%d)\n",
- __func__, ret);
- goto err;
- }
-
- /* We cannot support UIE mode if we do not have an IRQ line */
- if (!data->irq)
- data->rtc->uie_unsupported = 1;
-
-err:
- return ret;
-}
-
-static int isl12057_remove(struct i2c_client *client)
-{
- if (isl12057_can_wakeup_machine(&client->dev))
- device_init_wakeup(&client->dev, false);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int isl12057_rtc_suspend(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return enable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-
-static int isl12057_rtc_resume(struct device *dev)
-{
- struct isl12057_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (rtc_data->irq && device_may_wakeup(dev))
- return disable_irq_wake(rtc_data->irq);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(isl12057_rtc_pm_ops, isl12057_rtc_suspend,
- isl12057_rtc_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id isl12057_dt_match[] = {
- { .compatible = "isl,isl12057" }, /* for backward compat., don't use */
- { .compatible = "isil,isl12057" },
- { },
-};
-MODULE_DEVICE_TABLE(of, isl12057_dt_match);
-#endif
-
-static const struct i2c_device_id isl12057_id[] = {
- { "isl12057", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, isl12057_id);
-
-static struct i2c_driver isl12057_driver = {
- .driver = {
- .name = DRV_NAME,
- .pm = &isl12057_rtc_pm_ops,
- .of_match_table = of_match_ptr(isl12057_dt_match),
- },
- .probe = isl12057_probe,
- .remove = isl12057_remove,
- .id_table = isl12057_id,
-};
-module_i2c_driver(isl12057_driver);
-
-MODULE_AUTHOR("Arnaud EBALARD <arno@natisbad.org>");
-MODULE_DESCRIPTION("Intersil ISL12057 RTC driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b2bcfc0bf2e5..5e14651b71a8 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -174,7 +174,7 @@ static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable);
}
-static struct rtc_class_ops jz4740_rtc_ops = {
+static const struct rtc_class_ops jz4740_rtc_ops = {
.read_time = jz4740_rtc_read_time,
.set_mmss = jz4740_rtc_set_mmss,
.read_alarm = jz4740_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c
index 025bb33b9cd2..4021fd04cb0a 100644
--- a/drivers/rtc/rtc-mcp795.c
+++ b/drivers/rtc/rtc-mcp795.c
@@ -151,7 +151,7 @@ static int mcp795_read_time(struct device *dev, struct rtc_time *tim)
return rtc_valid_tm(tim);
}
-static struct rtc_class_ops mcp795_rtc_ops = {
+static const struct rtc_class_ops mcp795_rtc_ops = {
.read_time = mcp795_read_time,
.set_time = mcp795_set_time
};
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 44f622c3e048..1a61fa56f3ad 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@ exit:
return ret;
}
-static struct rtc_class_ops mtk_rtc_ops = {
+static const struct rtc_class_ops mtk_rtc_ops = {
.read_time = mtk_rtc_read_time,
.set_time = mtk_rtc_set_time,
.read_alarm = mtk_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index 09fc1c19f0df..b1b6b3041bfb 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -214,7 +214,7 @@ static int nuc900_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static struct rtc_class_ops nuc900_rtc_ops = {
+static const struct rtc_class_ops nuc900_rtc_ops = {
.read_time = nuc900_rtc_read_time,
.set_time = nuc900_rtc_set_time,
.read_alarm = nuc900_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index ec2e9c5fb993..b04ea9b5ae67 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -13,19 +13,23 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/kernel.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/io.h>
#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/rtc.h>
-#include <linux/bcd.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/io.h>
-#include <linux/clk.h>
+#include <linux/rtc.h>
/*
* The OMAP RTC is a year/month/day/hours/minutes/seconds BCD clock
@@ -115,6 +119,8 @@
/* OMAP_RTC_PMIC bit fields: */
#define OMAP_RTC_PMIC_POWER_EN_EN BIT(16)
+#define OMAP_RTC_PMIC_EXT_WKUP_EN(x) BIT(x)
+#define OMAP_RTC_PMIC_EXT_WKUP_POL(x) BIT(4 + x)
/* OMAP_RTC_KICKER values */
#define KICK0_VALUE 0x83e70b13
@@ -141,6 +147,7 @@ struct omap_rtc {
bool is_pmic_controller;
bool has_ext_clk;
const struct omap_rtc_device_type *type;
+ struct pinctrl_dev *pctldev;
};
static inline u8 rtc_read(struct omap_rtc *rtc, unsigned int reg)
@@ -469,7 +476,7 @@ static void omap_rtc_power_off(void)
mdelay(2500);
}
-static struct rtc_class_ops omap_rtc_ops = {
+static const struct rtc_class_ops omap_rtc_ops = {
.read_time = omap_rtc_read_time,
.set_time = omap_rtc_set_time,
.read_alarm = omap_rtc_read_alarm,
@@ -525,6 +532,139 @@ static const struct of_device_id omap_rtc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, omap_rtc_of_match);
+static const struct pinctrl_pin_desc rtc_pins_desc[] = {
+ PINCTRL_PIN(0, "ext_wakeup0"),
+ PINCTRL_PIN(1, "ext_wakeup1"),
+ PINCTRL_PIN(2, "ext_wakeup2"),
+ PINCTRL_PIN(3, "ext_wakeup3"),
+};
+
+static int rtc_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static const char *rtc_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int group)
+{
+ return NULL;
+}
+
+static const struct pinctrl_ops rtc_pinctrl_ops = {
+ .get_groups_count = rtc_pinctrl_get_groups_count,
+ .get_group_name = rtc_pinctrl_get_group_name,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+enum rtc_pin_config_param {
+ PIN_CONFIG_ACTIVE_HIGH = PIN_CONFIG_END + 1,
+};
+
+static const struct pinconf_generic_params rtc_params[] = {
+ {"ti,active-high", PIN_CONFIG_ACTIVE_HIGH, 0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item rtc_conf_items[ARRAY_SIZE(rtc_params)] = {
+ PCONFDUMP(PIN_CONFIG_ACTIVE_HIGH, "input active high", NULL, false),
+};
+#endif
+
+static int rtc_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int param = pinconf_to_config_param(*config);
+ u32 val;
+ u16 arg = 0;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (!(val & OMAP_RTC_PMIC_EXT_WKUP_EN(pin)))
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ if (val & OMAP_RTC_PMIC_EXT_WKUP_POL(pin))
+ return -EINVAL;
+ break;
+ default:
+ return -ENOTSUPP;
+ };
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return 0;
+}
+
+static int rtc_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct omap_rtc *rtc = pinctrl_dev_get_drvdata(pctldev);
+ u32 val;
+ unsigned int param;
+ u16 param_val;
+ int i;
+
+ rtc->type->unlock(rtc);
+ val = rtc_readl(rtc, OMAP_RTC_PMIC_REG);
+ rtc->type->lock(rtc);
+
+ /* active low by default */
+ val |= OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_INPUT_ENABLE:
+ if (param_val)
+ val |= OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ else
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_EN(pin);
+ break;
+ case PIN_CONFIG_ACTIVE_HIGH:
+ val &= ~OMAP_RTC_PMIC_EXT_WKUP_POL(pin);
+ break;
+ default:
+ dev_err(&rtc->rtc->dev, "Property %u not supported\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ rtc->type->unlock(rtc);
+ rtc_writel(rtc, OMAP_RTC_PMIC_REG, val);
+ rtc->type->lock(rtc);
+
+ return 0;
+}
+
+static const struct pinconf_ops rtc_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = rtc_pinconf_get,
+ .pin_config_set = rtc_pinconf_set,
+};
+
+static struct pinctrl_desc rtc_pinctrl_desc = {
+ .pins = rtc_pins_desc,
+ .npins = ARRAY_SIZE(rtc_pins_desc),
+ .pctlops = &rtc_pinctrl_ops,
+ .confops = &rtc_pinconf_ops,
+ .custom_params = rtc_params,
+ .num_custom_params = ARRAY_SIZE(rtc_params),
+#ifdef CONFIG_DEBUG_FS
+ .custom_conf_items = rtc_conf_items,
+#endif
+ .owner = THIS_MODULE,
+};
+
static int omap_rtc_probe(struct platform_device *pdev)
{
struct omap_rtc *rtc;
@@ -681,6 +821,15 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
}
+ /* Support ext_wakeup pinconf */
+ rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+
+ rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ if (IS_ERR(rtc->pctldev)) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return PTR_ERR(rtc->pctldev);
+ }
+
return 0;
err:
@@ -724,6 +873,9 @@ static int __exit omap_rtc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ /* Remove ext_wakeup pinconf */
+ pinctrl_unregister(rtc->pctldev);
+
return 0;
}
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 6080e0edef63..4bcfb88674d3 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -225,7 +225,7 @@ static irqreturn_t palmas_rtc_interrupt(int irq, void *context)
return IRQ_HANDLED;
}
-static struct rtc_class_ops palmas_rtc_ops = {
+static const struct rtc_class_ops palmas_rtc_ops = {
.read_time = palmas_rtc_read_time,
.set_time = palmas_rtc_set_time,
.read_alarm = palmas_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index b4478cc92b55..8895f77726e8 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -182,7 +182,8 @@ static ssize_t pcf2123_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
- const char *buffer, size_t count) {
+ const char *buffer, size_t count)
+{
struct pcf2123_sysfs_reg *r;
unsigned long reg;
unsigned long val;
@@ -199,7 +200,7 @@ static ssize_t pcf2123_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- pcf2123_write_reg(dev, reg, val);
+ ret = pcf2123_write_reg(dev, reg, val);
if (ret < 0)
return -EIO;
return count;
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index e6b6911c8e05..00c31c91b245 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -232,7 +232,7 @@ static int pcf50633_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static struct rtc_class_ops pcf50633_rtc_ops = {
+static const struct rtc_class_ops pcf50633_rtc_ops = {
.read_time = pcf50633_rtc_read_time,
.set_time = pcf50633_rtc_set_time,
.read_alarm = pcf50633_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 64e1e4578492..5cfb6df5c430 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -400,7 +400,6 @@ static struct platform_driver pic32_rtc_driver = {
.remove = pic32_rtc_remove,
.driver = {
.name = "pic32-rtc",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_rtc_dt_ids),
},
};
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 9a2f6a95d5a7..f9277e536f7e 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -52,11 +52,21 @@
#define RV8803_CTRL_TIE BIT(4)
#define RV8803_CTRL_UIE BIT(5)
+#define RX8900_BACKUP_CTRL 0x18
+#define RX8900_FLAG_SWOFF BIT(2)
+#define RX8900_FLAG_VDETOFF BIT(3)
+
+enum rv8803_type {
+ rv_8803,
+ rx_8900
+};
+
struct rv8803_data {
struct i2c_client *client;
struct rtc_device *rtc;
struct mutex flags_lock;
u8 ctrl;
+ enum rv8803_type type;
};
static int rv8803_read_reg(const struct i2c_client *client, u8 reg)
@@ -497,6 +507,35 @@ static struct rtc_class_ops rv8803_rtc_ops = {
.ioctl = rv8803_ioctl,
};
+static int rx8900_trickle_charger_init(struct rv8803_data *rv8803)
+{
+ struct i2c_client *client = rv8803->client;
+ struct device_node *node = client->dev.of_node;
+ int err;
+ u8 flags;
+
+ if (!node)
+ return 0;
+
+ if (rv8803->type != rx_8900)
+ return 0;
+
+ err = i2c_smbus_read_byte_data(rv8803->client, RX8900_BACKUP_CTRL);
+ if (err < 0)
+ return err;
+
+ flags = ~(RX8900_FLAG_VDETOFF | RX8900_FLAG_SWOFF) & (u8)err;
+
+ if (of_property_read_bool(node, "epson,vdet-disable"))
+ flags |= RX8900_FLAG_VDETOFF;
+
+ if (of_property_read_bool(node, "trickle-diode-disable"))
+ flags |= RX8900_FLAG_SWOFF;
+
+ return i2c_smbus_write_byte_data(rv8803->client, RX8900_BACKUP_CTRL,
+ flags);
+}
+
static int rv8803_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -517,6 +556,7 @@ static int rv8803_probe(struct i2c_client *client,
mutex_init(&rv8803->flags_lock);
rv8803->client = client;
+ rv8803->type = id->driver_data;
i2c_set_clientdata(client, rv8803);
flags = rv8803_read_reg(client, RV8803_FLAG);
@@ -558,6 +598,12 @@ static int rv8803_probe(struct i2c_client *client,
if (err)
return err;
+ err = rx8900_trickle_charger_init(rv8803);
+ if (err) {
+ dev_err(&client->dev, "failed to init charger\n");
+ return err;
+ }
+
err = device_create_bin_file(&client->dev, &rv8803_nvram_attr);
if (err)
return err;
@@ -575,8 +621,8 @@ static int rv8803_remove(struct i2c_client *client)
}
static const struct i2c_device_id rv8803_id[] = {
- { "rv8803", 0 },
- { "rx8900", 0 },
+ { "rv8803", rv_8803 },
+ { "rx8900", rx_8900 },
{ }
};
MODULE_DEVICE_TABLE(i2c, rv8803_id);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index bbad00b233bc..7c9c08eab5e5 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -317,7 +317,7 @@ static int rx6110_init(struct rx6110_data *rx6110)
return ret;
}
-static struct rtc_class_ops rx6110_rtc_ops = {
+static const struct rtc_class_ops rx6110_rtc_ops = {
.read_time = rx6110_get_time,
.set_time = rx6110_set_time,
};
@@ -388,7 +388,6 @@ MODULE_DEVICE_TABLE(spi, rx6110_id);
static struct spi_driver rx6110_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
- .owner = THIS_MODULE,
},
.probe = rx6110_probe,
.remove = rx6110_remove,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index 2b85cc7a24e7..91857d8d2df8 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -403,7 +403,7 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static struct rtc_class_ops rx8025_rtc_ops = {
+static const struct rtc_class_ops rx8025_rtc_ops = {
.read_time = rx8025_get_time,
.set_time = rx8025_set_time,
.read_alarm = rx8025_read_alarm,
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index f05ef8568480..e377f42abae7 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -343,7 +343,7 @@ static int spear_alarm_irq_enable(struct device *dev, unsigned int enabled)
return ret;
}
-static struct rtc_class_ops spear_rtc_ops = {
+static const struct rtc_class_ops spear_rtc_ops = {
.read_time = spear_rtc_read_time,
.set_time = spear_rtc_set_time,
.read_alarm = spear_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index e6aaaa52e7fe..d578e40d5a50 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -231,7 +231,7 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static struct rtc_class_ops stmp3xxx_rtc_ops = {
+static const struct rtc_class_ops stmp3xxx_rtc_ops = {
.alarm_irq_enable =
stmp3xxx_alarm_irq_enable,
.read_time = stmp3xxx_rtc_gettime,
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 63b9fb1318c2..1218d5d4224d 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -160,7 +160,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
unsigned long push = 0;
struct rtc_wkalrm alm;
struct rtc_device *rtc = to_rtc_device(dev);
- char *buf_ptr;
+ const char *buf_ptr;
int adjust = 0;
/* Only request alarms that trigger in the future. Disable them
@@ -171,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr,
return retval;
rtc_tm_to_time(&alm.time, &now);
- buf_ptr = (char *)buf;
+ buf_ptr = buf;
if (*buf_ptr == '+') {
buf_ptr++;
if (*buf_ptr == '=') {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 15ac597d54da..3853ba963bb5 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -291,7 +291,7 @@ static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct rtc_class_ops tegra_rtc_ops = {
+static const struct rtc_class_ops tegra_rtc_ops = {
.read_time = tegra_rtc_read_time,
.set_time = tegra_rtc_set_time,
.read_alarm = tegra_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 2dc787dc06c1..176720b7b9e5 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -462,7 +462,7 @@ out:
return ret;
}
-static struct rtc_class_ops twl_rtc_ops = {
+static const struct rtc_class_ops twl_rtc_ops = {
.read_time = twl_rtc_read_time,
.set_time = twl_rtc_set_time,
.read_alarm = twl_rtc_read_alarm,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 831935af7389..a7a88476e215 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1205,7 +1205,7 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
mdc, lpm);
return mdc;
}
- fcx_max_data = mdc * FCX_MAX_DATA_FACTOR;
+ fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
@@ -1675,7 +1675,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
" data size for zHPF requests failed\n");
return 0;
} else
- return mdc * FCX_MAX_DATA_FACTOR;
+ return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
/*
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 46be25c7461e..876c7e6e3a99 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -780,7 +780,7 @@ static int cfg_wait_idle(void)
static int __init chp_init(void)
{
struct chp_id chpid;
- int ret;
+ int state, ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
@@ -791,7 +791,9 @@ static int __init chp_init(void)
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
- if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
+ state = chp_info_get_status(chpid);
+ if (state == CHP_STATUS_CONFIGURED ||
+ state == CHP_STATUS_STANDBY)
chp_new(chpid);
}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 637cf8973c9e..581001989937 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
/* if (len > rec_len):
* dump data up to cap_len ignoring small duplicate in rec->payload
*/
- spin_lock_irqsave(&dbf->pay_lock, flags);
+ spin_lock(&dbf->pay_lock);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
payload->fsf_req_id = req_id;
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index db2739079cbb..790babc5ef66 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -353,7 +353,7 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
#endif
-static int probe_irq __initdata;
+static int probe_irq;
/**
* probe_intr - helper for IRQ autoprobe
@@ -365,7 +365,7 @@ static int probe_irq __initdata;
* used by the IRQ probe code.
*/
-static irqreturn_t __init probe_intr(int irq, void *dev_id)
+static irqreturn_t probe_intr(int irq, void *dev_id)
{
probe_irq = irq;
return IRQ_HANDLED;
@@ -380,7 +380,7 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
* and then looking to see what interrupt actually turned up.
*/
-static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
+static int __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
int possible)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 6a6906f847db..d9239c2d49b1 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -61,7 +61,7 @@ MODULE_PARM_DESC(be_max_phys_size,
"memory that can be allocated. Range is 16 - 128");
#define beiscsi_disp_param(_name)\
-ssize_t \
+static ssize_t \
beiscsi_##_name##_disp(struct device *dev,\
struct device_attribute *attrib, char *buf) \
{ \
@@ -74,7 +74,7 @@ beiscsi_##_name##_disp(struct device *dev,\
}
#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
-int \
+static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
if (val >= _minval && val <= _maxval) {\
@@ -93,7 +93,7 @@ beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
}
#define beiscsi_store_param(_name) \
-ssize_t \
+static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
struct device_attribute *attr, const char *buf,\
size_t count) \
@@ -112,7 +112,7 @@ beiscsi_##_name##_store(struct device *dev,\
}
#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
-int \
+static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
if (val >= _minval && val <= _maxval) {\
@@ -900,8 +900,9 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->io_sgl_lock);
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
if (phba->io_sgl_hndl_avbl) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In alloc_io_sgl_handle,"
@@ -919,14 +920,16 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
phba->io_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return psgl_handle;
}
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->io_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->io_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
"BM_%d : In free_,io_sgl_free_index=%d\n",
phba->io_sgl_free_index);
@@ -941,7 +944,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"value there=%p\n", phba->io_sgl_free_index,
phba->io_sgl_hndl_base
[phba->io_sgl_free_index]);
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
return;
}
phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
@@ -950,7 +953,7 @@ free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->io_sgl_free_index = 0;
else
phba->io_sgl_free_index++;
- spin_unlock_bh(&phba->io_sgl_lock);
+ spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}
static inline struct wrb_handle *
@@ -958,15 +961,16 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
unsigned int wrbs_per_cxn)
{
struct wrb_handle *pwrb_handle;
+ unsigned long flags;
- spin_lock_bh(&pwrb_context->wrb_lock);
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
pwrb_context->wrb_handles_available--;
if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
pwrb_context->alloc_index = 0;
else
pwrb_context->alloc_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
if (pwrb_handle)
memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
@@ -1001,14 +1005,16 @@ beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
struct wrb_handle *pwrb_handle,
unsigned int wrbs_per_cxn)
{
- spin_lock_bh(&pwrb_context->wrb_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
pwrb_context->wrb_handles_available++;
if (pwrb_context->free_index == (wrbs_per_cxn - 1))
pwrb_context->free_index = 0;
else
pwrb_context->free_index++;
- spin_unlock_bh(&pwrb_context->wrb_lock);
+ spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
/**
@@ -1037,8 +1043,9 @@ free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
struct sgl_handle *psgl_handle;
+ unsigned long flags;
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
if (phba->eh_sgl_hndl_avbl) {
psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
@@ -1056,14 +1063,16 @@ static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
phba->eh_sgl_alloc_index++;
} else
psgl_handle = NULL;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return psgl_handle;
}
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
- spin_lock_bh(&phba->mgmt_sgl_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BM_%d : In free_mgmt_sgl_handle,"
"eh_sgl_free_index=%d\n",
@@ -1078,7 +1087,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
"BM_%d : Double Free in eh SGL ,"
"eh_sgl_free_index=%d\n",
phba->eh_sgl_free_index);
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
return;
}
phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
@@ -1088,7 +1097,7 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
phba->eh_sgl_free_index = 0;
else
phba->eh_sgl_free_index++;
- spin_unlock_bh(&phba->mgmt_sgl_lock);
+ spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
static void
@@ -4584,7 +4593,7 @@ free_hndls:
io_task->cmd_bhs = NULL;
return -ENOMEM;
}
-int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
unsigned int writedir)
{
@@ -4973,7 +4982,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
return rc;
}
-void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
+static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
/* Set the logging parameter */
beiscsi_log_enable_init(phba, beiscsi_log_enable);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 7c0d7af0d3b7..0039bebaa9e2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -685,6 +685,11 @@ static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
req_completion);
csk->snd_nxt += len;
cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
+ } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
}
total_size += skb->truesize;
t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 516bd6c4f442..cbf010324c18 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -30,161 +30,41 @@
#include "NCR5380.h"
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/isapnp.h>
+#include <linux/isa.h>
+#include <linux/pnp.h>
#include <linux/interrupt.h>
+#define MAX_CARDS 8
+
+/* old-style parameters for compatibility */
static int ncr_irq;
-static int ncr_dma;
static int ncr_addr;
static int ncr_5380;
static int ncr_53c400;
static int ncr_53c400a;
static int dtc_3181e;
static int hp_c2502;
+module_param(ncr_irq, int, 0);
+module_param(ncr_addr, int, 0);
+module_param(ncr_5380, int, 0);
+module_param(ncr_53c400, int, 0);
+module_param(ncr_53c400a, int, 0);
+module_param(dtc_3181e, int, 0);
+module_param(hp_c2502, int, 0);
-static struct override {
- NCR5380_map_type NCR5380_map_name;
- int irq;
- int dma;
- int board; /* Use NCR53c400, Ricoh, etc. extensions ? */
-} overrides
-#ifdef GENERIC_NCR5380_OVERRIDE
-[] __initdata = GENERIC_NCR5380_OVERRIDE;
-#else
-[1] __initdata = { { 0,},};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-#ifndef MODULE
-
-/**
- * internal_setup - handle lilo command string override
- * @board: BOARD_* identifier for the board
- * @str: unused
- * @ints: numeric parameters
- *
- * Do LILO command line initialization of the overrides array. Display
- * errors when needed
- *
- * Locks: none
- */
-
-static void __init internal_setup(int board, char *str, int *ints)
-{
- static int commandline_current;
- switch (board) {
- case BOARD_NCR5380:
- if (ints[0] != 2 && ints[0] != 3) {
- printk(KERN_ERR "generic_NCR5380_setup : usage ncr5380=" STRVAL(NCR5380_map_name) ",irq,dma\n");
- return;
- }
- break;
- case BOARD_NCR53C400:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400_setup : usage ncr53c400=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_NCR53C400A:
- if (ints[0] != 2) {
- printk(KERN_ERR "generic_NCR53C400A_setup : usage ncr53c400a=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- case BOARD_DTC3181E:
- if (ints[0] != 2) {
- printk("generic_DTC3181E_setup : usage dtc3181e=" STRVAL(NCR5380_map_name) ",irq\n");
- return;
- }
- break;
- }
-
- if (commandline_current < NO_OVERRIDES) {
- overrides[commandline_current].NCR5380_map_name = (NCR5380_map_type) ints[1];
- overrides[commandline_current].irq = ints[2];
- if (ints[0] == 3)
- overrides[commandline_current].dma = ints[3];
- else
- overrides[commandline_current].dma = DMA_NONE;
- overrides[commandline_current].board = board;
- ++commandline_current;
- }
-}
-
-
-/**
- * do_NCR5380_setup - set up entry point
- * @str: unused
- *
- * Setup function invoked at boot to parse the ncr5380= command
- * line.
- */
-
-static int __init do_NCR5380_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR5380, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400= command
- * line.
- */
-
-static int __init do_NCR53C400_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400, str, ints);
- return 1;
-}
-
-/**
- * do_NCR53C400A_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the ncr53c400a= command
- * line.
- */
-
-static int __init do_NCR53C400A_setup(char *str)
-{
- int ints[10];
-
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_NCR53C400A, str, ints);
- return 1;
-}
-
-/**
- * do_DTC3181E_setup - set up entry point
- * @str: unused
- * @ints: integer parameters from kernel setup code
- *
- * Setup function invoked at boot to parse the dtc3181e= command
- * line.
- */
+static int irq[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(irq, int, NULL, 0);
+MODULE_PARM_DESC(irq, "IRQ number(s)");
-static int __init do_DTC3181E_setup(char *str)
-{
- int ints[10];
+static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+module_param_array(base, int, NULL, 0);
+MODULE_PARM_DESC(base, "base address(es)");
- get_options(str, ARRAY_SIZE(ints), ints);
- internal_setup(BOARD_DTC3181E, str, ints);
- return 1;
-}
+static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 };
+module_param_array(card, int, NULL, 0);
+MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)");
-#endif
+MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
/*
@@ -210,21 +90,9 @@ static void magic_configure(int idx, u8 irq, u8 magic[])
}
#endif
-/**
- * generic_NCR5380_detect - look for NCR5380 controllers
- * @tpnt: the scsi template
- *
- * Scan for the presence of NCR5380, NCR53C400, NCR53C400A, DTC3181E
- * and DTC436(ISAPnP) controllers. If overrides have been set we use
- * them.
- *
- * Locks: none
- */
-
-static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
+static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
+ struct device *pdev, int base, int irq, int board)
{
- static int current_override;
- int count;
unsigned int *ports;
u8 *magic = NULL;
#ifndef SCSI_G_NCR5380_MEM
@@ -232,272 +100,222 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
int port_idx = -1;
unsigned long region_size;
#endif
- static unsigned int __initdata ncr_53c400a_ports[] = {
+ static unsigned int ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
};
- static unsigned int __initdata dtc_3181e_ports[] = {
+ static unsigned int dtc_3181e_ports[] = {
0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0
};
- static u8 ncr_53c400a_magic[] __initdata = { /* 53C400A & DTC436 */
+ static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */
0x59, 0xb9, 0xc5, 0xae, 0xa6
};
- static u8 hp_c2502_magic[] __initdata = { /* HP C2502 */
+ static u8 hp_c2502_magic[] = { /* HP C2502 */
0x0f, 0x22, 0xf0, 0x20, 0x80
};
- int flags;
+ int flags, ret;
struct Scsi_Host *instance;
struct NCR5380_hostdata *hostdata;
#ifdef SCSI_G_NCR5380_MEM
- unsigned long base;
void __iomem *iomem;
resource_size_t iomem_size;
#endif
- if (ncr_irq)
- overrides[0].irq = ncr_irq;
- if (ncr_dma)
- overrides[0].dma = ncr_dma;
- if (ncr_addr)
- overrides[0].NCR5380_map_name = (NCR5380_map_type) ncr_addr;
- if (ncr_5380)
- overrides[0].board = BOARD_NCR5380;
- else if (ncr_53c400)
- overrides[0].board = BOARD_NCR53C400;
- else if (ncr_53c400a)
- overrides[0].board = BOARD_NCR53C400A;
- else if (dtc_3181e)
- overrides[0].board = BOARD_DTC3181E;
- else if (hp_c2502)
- overrides[0].board = BOARD_HP_C2502;
-#ifndef SCSI_G_NCR5380_MEM
- if (!current_override && isapnp_present()) {
- struct pnp_dev *dev = NULL;
- count = 0;
- while ((dev = pnp_find_dev(NULL, ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e), dev))) {
- if (count >= NO_OVERRIDES)
- break;
- if (pnp_device_attach(dev) < 0)
- continue;
- if (pnp_activate_dev(dev) < 0) {
- printk(KERN_ERR "dtc436e probe: activate failed\n");
- pnp_device_detach(dev);
- continue;
- }
- if (!pnp_port_valid(dev, 0)) {
- printk(KERN_ERR "dtc436e probe: no valid port\n");
- pnp_device_detach(dev);
- continue;
- }
- if (pnp_irq_valid(dev, 0))
- overrides[count].irq = pnp_irq(dev, 0);
- else
- overrides[count].irq = NO_IRQ;
- if (pnp_dma_valid(dev, 0))
- overrides[count].dma = pnp_dma(dev, 0);
- else
- overrides[count].dma = DMA_NONE;
- overrides[count].NCR5380_map_name = (NCR5380_map_type) pnp_port_start(dev, 0);
- overrides[count].board = BOARD_DTC3181E;
- count++;
- }
+ ports = NULL;
+ flags = 0;
+ switch (board) {
+ case BOARD_NCR5380:
+ flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
+ break;
+ case BOARD_NCR53C400A:
+ ports = ncr_53c400a_ports;
+ magic = ncr_53c400a_magic;
+ break;
+ case BOARD_HP_C2502:
+ ports = ncr_53c400a_ports;
+ magic = hp_c2502_magic;
+ break;
+ case BOARD_DTC3181E:
+ ports = dtc_3181e_ports;
+ magic = ncr_53c400a_magic;
+ break;
}
-#endif
-
- for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
- if (!(overrides[current_override].NCR5380_map_name))
- continue;
-
- ports = NULL;
- flags = 0;
- switch (overrides[current_override].board) {
- case BOARD_NCR5380:
- flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
- break;
- case BOARD_NCR53C400A:
- ports = ncr_53c400a_ports;
- magic = ncr_53c400a_magic;
- break;
- case BOARD_HP_C2502:
- ports = ncr_53c400a_ports;
- magic = hp_c2502_magic;
- break;
- case BOARD_DTC3181E:
- ports = dtc_3181e_ports;
- magic = ncr_53c400a_magic;
- break;
- }
#ifndef SCSI_G_NCR5380_MEM
- if (ports && magic) {
- /* wakeup sequence for the NCR53C400A and DTC3181E */
-
- /* Disable the adapter and look for a free io port */
- magic_configure(-1, 0, magic);
-
- region_size = 16;
-
- if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (overrides[current_override].NCR5380_map_name == ports[i])
- break;
- release_region(ports[i], region_size);
- } else
- for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], region_size, "ncr53c80"))
- continue;
- if (inb(ports[i]) == 0xff)
- break;
- release_region(ports[i], region_size);
+ if (ports && magic) {
+ /* wakeup sequence for the NCR53C400A and DTC3181E */
+
+ /* Disable the adapter and look for a free io port */
+ magic_configure(-1, 0, magic);
+
+ region_size = 16;
+ if (base)
+ for (i = 0; ports[i]; i++) {
+ if (base == ports[i]) { /* index found */
+ if (!request_region(ports[i],
+ region_size,
+ "ncr53c80"))
+ return -EBUSY;
+ break;
}
- if (ports[i]) {
- /* At this point we have our region reserved */
- magic_configure(i, 0, magic); /* no IRQ yet */
- outb(0xc0, ports[i] + 9);
- if (inb(ports[i] + 9) != 0x80)
- continue;
- overrides[current_override].NCR5380_map_name = ports[i];
- port_idx = i;
- } else
- continue;
- }
+ }
else
- {
- /* Not a 53C400A style setup - just grab */
- region_size = 8;
- if (!request_region(overrides[current_override].NCR5380_map_name,
- region_size, "ncr5380"))
- continue;
- }
+ for (i = 0; ports[i]; i++) {
+ if (!request_region(ports[i], region_size,
+ "ncr53c80"))
+ continue;
+ if (inb(ports[i]) == 0xff)
+ break;
+ release_region(ports[i], region_size);
+ }
+ if (ports[i]) {
+ /* At this point we have our region reserved */
+ magic_configure(i, 0, magic); /* no IRQ yet */
+ outb(0xc0, ports[i] + 9);
+ if (inb(ports[i] + 9) != 0x80) {
+ ret = -ENODEV;
+ goto out_release;
+ }
+ base = ports[i];
+ port_idx = i;
+ } else
+ return -EINVAL;
+ }
+ else
+ {
+ /* NCR5380 - no configuration, just grab */
+ region_size = 8;
+ if (!base || !request_region(base, region_size, "ncr5380"))
+ return -EBUSY;
+ }
#else
- base = overrides[current_override].NCR5380_map_name;
- iomem_size = NCR53C400_region_size;
- if (!request_mem_region(base, iomem_size, "ncr5380"))
- continue;
- iomem = ioremap(base, iomem_size);
- if (!iomem) {
- release_mem_region(base, iomem_size);
- continue;
- }
+ iomem_size = NCR53C400_region_size;
+ if (!request_mem_region(base, iomem_size, "ncr5380"))
+ return -EBUSY;
+ iomem = ioremap(base, iomem_size);
+ if (!iomem) {
+ release_mem_region(base, iomem_size);
+ return -ENOMEM;
+ }
#endif
- instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
- if (instance == NULL)
- goto out_release;
- hostdata = shost_priv(instance);
+ instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
+ if (instance == NULL) {
+ ret = -ENOMEM;
+ goto out_release;
+ }
+ hostdata = shost_priv(instance);
#ifndef SCSI_G_NCR5380_MEM
- instance->io_port = overrides[current_override].NCR5380_map_name;
- instance->n_io_port = region_size;
- hostdata->io_width = 1; /* 8-bit PDMA by default */
-
- /*
- * On NCR53C400 boards, NCR5380 registers are mapped 8 past
- * the base address.
- */
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- instance->io_port += 8;
- hostdata->c400_ctl_status = 0;
- hostdata->c400_blk_cnt = 1;
- hostdata->c400_host_buf = 4;
- break;
- case BOARD_DTC3181E:
- hostdata->io_width = 2; /* 16-bit PDMA */
- /* fall through */
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- hostdata->c400_ctl_status = 9;
- hostdata->c400_blk_cnt = 10;
- hostdata->c400_host_buf = 8;
- break;
- }
+ instance->io_port = base;
+ instance->n_io_port = region_size;
+ hostdata->io_width = 1; /* 8-bit PDMA by default */
+
+ /*
+ * On NCR53C400 boards, NCR5380 registers are mapped 8 past
+ * the base address.
+ */
+ switch (board) {
+ case BOARD_NCR53C400:
+ instance->io_port += 8;
+ hostdata->c400_ctl_status = 0;
+ hostdata->c400_blk_cnt = 1;
+ hostdata->c400_host_buf = 4;
+ break;
+ case BOARD_DTC3181E:
+ hostdata->io_width = 2; /* 16-bit PDMA */
+ /* fall through */
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ hostdata->c400_ctl_status = 9;
+ hostdata->c400_blk_cnt = 10;
+ hostdata->c400_host_buf = 8;
+ break;
+ }
#else
- instance->base = overrides[current_override].NCR5380_map_name;
- hostdata->iomem = iomem;
- hostdata->iomem_size = iomem_size;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- hostdata->c400_ctl_status = 0x100;
- hostdata->c400_blk_cnt = 0x101;
- hostdata->c400_host_buf = 0x104;
- break;
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
- goto out_unregister;
- }
+ instance->base = base;
+ hostdata->iomem = iomem;
+ hostdata->iomem_size = iomem_size;
+ switch (board) {
+ case BOARD_NCR53C400:
+ hostdata->c400_ctl_status = 0x100;
+ hostdata->c400_blk_cnt = 0x101;
+ hostdata->c400_host_buf = 0x104;
+ break;
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
+ ret = -EINVAL;
+ goto out_unregister;
+ }
#endif
- if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
- goto out_unregister;
+ ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
+ if (ret)
+ goto out_unregister;
- switch (overrides[current_override].board) {
- case BOARD_NCR53C400:
- case BOARD_DTC3181E:
- case BOARD_NCR53C400A:
- case BOARD_HP_C2502:
- NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
- }
+ switch (board) {
+ case BOARD_NCR53C400:
+ case BOARD_DTC3181E:
+ case BOARD_NCR53C400A:
+ case BOARD_HP_C2502:
+ NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
+ }
- NCR5380_maybe_reset_bus(instance);
+ NCR5380_maybe_reset_bus(instance);
- if (overrides[current_override].irq != IRQ_AUTO)
- instance->irq = overrides[current_override].irq;
- else
- instance->irq = NCR5380_probe_irq(instance, 0xffff);
+ if (irq != IRQ_AUTO)
+ instance->irq = irq;
+ else
+ instance->irq = NCR5380_probe_irq(instance, 0xffff);
- /* Compatibility with documented NCR5380 kernel parameters */
- if (instance->irq == 255)
- instance->irq = NO_IRQ;
+ /* Compatibility with documented NCR5380 kernel parameters */
+ if (instance->irq == 255)
+ instance->irq = NO_IRQ;
- if (instance->irq != NO_IRQ) {
+ if (instance->irq != NO_IRQ) {
#ifndef SCSI_G_NCR5380_MEM
- /* set IRQ for HP C2502 */
- if (overrides[current_override].board == BOARD_HP_C2502)
- magic_configure(port_idx, instance->irq, magic);
+ /* set IRQ for HP C2502 */
+ if (board == BOARD_HP_C2502)
+ magic_configure(port_idx, instance->irq, magic);
#endif
- if (request_irq(instance->irq, generic_NCR5380_intr,
- 0, "NCR5380", instance)) {
- printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
- }
- }
-
- if (instance->irq == NO_IRQ) {
- printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
- printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
+ if (request_irq(instance->irq, generic_NCR5380_intr,
+ 0, "NCR5380", instance)) {
+ printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
+ instance->irq = NO_IRQ;
}
+ }
- ++current_override;
- ++count;
+ if (instance->irq == NO_IRQ) {
+ printk(KERN_INFO "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
+ printk(KERN_INFO "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
}
- return count;
+ ret = scsi_add_host(instance, pdev);
+ if (ret)
+ goto out_free_irq;
+ scsi_scan_host(instance);
+ dev_set_drvdata(pdev, instance);
+ return 0;
+
+out_free_irq:
+ if (instance->irq != NO_IRQ)
+ free_irq(instance->irq, instance);
+ NCR5380_exit(instance);
out_unregister:
- scsi_unregister(instance);
+ scsi_host_put(instance);
out_release:
#ifndef SCSI_G_NCR5380_MEM
- release_region(overrides[current_override].NCR5380_map_name, region_size);
+ release_region(base, region_size);
#else
iounmap(iomem);
release_mem_region(base, iomem_size);
#endif
- return count;
+ return ret;
}
-/**
- * generic_NCR5380_release_resources - free resources
- * @instance: host adapter to clean up
- *
- * Free the generic interface resources from this adapter.
- *
- * Locks: none
- */
-
-static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
+static void generic_NCR5380_release_resources(struct Scsi_Host *instance)
{
+ scsi_remove_host(instance);
if (instance->irq != NO_IRQ)
free_irq(instance->irq, instance);
NCR5380_exit(instance);
@@ -511,7 +329,7 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
release_mem_region(instance->base, hostdata->iomem_size);
}
#endif
- return 0;
+ scsi_host_put(instance);
}
/**
@@ -701,10 +519,9 @@ static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
#include "NCR5380.c"
static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
.proc_name = DRV_MODULE_NAME,
.name = "Generic NCR5380/NCR53C400 SCSI",
- .detect = generic_NCR5380_detect,
- .release = generic_NCR5380_release_resources,
.info = generic_NCR5380_info,
.queuecommand = generic_NCR5380_queue_command,
.eh_abort_handler = generic_NCR5380_abort,
@@ -718,31 +535,115 @@ static struct scsi_host_template driver_template = {
.max_sectors = 128,
};
-#include "scsi_module.c"
-module_param(ncr_irq, int, 0);
-module_param(ncr_dma, int, 0);
-module_param(ncr_addr, int, 0);
-module_param(ncr_5380, int, 0);
-module_param(ncr_53c400, int, 0);
-module_param(ncr_53c400a, int, 0);
-module_param(dtc_3181e, int, 0);
-module_param(hp_c2502, int, 0);
-MODULE_LICENSE("GPL");
+static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev)
+{
+ int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev],
+ irq[ndev], card[ndev]);
+ if (ret) {
+ if (base[ndev])
+ printk(KERN_WARNING "Card not found at address 0x%03x\n",
+ base[ndev]);
+ return 0;
+ }
-#if !defined(SCSI_G_NCR5380_MEM) && defined(MODULE)
-static struct isapnp_device_id id_table[] = {
- {
- ISAPNP_ANY_ID, ISAPNP_ANY_ID,
- ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
- 0},
- {0}
+ return 1;
+}
+
+static int generic_NCR5380_isa_remove(struct device *pdev,
+ unsigned int ndev)
+{
+ generic_NCR5380_release_resources(dev_get_drvdata(pdev));
+ dev_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static struct isa_driver generic_NCR5380_isa_driver = {
+ .match = generic_NCR5380_isa_match,
+ .remove = generic_NCR5380_isa_remove,
+ .driver = {
+ .name = DRV_MODULE_NAME
+ },
+};
+
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+static struct pnp_device_id generic_NCR5380_pnp_ids[] = {
+ { .id = "DTC436e", .driver_data = BOARD_DTC3181E },
+ { .id = "" }
+};
+MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids);
+
+static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev,
+ const struct pnp_device_id *id)
+{
+ int base, irq;
+
+ if (pnp_activate_dev(pdev) < 0)
+ return -EBUSY;
+
+ base = pnp_port_start(pdev, 0);
+ irq = pnp_irq(pdev, 0);
+
+ return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq,
+ id->driver_data);
+}
+
+static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev)
+{
+ generic_NCR5380_release_resources(pnp_get_drvdata(pdev));
+ pnp_set_drvdata(pdev, NULL);
+}
+
+static struct pnp_driver generic_NCR5380_pnp_driver = {
+ .name = DRV_MODULE_NAME,
+ .id_table = generic_NCR5380_pnp_ids,
+ .probe = generic_NCR5380_pnp_probe,
+ .remove = generic_NCR5380_pnp_remove,
};
+#endif /* !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP) */
+
+static int pnp_registered, isa_registered;
+
+static int __init generic_NCR5380_init(void)
+{
+ int ret = 0;
+
+ /* compatibility with old-style parameters */
+ if (irq[0] == 0 && base[0] == 0 && card[0] == -1) {
+ irq[0] = ncr_irq;
+ base[0] = ncr_addr;
+ if (ncr_5380)
+ card[0] = BOARD_NCR5380;
+ if (ncr_53c400)
+ card[0] = BOARD_NCR53C400;
+ if (ncr_53c400a)
+ card[0] = BOARD_NCR53C400A;
+ if (dtc_3181e)
+ card[0] = BOARD_DTC3181E;
+ if (hp_c2502)
+ card[0] = BOARD_HP_C2502;
+ }
-MODULE_DEVICE_TABLE(isapnp, id_table);
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (!pnp_register_driver(&generic_NCR5380_pnp_driver))
+ pnp_registered = 1;
#endif
+ ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS);
+ if (!ret)
+ isa_registered = 1;
+
+ return (pnp_registered || isa_registered) ? 0 : ret;
+}
+
+static void __exit generic_NCR5380_exit(void)
+{
+#if !defined(SCSI_G_NCR5380_MEM) && defined(CONFIG_PNP)
+ if (pnp_registered)
+ pnp_unregister_driver(&generic_NCR5380_pnp_driver);
+#endif
+ if (isa_registered)
+ isa_unregister_driver(&generic_NCR5380_isa_driver);
+}
-__setup("ncr5380=", do_NCR5380_setup);
-__setup("ncr53c400=", do_NCR53C400_setup);
-__setup("ncr53c400a=", do_NCR53C400A_setup);
-__setup("dtc3181e=", do_DTC3181E_setup);
+module_init(generic_NCR5380_init);
+module_exit(generic_NCR5380_exit);
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 595177428d76..b175b9234458 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,15 +14,9 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#define __STRVAL(x) #x
-#define STRVAL(x) __STRVAL(x)
-
#ifndef SCSI_G_NCR5380_MEM
#define DRV_MODULE_NAME "g_NCR5380"
-#define NCR5380_map_type int
-#define NCR5380_map_name port
-
#define NCR5380_read(reg) \
inb(instance->io_port + (reg))
#define NCR5380_write(reg, value) \
@@ -38,8 +32,6 @@
/* therefore SCSI_G_NCR5380_MEM */
#define DRV_MODULE_NAME "g_NCR5380_mmio"
-#define NCR5380_map_type unsigned long
-#define NCR5380_map_name base
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
#define NCR53C400_region_size 0x3a00
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a8762a3efeef..532474109624 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
- char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
}
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+ schedule_work(&ioa_cfg->work_q);
hostrcb = ipr_get_free_hostrcb(ioa_cfg);
- kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c051694bfcb0..f9b6fba689ff 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -791,9 +791,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
free_task:
/* regular RX path uses back_lock */
- spin_lock_bh(&session->back_lock);
+ spin_lock(&session->back_lock);
__iscsi_put_task(task);
- spin_unlock_bh(&session->back_lock);
+ spin_unlock(&session->back_lock);
return NULL;
}
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 54d446c9f56e..b8d3b97b217a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
};
static const struct scsi_dh_blist scsi_dh_blist[] = {
- {"DGC", "RAID", "clariion" },
- {"DGC", "DISK", "clariion" },
- {"DGC", "VRAID", "clariion" },
+ {"DGC", "RAID", "emc" },
+ {"DGC", "DISK", "emc" },
+ {"DGC", "VRAID", "emc" },
{"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
{"COMPAQ", "HSV110", "hp_sw" },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 212e98d940bc..6f7128f49c30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
enum scsi_scan_mode rescan)
{
- char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
unsigned int length;
u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
}
}
- sprintf(devname, "host %d channel %d id %d",
- shost->host_no, sdev->channel, sdev->id);
-
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
* plus the number of luns we are requesting. 511 was the default
@@ -1470,12 +1466,12 @@ retry:
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
res = get_user_pages_unlocked(
uaddr,
nr_pages,
- rw == READ,
- 0, /* don't force */
- pages);
+ pages,
+ rw == READ ? FOLL_WRITE : 0); /* don't force */
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 47966909286d..e27b4d4e6ae2 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -63,7 +63,7 @@ config SCSI_UFSHCD_PCI
config SCSI_UFS_DWC_TC_PCI
tristate "DesignWare pci support using a G210 Test Chip"
- depends on SCSI_UFSHCD && PCI
+ depends on SCSI_UFSHCD_PCI
---help---
Synopsys Test Chip is a PHY for prototyping purposes.
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index ee4ab85e2801..22f881e9253a 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -25,6 +25,7 @@
#define UFS_VENDOR_TOSHIBA 0x198
#define UFS_VENDOR_SAMSUNG 0x1CE
+#define UFS_VENDOR_SKHYNIX 0x1AD
/**
* ufs_device_info - ufs device details
@@ -145,6 +146,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
UFS_DEVICE_QUIRK_PA_TACTIVATE),
UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
END_FIX
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 37f3c51e9d92..05c745663c10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1266,9 +1266,12 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
0, query->request.query_func, 0, 0);
- /* Data segment length */
- ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
- 0, 0, len >> 8, (u8)len);
+	/* Data segment length is only needed for WRITE_DESC */
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ ucd_req_ptr->header.dword_2 =
+ UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
+ else
+ ucd_req_ptr->header.dword_2 = 0;
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
@@ -6500,6 +6503,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
if (IS_ERR(hba->devfreq)) {
dev_err(hba->dev, "Unable to register with devfreq %ld\n",
PTR_ERR(hba->devfreq));
+ err = PTR_ERR(hba->devfreq);
goto out_remove_scsi_host;
}
/* Suspend devfreq until the UFS device is detected */
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index fe42a2fdf351..e6e90e80519a 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,6 +1,7 @@
menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/bcm/Kconfig"
+source "drivers/soc/fsl/qbman/Kconfig"
source "drivers/soc/fsl/qe/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 203307fd92c1..75e1f5334821 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -2,5 +2,6 @@
# Makefile for the Linux Kernel SOC fsl specific device drivers
#
+obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig
new file mode 100644
index 000000000000..757033c0586c
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Kconfig
@@ -0,0 +1,67 @@
+menuconfig FSL_DPAA
+ bool "Freescale DPAA 1.x support"
+ depends on FSL_SOC_BOOKE
+ select GENERIC_ALLOCATOR
+ help
+ The Freescale Data Path Acceleration Architecture (DPAA) is a set of
+ hardware components on specific QorIQ multicore processors.
+ This architecture provides the infrastructure to support simplified
+ sharing of networking interfaces and accelerators by multiple CPUs.
+ The major h/w blocks composing DPAA are BMan and QMan.
+
+ The Buffer Manager (BMan) is a hardware buffer pool management block
+ that allows software and accelerators on the datapath to acquire and
+ release buffers in order to build frames.
+
+ The Queue Manager (QMan) is a hardware queue management block
+ that allows software and accelerators on the datapath to enqueue and
+ dequeue frames in order to communicate.
+
+if FSL_DPAA
+
+config FSL_DPAA_CHECKING
+ bool "Additional driver checking"
+ help
+ Compiles in additional checks, to sanity-check the drivers and
+ any use of the exported API. Not recommended for performance.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ help
+ Compile the BMan self-test code. These tests will
+ exercise the BMan APIs to confirm functionality
+ of both the software drivers and hardware device.
+
+config FSL_BMAN_TEST_API
+ bool "High-level API self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine
+ to the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ help
+ Compile self-test code for QMan.
+
+config FSL_QMAN_TEST_API
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_TEST_STASH
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ help
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+endif # FSL_DPAA
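For concreteness, a minimal sketch of how a datapath user might exercise the buffer-pool model described in the FSL_DPAA help text above, using the pool API this series exports from bman.c (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()). The helper name and buffer address are illustrative placeholders and the error handling is simplified.

/* Sketch only: round-trips one buffer through a BMan pool. */
#include <linux/kernel.h>
#include <soc/fsl/bman.h>

static int bman_pool_roundtrip_example(void)
{
	struct bman_pool *pool;
	struct bm_buffer buf;
	int ret;

	pool = bman_new_pool();			/* allocates a free BPID */
	if (!pool)
		return -ENODEV;

	/* Seed the pool with one buffer (illustrative 48-bit address) */
	bm_buffer_set64(&buf, 0x0123456740ULL);
	ret = bman_release(pool, &buf, 1);
	if (ret)
		goto out;

	/* Acquire it back; a non-negative return is the number of buffers */
	ret = bman_acquire(pool, &buf, 1);
out:
	bman_free_pool(pool);			/* drains and frees the BPID */
	return ret < 0 ? ret : 0;
}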
diff --git a/drivers/soc/fsl/qbman/Makefile b/drivers/soc/fsl/qbman/Makefile
new file mode 100644
index 000000000000..7ae199f1664e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_FSL_DPAA) += bman_ccsr.o qman_ccsr.o \
+ bman_portal.o qman_portal.o \
+ bman.o qman.o
+
+obj-$(CONFIG_FSL_BMAN_TEST) += bman-test.o
+bman-test-y = bman_test.o
+bman-test-$(CONFIG_FSL_BMAN_TEST_API) += bman_test_api.o
+
+obj-$(CONFIG_FSL_QMAN_TEST) += qman-test.o
+qman-test-y = qman_test.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_API) += qman_test_api.o
+qman-test-$(CONFIG_FSL_QMAN_TEST_STASH) += qman_test_stash.o
diff --git a/drivers/soc/fsl/qbman/bman.c b/drivers/soc/fsl/qbman/bman.c
new file mode 100644
index 000000000000..ffa48fdbb1a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman.c
@@ -0,0 +1,797 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IER 0x0e04
+#define BM_REG_ISDR 0x0e08
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode,
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+/* Release Command */
+struct bm_rcr_entry {
+ union {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
+ u8 __reserved1[62];
+ };
+ struct bm_buffer bufs[8];
+ };
+};
+#define BM_RCR_VERB_VBIT 0x80
+#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
+#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
+#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
+#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+struct bm_mc_command {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 bpid; /* used by acquire command */
+ u8 __reserved[62];
+};
+#define BM_MCC_VERB_VBIT 0x80
+#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
+#define BM_MCC_VERB_CMD_ACQUIRE 0x10
+#define BM_MCC_VERB_CMD_QUERY 0x40
+#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */
+
+/* MC result, Acquire and Query Response */
+union bm_mc_result {
+ struct {
+ u8 verb;
+ u8 bpid;
+ u8 __reserved[62];
+ };
+ struct bm_buffer bufs[8];
+};
+#define BM_MCR_VERB_VBIT 0x80
+#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
+#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
+#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
+#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
+#define BM_MCR_VERB_CMD_ERR_ECC 0x70
+#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
+#define BM_MCR_TIMEOUT 10000 /* us */
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ union bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 bm_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+struct bman_portal {
+ struct bm_portal p;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ /* probing time config params for cpu-affine portals */
+ const struct bm_portal_config *config;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+
+static inline struct bman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+
+/*
+ * This object type refers to a pool, it isn't *the* pool. There may be
+ * more than one such object per BMan buffer pool, eg. if different users of the
+ * pool are operating via different portals.
+ */
+struct bman_pool {
+ /* index of the buffer pool to encapsulate (0-63) */
+ u32 bpid;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+};
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ struct bm_portal *portal = &p->p;
+ u32 clear = p->irq_sources;
+ u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ clear |= poll_portal_slow(p, is);
+ bm_out(portal, BM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+/* --- RCR API --- */
+
+#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
+#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~RCR_CARRY;
+
+ return (struct bm_rcr_entry *)addr;
+}
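A short worked example of the carry-clear wrap under the sizes defined above (BM_RCR_SIZE == 8 and a 64-byte struct bm_rcr_entry give RCR_SHIFT == 6 and RCR_CARRY == 0x200). The base address is arbitrary; the trick relies on the ring being naturally aligned to its 512-byte span, which the cache-enabled RCR region at BM_CL_RCR is.

/* Illustrative addresses only; the ring base must be 512-byte aligned. */
uintptr_t ring = 0x3000;            /* entry 0                         */
uintptr_t last = ring + 7 * 64;     /* entry 7: 0x31c0                 */
uintptr_t past = last + 64;         /* 0x3200: the "carry" bit is set  */
uintptr_t wrap = past & ~RCR_CARRY; /* 0x3000: wrapped back to entry 0 */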
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int rcr_ptr2idx(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
+}
+#endif
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void rcr_inc(struct bm_rcr *rcr)
+{
+ /* increment to the next RCR pointer and handle overflow and 'vbit' */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+
+ rcr->cursor = rcr_carryclear(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static int bm_rcr_get_avail(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return rcr->available;
+}
+
+static int bm_rcr_get_fill(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ rcr->ithresh = ithresh;
+ bm_out(portal, BM_REG_RCR_ITR, ithresh);
+}
+
+static void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
+}
+
+static u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+
+ DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
+ diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+
+ DPAA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 1;
+#endif
+ dpaa_zero(rcr->cursor);
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+
+ DPAA_ASSERT(rcr->busy);
+ DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ DPAA_ASSERT(rcr->available >= 1);
+ dma_wmb();
+ rcursor = rcr->cursor;
+ rcursor->_ncw_verb = myverb | rcr->vbit;
+ dpaa_flush(rcursor);
+ rcr_inc(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ enum bm_rcr_cmode cmode)
+{
+ struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.ce + BM_CL_RCR;
+ rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
+ BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
+ | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(portal, BM_REG_CFG, cfg);
+ return 0;
+}
+
+static void bm_rcr_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_rcr *rcr = &portal->rcr;
+ int i;
+
+ DPAA_ASSERT(!rcr->busy);
+
+ i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr_ptr2idx(rcr->cursor))
+ pr_crit("losing uncommited RCR entries\n");
+
+ i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ if (i != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != rcr_ptr2idx(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+#endif
+}
+
+/* --- Management command API --- */
+static int bm_mc_init(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + BM_CL_CR;
+ mc->rr = portal->addr.ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
+ 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static void bm_mc_finish(struct bm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ struct bm_mc *mc = &portal->mc;
+ union bm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+static inline int bm_mc_result_timeout(struct bm_portal *portal,
+ union bm_mc_result **mcr)
+{
+ int timeout = BM_MCR_TIMEOUT;
+
+ do {
+ *mcr = bm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
+
+/* Disable all BSCN interrupts for the portal */
+static void bm_isr_bscn_disable(struct bm_portal *portal)
+{
+ bm_out(portal, BM_REG_SCN(0), 0);
+ bm_out(portal, BM_REG_SCN(1), 0);
+}
+
+static int bman_create_portal(struct bman_portal *portal,
+ const struct bm_portal_config *c)
+{
+ struct bm_portal *p;
+ int ret;
+
+ p = &portal->p;
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config; everything that follows depends on it, and "config" is kept
+ * mostly for later (de)reference...
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
+ dev_err(c->dev, "RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /*
+ * Default to all BPIDs disabled, we enable as required at
+ * run-time.
+ */
+ bm_isr_bscn_disable(p);
+
+ /* Write-to-clear any stale interrupt status bits */
+ bm_out(p, BM_REG_ISDR, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_out(p, BM_REG_IER, 0);
+ bm_out(p, BM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = c;
+
+ bm_out(p, BM_REG_ISDR, 0);
+ bm_out(p, BM_REG_IIR, 0);
+
+ return 0;
+
+fail_rcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ bm_mc_finish(p);
+fail_mc:
+ bm_rcr_finish(p);
+fail_rcr:
+ return -EIO;
+}
+
+struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
+{
+ struct bman_portal *portal;
+ int err;
+
+ portal = &per_cpu(bman_affine_portal, c->cpu);
+ err = bman_create_portal(portal, c);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static u32 poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ u32 ret = is;
+
+ if (is & BM_PIRQ_RCRI) {
+ bm_rcr_cce_update(&p->p);
+ bm_rcr_set_ithresh(&p->p, 0);
+ bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPAA_ASSERT(!is);
+ return ret;
+}
+
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_out(&p->p, BM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+ return 0;
+}
+
+static int bm_shutdown_pool(u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ union bm_mc_result *bm_res;
+
+ while (1) {
+ struct bman_portal *p = get_affine_portal();
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(&p->p);
+ bm_cmd->bpid = bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ if (!bm_mc_result_timeout(&p->p, &bm_res)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Command timedout\n");
+ return -ETIMEDOUT;
+ }
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ put_affine_portal();
+ /* Pool is empty */
+ return 0;
+ }
+ put_affine_portal();
+ }
+
+ return 0;
+}
+
+struct gen_pool *bm_bpalloc;
+
+static int bm_alloc_bpid_range(u32 *result, u32 count)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(bm_bpalloc, count);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+static int bm_release_bpid(u32 bpid)
+{
+ int ret;
+
+ ret = bm_shutdown_pool(bpid);
+ if (ret) {
+ pr_debug("BPID %d leaked\n", bpid);
+ return ret;
+ }
+
+ gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+
+struct bman_pool *bman_new_pool(void)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (bm_alloc_bpid_range(&bpid, 1))
+ return NULL;
+
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+
+ pool->bpid = bpid;
+
+ return pool;
+err:
+ bm_release_bpid(bpid);
+ kfree(pool);
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
+
+void bman_free_pool(struct bman_pool *pool)
+{
+ bm_release_bpid(pool->bpid);
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+int bman_get_bpid(const struct bman_pool *pool)
+{
+ return pool->bpid;
+}
+EXPORT_SYMBOL(bman_get_bpid);
+
+static void update_rcr_ci(struct bman_portal *p, int avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ unsigned long irqflags;
+ int avail, timeout = 1000; /* 1ms */
+ int i = num - 1;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ do {
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ avail = bm_rcr_get_avail(&p->p);
+ if (avail < 2)
+ update_rcr_ci(p, avail);
+ r = bm_rcr_start(&p->p);
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ if (likely(r))
+ break;
+
+ udelay(1);
+ } while (--timeout);
+
+ if (unlikely(!timeout))
+ return -ETIMEDOUT;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+ /*
+ * we can copy all but the first entry, as copying the first one wholesale
+ * could trigger badness with the valid-bit
+ */
+ bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
+ bm_buffer_set_bpid(r->bufs, pool->bpid);
+ if (i)
+ memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_release);
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ union bm_mc_result *mcr;
+ int ret;
+
+ DPAA_ASSERT(num > 0 && num <= 8);
+
+ mcc = bm_mc_start(&p->p);
+ mcc->bpid = pool->bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ if (!bm_mc_result_timeout(&p->p, &mcr)) {
+ put_affine_portal();
+ pr_crit("BMan Acquire Timeout\n");
+ return -ETIMEDOUT;
+ }
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs)
+ memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));
+
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal)
+{
+ return portal->config;
+}
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
new file mode 100644
index 000000000000..9deb0524543f
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+
+/* Register offsets */
+#define REG_FBPR_FPC 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_ERR_ISDR 0x0e08
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ { BM_EIRQ_IVCI, "Invalid Command Verb" },
+ { BM_EIRQ_FLWI, "FBPR Low Watermark" },
+ { BM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { BM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { BM_EIRQ_BSCN, "Pool State Change Notification" },
+};
+
+/* Only trigger the low water mark interrupt once */
+#define BMAN_ERRS_TO_DISABLE BM_EIRQ_FLWI
+
+/* Pointer to the start of the BMan's CCSR space */
+static u32 __iomem *bm_ccsr_start;
+
+static inline u32 bm_ccsr_in(u32 offset)
+{
+ return ioread32be(bm_ccsr_start + offset/4);
+}
+static inline void bm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, bm_ccsr_start + offset/4);
+}
+
+static void bm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+/* signal transactions for FBPRs with higher priority */
+#define FBPR_AR_RPRIO_HI BIT(30)
+
+static void bm_set_memory(u64 ba, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPAA_ASSERT(size >= 4096 && size <= 1024*1024*1024 &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ bm_ccsr_out(REG_FBPR_BARE, upper_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_BAR, lower_32_bits(ba));
+ bm_ccsr_out(REG_FBPR_AR, exp - 1);
+}
+
+/*
+ * Location and size of BMan private memory
+ *
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
+
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = bm_ccsr_in(REG_ERR_IER);
+ isr_val = bm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = bm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(bman_hwerr_txts); i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ /* Re-arm error capture registers */
+ bm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ bm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int fsl_bman_probe(struct platform_device *pdev)
+{
+ int ret, err_irq;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ u16 id, bm_pool_cnt;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ bm_ccsr_start = devm_ioremap(dev, res->start,
+ res->end - res->start + 1);
+ if (!bm_ccsr_start)
+ return -ENXIO;
+
+ bm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ bman_ip_rev = BMAN_REV10;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else if (major == 2 && minor == 0) {
+ bman_ip_rev = BMAN_REV20;
+ bm_pool_cnt = 8;
+ } else if (major == 2 && minor == 1) {
+ bman_ip_rev = BMAN_REV21;
+ bm_pool_cnt = BM_POOL_MAX;
+ } else {
+ dev_err(dev, "Unknown Bman version:%04x,%02x,%02x\n",
+ id, major, minor);
+ return -ENODEV;
+ }
+
+ bm_set_memory(fbpr_a, fbpr_sz);
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, bman_isr, IRQF_SHARED, "bman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_ccsr_out(REG_ERR_ISDR, BM_EIRQ_BSCN);
+ /*
+ * Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ bm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ bm_bpalloc = devm_gen_pool_create(dev, 0, -1, "bman-bpalloc");
+ if (IS_ERR(bm_bpalloc)) {
+ ret = PTR_ERR(bm_bpalloc);
+ dev_err(dev, "bman-bpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* seed BMan resource pool */
+ ret = gen_pool_add(bm_bpalloc, DPAA_GENALLOC_OFF, bm_pool_cnt, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed BPID range [%d..%d] (%d)\n",
+ 0, bm_pool_cnt - 1, ret);
+ return ret;
+ }
+
+ return 0;
+};
+
+static const struct of_device_id fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_bman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_bman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_bman_probe,
+};
+
+builtin_platform_driver(fsl_bman_driver);
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
new file mode 100644
index 000000000000..6579cc18811a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -0,0 +1,219 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+static struct bman_portal *affine_bportals[NR_CPUS];
+static struct cpumask portal_cpus;
+/* protect bman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(bman_lock);
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p = bman_create_affine_portal(pcfg);
+
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI);
+ affine_bportals[pcfg->cpu] = p;
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+}
+
+static void bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p = affine_bportals[cpu];
+ const struct bm_portal_config *pcfg;
+
+ if (!p)
+ return;
+
+ pcfg = bman_get_bm_portal_config(p);
+ if (!pcfg)
+ return;
+
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+}
+
+static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ bman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ bman_offline_cpu(cpu);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bman_hotplug_cpu_notifier = {
+ .notifier_call = bman_hotplug_cpu_callback,
+};
+
+static int bman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct bm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ void __iomem *va;
+ int irq, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ pcfg->cpu = -1;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ'\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va)
+ goto err_ioremap1;
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va)
+ goto err_ioremap2;
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ spin_lock(&bman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&bman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&bman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg))
+ goto err_ioremap2;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ bman_offline_cpu(cpu);
+
+ return 0;
+
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ dev_err(dev, "ioremap failed\n");
+ return -ENXIO;
+}
+
+static const struct of_device_id bman_portal_ids[] = {
+ {
+ .compatible = "fsl,bman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bman_portal_ids);
+
+static struct platform_driver bman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = bman_portal_ids,
+ },
+ .probe = bman_portal_probe,
+};
+
+static int __init bman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(bman_portal_driver,
+ bman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/bman_priv.h b/drivers/soc/fsl/qbman/bman_priv.h
new file mode 100644
index 000000000000..f6896a2f6d90
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_priv.h
@@ -0,0 +1,80 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/bman.h>
+
+/* Portal processing (interrupt) sources */
+#define BM_PIRQ_RCRI 0x00000002 /* RCR Ring (below threshold) */
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+extern struct gen_pool *bm_bpalloc;
+
+struct bm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ struct device *dev;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+};
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+/*
+ * The bman_p_***() variant below may be called even when the cpu that the
+ * portal is affine to is not yet online.
+ * @bman_portal specifies which portal the API will use.
+ */
+int bman_p_irqsource_add(struct bman_portal *p, u32 bits);
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE BM_PIRQ_RCRI
+
+const struct bm_portal_config *
+bman_get_bm_portal_config(const struct bman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/bman_test.c b/drivers/soc/fsl/qbman/bman_test.c
new file mode 100644
index 000000000000..09b1c960b26a
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.c
@@ -0,0 +1,53 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("BMan testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_API
+ int loop = 1;
+
+ while (loop--)
+ bman_test_api();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/bman_test.h b/drivers/soc/fsl/qbman/bman_test.h
new file mode 100644
index 000000000000..037ed342add4
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test.h
@@ -0,0 +1,35 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+void bman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/bman_test_api.c b/drivers/soc/fsl/qbman/bman_test_api.c
new file mode 100644
index 000000000000..6f6bdd154fe3
--- /dev/null
+++ b/drivers/soc/fsl/qbman/bman_test_api.c
@@ -0,0 +1,151 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+static struct bman_pool *pool;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+static void bufs_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if (bman_ip_rev == BMAN_REV20 || bman_ip_rev == BMAN_REV21) {
+
+ /*
+ * On SoCs with BMan revision 2.0, BMan only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to BMan (otherwise we'll be
+ * disappointed when the buffers we acquire back from BMan
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) <
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK) >
+ (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
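A small numeric illustration of the masking behaviour described above, with an arbitrary 48-bit test address:

u64 released   = 0xAB0123456789ULL;  /* what the test hands to bman_release()   */
u64 reacquired = 0x000123456789ULL;  /* rev 2.0 hardware has dropped bits 47:40 */

/* Masking both sides with BMAN_TOKEN_MASK (bits 39:0) makes them compare equal. */
WARN_ON((released & BMAN_TOKEN_MASK) != (reacquired & BMAN_TOKEN_MASK));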
+
+static void bufs_confirm(void)
+{
+ int i, j;
+
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ WARN_ON(matches != 1);
+ }
+}
+
+/* test */
+void bman_test_api(void)
+{
+ int i, loops = LOOPS;
+
+ bufs_init();
+
+ pr_info("%s(): Starting\n", __func__);
+
+ pool = bman_new_pool();
+ if (!pool) {
+ pr_crit("bman_new_pool() failed\n");
+ goto failed;
+ }
+
+ /* Release buffers */
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ int num = 8;
+
+ if (i + num > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if (bman_release(pool, bufs_in + i, num)) {
+ pr_crit("bman_release() failed\n");
+ goto failed;
+ }
+ i += num;
+ }
+
+ /* Acquire buffers */
+ while (i > 0) {
+ int tmp, num = 8;
+
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num);
+ WARN_ON(tmp != num);
+ i -= num;
+ }
+ i = bman_acquire(pool, NULL, 1);
+ WARN_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /* Clean up */
+ bman_free_pool(pool);
+ pr_info("%s(): Finished\n", __func__);
+ return;
+
+failed:
+ WARN_ON(1);
+}
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
new file mode 100644
index 000000000000..b63fd72295c6
--- /dev/null
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -0,0 +1,103 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DPAA_SYS_H
+#define __DPAA_SYS_H
+
+#include <linux/cpu.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/prefetch.h>
+#include <linux/genalloc.h>
+#include <asm/cacheflush.h>
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPAA_PORTAL_CE 0
+#define DPAA_PORTAL_CI 1
+
+#if (L1_CACHE_BYTES != 32) && (L1_CACHE_BYTES != 64)
+#error "Unsupported Cacheline Size"
+#endif
+
+static inline void dpaa_flush(void *p)
+{
+#ifdef CONFIG_PPC
+ flush_dcache_range((unsigned long)p, (unsigned long)p+64);
+#elif defined(CONFIG_ARM32)
+ __cpuc_flush_dcache_area(p, 64);
+#elif defined(CONFIG_ARM64)
+ __flush_dcache_area(p, 64);
+#endif
+}
+
+#define dpaa_invalidate(p) dpaa_flush(p)
+
+#define dpaa_zero(p) memset(p, 0, 64)
+
+static inline void dpaa_touch_ro(void *p)
+{
+#if (L1_CACHE_BYTES == 32)
+ prefetch(p+32);
+#endif
+ prefetch(p);
+}
+
+/* Commonly used combo */
+static inline void dpaa_invalidate_touch_ro(void *p)
+{
+ dpaa_invalidate(p);
+ dpaa_touch_ro(p);
+}
+
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+#define DPAA_ASSERT(x) WARN_ON(!(x))
+#else
+#define DPAA_ASSERT(x)
+#endif
+
+/* cyclic helper for rings */
+static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
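Two worked cases make the cyclic helper's wrap behaviour explicit (an 8-entry ring, matching BM_RCR_SIZE, is assumed):

/* No wrap: first <= last, so the distance is simply last - first. */
WARN_ON(dpaa_cyc_diff(8, 2, 6) != 4);
/* Wrapped: first > last, so the distance is ringsize + last - first. */
WARN_ON(dpaa_cyc_diff(8, 6, 2) != 4);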
+
+/* Offset applied to genalloc pools due to zero being an error return */
+#define DPAA_GENALLOC_OFF 0x80000000
+
+#endif /* __DPAA_SYS_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
new file mode 100644
index 000000000000..119054bc922b
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -0,0 +1,2881 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+#define QMAN_POLL_LIMIT 32
+#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_PIRQ_MR_ITHRESH 4
+#define QMAN_PIRQ_IPERIOD 100
+
+/* Portal register assists */
+
+/* Cache-inhibited register offsets */
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IER 0x0e04
+#define QM_REG_ISDR 0x0e08
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+/*
+ * BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses and data-dependencies. Use of barrier()s
+ * or other order-preserving primitives simply degrade performance. Hence the
+ * use of the __raw_*() interfaces, which simply ensure that the compiler treats
+ * the portal registers as volatile.
+ */
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/*
+ * Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode.
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgment */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+/* "Enqueue Command" */
+struct qm_eqcr_entry {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 dca;
+ u16 seqnum;
+ u32 orp; /* 24-bit */
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved3[32];
+} __packed;
+#define QM_EQCR_VERB_VBIT 0x80
+#define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
+#define QM_EQCR_VERB_CMD_ENQUEUE 0x01
+#define QM_EQCR_SEQNUM_NESN 0x8000 /* Advance NESN */
+#define QM_EQCR_SEQNUM_NLIS 0x4000 /* More fragments to come */
+#define QM_EQCR_SEQNUM_SEQMASK 0x3fff /* sequence number goes here */
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ union qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+/* MC (Management Command) command */
+/* "Query FQ" */
+struct qm_mcc_queryfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+/* "Alter FQ State Commands " */
+struct qm_mcc_alterfq {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2;
+ u8 count; /* number of consecutive FQID */
+ u8 __reserved3[10];
+ u32 context_b; /* frame queue context b */
+ u8 __reserved4[40];
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcc_querycgr {
+ u8 _ncw_verb;
+ u8 __reserved1[30];
+ u8 cgid;
+ u8 __reserved2[32];
+};
+
+struct qm_mcc_querywq {
+ u8 _ncw_verb;
+ u8 __reserved;
+ /* select channel if verb != QUERYWQ_DEDICATED */
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved2[60];
+} __packed;
+
+#define QM_MCC_VERB_VBIT 0x80
+#define QM_MCC_VERB_MASK 0x7f /* where the verb contains: */
+#define QM_MCC_VERB_INITFQ_PARKED 0x40
+#define QM_MCC_VERB_INITFQ_SCHED 0x41
+#define QM_MCC_VERB_QUERYFQ 0x44
+#define QM_MCC_VERB_QUERYFQ_NP 0x45 /* "non-programmable" fields */
+#define QM_MCC_VERB_QUERYWQ 0x46
+#define QM_MCC_VERB_QUERYWQ_DEDICATED 0x47
+#define QM_MCC_VERB_ALTER_SCHED 0x48 /* Schedule FQ */
+#define QM_MCC_VERB_ALTER_FE 0x49 /* Force Eligible FQ */
+#define QM_MCC_VERB_ALTER_RETIRE 0x4a /* Retire FQ */
+#define QM_MCC_VERB_ALTER_OOS 0x4b /* Take FQ out of service */
+#define QM_MCC_VERB_ALTER_FQXON 0x4d /* FQ XON */
+#define QM_MCC_VERB_ALTER_FQXOFF 0x4e /* FQ XOFF */
+#define QM_MCC_VERB_INITCGR 0x50
+#define QM_MCC_VERB_MODIFYCGR 0x51
+#define QM_MCC_VERB_CGRTESTWRITE 0x52
+#define QM_MCC_VERB_QUERYCGR 0x58
+#define QM_MCC_VERB_QUERYCONGESTION 0x59
+union qm_mc_command {
+ struct {
+ u8 _ncw_verb; /* writes to this are non-coherent */
+ u8 __reserved[63];
+ };
+ struct qm_mcc_initfq initfq;
+ struct qm_mcc_queryfq queryfq;
+ struct qm_mcc_alterfq alterfq;
+ struct qm_mcc_initcgr initcgr;
+ struct qm_mcc_querycgr querycgr;
+ struct qm_mcc_querywq querywq;
+ struct qm_mcc_queryfq_np queryfq_np;
+};
+
+/* MC (Management Command) result */
+/* "Query FQ" */
+struct qm_mcr_queryfq {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[8];
+ struct qm_fqd fqd; /* the FQD fields are here */
+ u8 __reserved2[30];
+} __packed;
+
+/* "Alter FQ State Commands" */
+struct qm_mcr_alterfq {
+ u8 verb;
+ u8 result;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[61];
+};
+#define QM_MCR_VERB_RRID 0x80
+#define QM_MCR_VERB_MASK QM_MCC_VERB_MASK
+#define QM_MCR_VERB_INITFQ_PARKED QM_MCC_VERB_INITFQ_PARKED
+#define QM_MCR_VERB_INITFQ_SCHED QM_MCC_VERB_INITFQ_SCHED
+#define QM_MCR_VERB_QUERYFQ QM_MCC_VERB_QUERYFQ
+#define QM_MCR_VERB_QUERYFQ_NP QM_MCC_VERB_QUERYFQ_NP
+#define QM_MCR_VERB_QUERYWQ QM_MCC_VERB_QUERYWQ
+#define QM_MCR_VERB_QUERYWQ_DEDICATED QM_MCC_VERB_QUERYWQ_DEDICATED
+#define QM_MCR_VERB_ALTER_SCHED QM_MCC_VERB_ALTER_SCHED
+#define QM_MCR_VERB_ALTER_FE QM_MCC_VERB_ALTER_FE
+#define QM_MCR_VERB_ALTER_RETIRE QM_MCC_VERB_ALTER_RETIRE
+#define QM_MCR_VERB_ALTER_OOS QM_MCC_VERB_ALTER_OOS
+#define QM_MCR_RESULT_NULL 0x00
+#define QM_MCR_RESULT_OK 0xf0
+#define QM_MCR_RESULT_ERR_FQID 0xf1
+#define QM_MCR_RESULT_ERR_FQSTATE 0xf2
+#define QM_MCR_RESULT_ERR_NOTEMPTY 0xf3 /* OOS fails if FQ is !empty */
+#define QM_MCR_RESULT_ERR_BADCHANNEL 0xf4
+#define QM_MCR_RESULT_PENDING 0xf8
+#define QM_MCR_RESULT_ERR_BADCOMMAND 0xff
+#define QM_MCR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MCR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+#define QM_MCR_TIMEOUT 10000 /* us */
+union qm_mc_result {
+ struct {
+ u8 verb;
+ u8 result;
+ u8 __reserved1[62];
+ };
+ struct qm_mcr_queryfq queryfq;
+ struct qm_mcr_alterfq alterfq;
+ struct qm_mcr_querycgr querycgr;
+ struct qm_mcr_querycongestion querycongestion;
+ struct qm_mcr_querywq querywq;
+ struct qm_mcr_queryfq_np queryfq_np;
+};
+
+struct qm_mc {
+ union qm_mc_command *cr;
+ union qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+struct qm_addr {
+ void __iomem *ce; /* cache-enabled */
+ void __iomem *ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /*
+ * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ...
+ */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} ____cacheline_aligned;
+
+/* Cache-inhibited register access. */
+static inline u32 qm_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ci + offset);
+}
+
+static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
+{
+ __raw_writel(val, p->addr.ci + offset);
+}
+
+/* Cache Enabled Portal Access */
+static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
+{
+ dpaa_invalidate(p->addr.ce + offset);
+}
+
+static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
+{
+ dpaa_touch_ro(p->addr.ce + offset);
+}
+
+static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
+{
+ return __raw_readl(p->addr.ce + offset);
+}
+
+/* --- EQCR API --- */
+
+#define EQCR_SHIFT ilog2(sizeof(struct qm_eqcr_entry))
+#define EQCR_CARRY (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~EQCR_CARRY;
+
+ return (struct qm_eqcr_entry *)addr;
+}
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void eqcr_inc(struct qm_eqcr *eqcr)
+{
+ /* increment to the next EQCR pointer and handle overflow and 'vbit' */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+
+ eqcr->cursor = eqcr_carryclear(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
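+
+/*
+ * For illustration: each EQCR entry is 64 bytes, so EQCR_SHIFT is 6 and
+ * EQCR_CARRY is 8 << 6 = 0x200. The ring occupies a naturally aligned
+ * 512-byte block, so stepping the cursor one entry past the end sets bit
+ * 0x200; eqcr_carryclear() clears it to wrap back to the ring base, which
+ * is also the point at which the valid-bit is toggled.
+ */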
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (pi != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("losing uncommitted EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPAA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
+ (QM_EQCR_SIZE - 1);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 1;
+#endif
+ dpaa_zero(eqcr->cursor);
+ return eqcr->cursor;
+}
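+
+/*
+ * The two start variants differ only in how consumer-index updates are
+ * picked up: with CI stashing the snooped CI cacheline is re-read here
+ * only when the ring appears full, whereas the no-stash variant relies
+ * on the caller having refreshed CI beforehand (see qman_enqueue()).
+ */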
+
+static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
+{
+ DPAA_ASSERT(eqcr->busy);
+ DPAA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff));
+ DPAA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff));
+ DPAA_ASSERT(eqcr->available >= 1);
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+
+ eqcr_commit_checks(eqcr);
+ DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ dma_wmb();
+ eqcursor = eqcr->cursor;
+ eqcursor->_ncw_verb = myverb | eqcr->vbit;
+ dpaa_flush(eqcursor);
+ eqcr_inc(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+
+ eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
+ diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ eqcr->ithresh = ithresh;
+ qm_out(portal, QM_REG_EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ struct qm_eqcr *eqcr = &portal->eqcr;
+
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+/* --- DQRR API --- */
+
+#define DQRR_SHIFT ilog2(sizeof(struct qm_dqrr_entry))
+#define DQRR_CARRY (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
+
+static const struct qm_dqrr_entry *dqrr_carryclear(
+ const struct qm_dqrr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~DQRR_CARRY;
+
+ return (const struct qm_dqrr_entry *)addr;
+}
+
+static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
+}
+
+static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
+{
+ return dqrr_carryclear(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(portal, QM_REG_DQRR_SDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_VDQCR, 0);
+ qm_out(portal, QM_REG_DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dpaa_invalidate(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(portal, QM_REG_CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (dqrr->cmode != qm_dqrr_cdc &&
+ dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->fill);
+ dqrr->cursor = dqrr_inc(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_dqrr *dqrr = &portal->dqrr;
+ struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+
+ DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#ifndef CONFIG_FSL_PAMU
+ /*
+ * If PAMU is not available we need to invalidate the cache.
+ * When PAMU is available the cache is updated by stash
+ */
+ dpaa_invalidate_touch_ro(res);
+#endif
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
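+
+/*
+ * In pvb mode the producer index is tracked entirely in software: a new
+ * entry is recognised when its verb's valid-bit matches the expected
+ * 'vbit', which is toggled on every ring wrap, so the fast path never
+ * needs a cache-inhibited read of DQRR_PI.
+ */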
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+ int idx = dqrr_ptr2idx(dq);
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPAA_ASSERT((dqrr->ring + idx) == dq);
+ DPAA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
+{
+ __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
+
+ DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ (bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+}
+
+/* --- MR API --- */
+
+#define MR_SHIFT ilog2(sizeof(union qm_mr_entry))
+#define MR_CARRY (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
+
+static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
+{
+ uintptr_t addr = (uintptr_t)p;
+
+ addr &= ~MR_CARRY;
+
+ return (union qm_mr_entry *)addr;
+}
+
+static inline int mr_ptr2idx(const union qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
+}
+
+static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
+{
+ return mr_carryclear(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.ce + QM_CL_MR;
+ mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
+ ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(portal, QM_REG_CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (mr->ci != mr_ptr2idx(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline int qm_mr_next(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->fill);
+ mr->cursor = mr_inc(mr->cursor);
+ return --mr->fill;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+ union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+
+ DPAA_ASSERT(mr->pmode == qm_mr_pvb);
+ /*
+ * when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads".
+ */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = mr_inc(res);
+ }
+ dpaa_invalidate_touch_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ struct qm_mr *mr = &portal->mr;
+
+ DPAA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = mr_ptr2idx(mr->cursor);
+ qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(portal, QM_REG_MR_ITR, ithresh);
+}
+
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.ce + QM_CL_CR;
+ mc->rr = portal->addr.ce + QM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
+ ? 0 : 1;
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+
+ DPAA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+ dpaa_zero(mc->cr);
+ return mc->cr;
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_user);
+ dma_wmb();
+ mc->cr->_ncw_verb = myverb | mc->vbit;
+ dpaa_flush(mc->cr);
+ dpaa_invalidate_touch_ro(rr);
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ struct qm_mc *mc = &portal->mc;
+ union qm_mc_result *rr = mc->rr + mc->rridx;
+
+ DPAA_ASSERT(mc->state == qman_mc_hw);
+ /*
+ * The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering...
+ */
+ if (!__raw_readb(&rr->verb)) {
+ dpaa_invalidate_touch_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
+
+static inline int qm_mc_result_timeout(struct qm_portal *portal,
+ union qm_mc_result **mcr)
+{
+ int timeout = QM_MCR_TIMEOUT;
+
+ do {
+ *mcr = qm_mc_result(portal);
+ if (*mcr)
+ break;
+ udelay(1);
+ } while (--timeout);
+
+ return timeout;
+}
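+
+/*
+ * With QM_MCR_TIMEOUT of 10000 and a 1 us delay per iteration this polls
+ * for roughly 10 ms before giving up; a zero return therefore means the
+ * management command never produced a result in that window.
+ */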
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ clear_bits(mask, &fq->flags);
+}
+
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long bits;
+ /* interrupt sources processed by portal_isr(), configurable */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ /* only 1 volatile dequeue at a time */
+ struct qman_fq *vdqcr_owned;
+ u32 sdqcr;
+ /* probing time config params for cpu-affine portals */
+ const struct qm_portal_config *config;
+ /* needed for providing a non-NULL device to dma_map_***() */
+ struct platform_device *pdev;
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ struct work_struct congestion_work;
+ struct work_struct mr_work;
+ char irqname[MAX_IRQNAME];
+};
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+struct qman_portal *affine_portals[NR_CPUS];
+
+static inline struct qman_portal *get_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
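+
+/*
+ * get_cpu_var()/put_cpu_var() disable and re-enable preemption, so code
+ * running between get_affine_portal() and put_affine_portal() cannot
+ * migrate away from the CPU whose portal it is using.
+ */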
+
+static struct workqueue_struct *qm_portal_wq;
+
+int qman_wq_alloc(void)
+{
+ qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
+ if (!qm_portal_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * This is what everything can wait on, even if it migrates to a different cpu
+ * from the one whose affine portal it is waiting on.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static struct qman_fq **fq_table;
+static u32 num_fqids;
+
+int qman_alloc_fq_table(u32 _num_fqids)
+{
+ num_fqids = _num_fqids;
+
+ fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
+ if (!fq_table)
+ return -ENOMEM;
+
+ pr_debug("Allocated fq lookup table at %p, entry count %u\n",
+ fq_table, num_fqids * 2);
+ return 0;
+}
+
+static struct qman_fq *idx_to_fq(u32 idx)
+{
+ struct qman_fq *fq;
+
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (WARN_ON(idx >= num_fqids * 2))
+ return NULL;
+#endif
+ fq = fq_table[idx];
+ DPAA_ASSERT(!fq || idx == fq->idx);
+
+ return fq;
+}
+
+/*
+ * Only returns full-service fq objects, not enqueue-only
+ * references (QMAN_FQ_FLAG_NO_MODIFY).
+ */
+static struct qman_fq *fqid_to_fq(u32 fqid)
+{
+ return idx_to_fq(fqid * 2);
+}
+
+static struct qman_fq *tag_to_fq(u32 tag)
+{
+#if BITS_PER_LONG == 64
+ return idx_to_fq(tag);
+#else
+ return (struct qman_fq *)tag;
+#endif
+}
+
+static u32 fq_to_tag(struct qman_fq *fq)
+{
+#if BITS_PER_LONG == 64
+ return fq->idx;
+#else
+ return (u32)fq;
+#endif
+}
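+
+/*
+ * The 32-bit tag carried in contextB cannot hold a kernel pointer on
+ * 64-bit systems, so there the fq_table index is used as the tag and
+ * tag_to_fq()/fq_to_tag() translate through the lookup table; on 32-bit
+ * systems the pointer itself fits and is stored directly.
+ */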
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+static void qm_congestion_task(struct work_struct *work);
+static void qm_mr_process_task(struct work_struct *work);
+
+static irqreturn_t portal_isr(int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+
+ u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
+ u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+
+ if (unlikely(!is))
+ return IRQ_NONE;
+
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_out(&p->p, QM_REG_ISR, clear);
+ return IRQ_HANDLED;
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const union qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /*
+ * if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here.
+ */
+ u64 now, then = jiffies;
+
+ do {
+ now = jiffies;
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("Found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+static int qman_create_portal(struct qman_portal *portal,
+ const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+
+ p = &portal->p;
+
+#ifdef CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+ /*
+ * prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference
+ */
+ p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
+ p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with high-than-DQRR priority.
+ */
+ if (qm_eqcr_init(p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ dev_err(c->dev, "EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ dev_err(c->dev, "DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
+ dev_err(c->dev, "MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(p)) {
+ dev_err(c->dev, "MC initialisation failed\n");
+ goto fail_mc;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
+ qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ INIT_WORK(&portal->congestion_work, qm_congestion_task);
+ INIT_WORK(&portal->mr_work, qm_mr_process_task);
+ portal->bits = 0;
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ sprintf(buf, "qportal-%d", c->channel);
+ portal->pdev = platform_device_alloc(buf, -1);
+ if (!portal->pdev)
+ goto fail_devalloc;
+ if (dma_set_mask(&portal->pdev->dev, DMA_BIT_MASK(40)))
+ goto fail_devadd;
+ ret = platform_device_add(portal->pdev);
+ if (ret)
+ goto fail_devadd;
+ isdr = 0xffffffff;
+ qm_out(p, QM_REG_ISDR, isdr);
+ portal->irq_sources = 0;
+ qm_out(p, QM_REG_IER, 0);
+ qm_out(p, QM_REG_ISR, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
+ if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
+ dev_err(c->dev, "request_irq() failed\n");
+ goto fail_irq;
+ }
+ if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
+ irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
+ dev_err(c->dev, "irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr &= ~QM_PIRQ_EQCI;
+ qm_out(p, QM_REG_ISDR, isdr);
+ ret = qm_eqcr_get_fill(p);
+ if (ret) {
+ dev_err(c->dev, "EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_out(p, QM_REG_ISDR, isdr);
+ if (qm_dqrr_current(p)) {
+ dev_err(c->dev, "DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(p, 0xffff);
+ }
+ if (qm_mr_current(p) && drain_mr_fqrni(p)) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ const union qm_mr_entry *e = qm_mr_current(p);
+
+ dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%x\n",
+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ goto fail_dqrr_mr_empty;
+ }
+ /* Success */
+ portal->config = c;
+ qm_out(p, QM_REG_ISDR, 0);
+ qm_out(p, QM_REG_IIR, 0);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(p, portal->sdqcr);
+ return 0;
+
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(c->irq, portal);
+fail_irq:
+ platform_device_del(portal->pdev);
+fail_devadd:
+ platform_device_put(portal->pdev);
+fail_devalloc:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_mc_finish(p);
+fail_mc:
+ qm_mr_finish(p);
+fail_mr:
+ qm_dqrr_finish(p);
+fail_dqrr:
+ qm_eqcr_finish(p);
+fail_eqcr:
+ return -EIO;
+}
+
+struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *portal;
+ int err;
+
+ portal = &per_cpu(qman_affine_portal, c->cpu);
+ err = qman_create_portal(portal, c, cgrs);
+ if (err)
+ return NULL;
+
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(c->cpu, &affine_mask);
+ affine_channels[c->cpu] = c->channel;
+ affine_portals[c->cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+
+ return portal;
+}
+
+static void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /*
+ * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began.
+ */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->irq, qm);
+
+ kfree(qm->cgrs);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ platform_device_del(qm->pdev);
+ platform_device_put(qm->pdev);
+
+ qm->config = NULL;
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ struct qman_portal *qm = get_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+
+ pcfg = qm->config;
+ cpu = pcfg->cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg, u8 verb)
+{
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPAA_ASSERT(fq->state == qman_fq_state_parked ||
+ fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPAA_ASSERT(fq->state == qman_fq_state_sched);
+ DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+}
+
+static void qm_congestion_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ congestion_work);
+ struct qman_cgrs rr, c;
+ union qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+
+ spin_lock(&p->cgr_lock);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ spin_unlock(&p->cgr_lock);
+ dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
+ return;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
+ &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock(&p->cgr_lock);
+}
+
+static void qm_mr_process_task(struct work_struct *work)
+{
+ struct qman_portal *p = container_of(work, struct qman_portal,
+ mr_work);
+ const union qm_mr_entry *msg;
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+
+ preempt_disable();
+
+ while (1) {
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ break;
+
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = fqid_to_fq(msg->fq.fqid);
+ if (WARN_ON(!fq))
+ break;
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+ fq = tag_to_fq(msg->fq.contextB);
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ pr_crit_once("Leaking DCP ERNs!\n");
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+ fq = tag_to_fq(msg->ern.tag);
+ fq->cb.ern(p, fq, msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ }
+
+ qm_mr_cci_consume(&p->p, num);
+ preempt_enable();
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ if (is & QM_PIRQ_CSCI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->congestion_work);
+ }
+
+ if (is & QM_PIRQ_EQRI) {
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ queue_work_on(smp_processor_id(), qm_portal_wq,
+ &p->mr_work);
+ }
+
+ return is;
+}
+
+/*
+ * remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined.
+ */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ wake_up(&affine_queue);
+}
+
+/*
+ * The only states that would conflict with other things if they ran at the
+ * same time on the same cpu are:
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe, because:
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any QMan API.
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+
+ do {
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ break;
+
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /*
+ * VDQCR: don't trust contextB as the FQ may have
+ * been configured for h/w consumption and we're
+ * draining it post-retirement.
+ */
+ fq = p->vdqcr_owned;
+ /*
+ * We only set QMAN_FQ_STATE_NE when retiring, so we
+ * only need to check for clearing it when doing
+ * volatile dequeues. It's one less thing to check
+ * in the critical path (SDQCR).
+ */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /*
+ * This is duplicated from the SDQCR code, but we
+ * have stuff to do before *and* after this callback,
+ * and we don't want multiple if()s in the critical
+ * path (SDQCR).
+ */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ break;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: contextB points to the FQ */
+ fq = tag_to_fq(dq->contextB);
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+ /*
+ * The callback can request that we exit without
+ * consuming this entry nor advancing;
+ */
+ if (res == qman_cb_dqrr_stop)
+ break;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /*
+ * Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both.
+ */
+ DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+ /* just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq,
+ res == qman_cb_dqrr_park);
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /*
+ * Entry processed and consumed, increment our counter. The
+ * callback can request that we exit after consuming the
+ * entry, and we also exit if we reach our processing limit,
+ * so loop back only if neither of these conditions is met.
+ */
+ } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
+
+ return limit;
+}
+
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ unsigned long irqflags;
+ u32 ier;
+
+ /*
+ * Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert.
+ */
+ local_irq_save(irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_out(&p->p, QM_REG_IER, p->irq_sources);
+ ier = qm_in(&p->p, QM_REG_IER);
+ /*
+ * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering.
+ */
+ qm_out(&p->p, QM_REG_ISR, ~ier);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_affine_portal();
+
+ cpu = portal->config->cpu;
+ put_affine_portal();
+ }
+ WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+struct qman_portal *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ return __poll_portal_fast(p, limit);
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
+ pools &= p->config->pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ local_irq_restore(irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+/* Frame queue API */
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+
+ if (ret)
+ return ret;
+ }
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+
+ /* A context_b of 0 is allegedly special, so don't use that fqid */
+ if (fqid == 0 || fqid >= num_fqids) {
+ WARN(1, "bad fqid %d\n", fqid);
+ return -EINVAL;
+ }
+
+ fq->idx = fqid * 2;
+ if (flags & QMAN_FQ_FLAG_NO_MODIFY)
+ fq->idx++;
+
+ WARN_ON(fq_table[fq->idx]);
+ fq_table[fq->idx] = fq;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq)
+{
+ /*
+ * We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks.
+ */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+
+ DPAA_ASSERT(fq_table[fq->idx]);
+ fq_table[fq->idx] = NULL;
+ return;
+ default:
+ break;
+ }
+ DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ u8 res, myverb;
+ int ret = 0;
+
+ myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
+ ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ (fq->state != qman_fq_state_oos &&
+ fq->state != qman_fq_state_parked)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = fq->fqid;
+ mcc->initfq.count = 0;
+ /*
+ * If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand, don't overwrite it.
+ */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+ mcc->initfq.fqd.context_b = fq_to_tag(fq);
+ /*
+ * and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings.
+ */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ int wq = 0;
+
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ wq = 4;
+ }
+ qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
+ }
+ qm_mc_commit(&p->p, myverb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "MCR timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_init_fq);
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ /* Issue an ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state != qman_fq_state_parked) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(p->config->dev, "ALTER_SCHED timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_sched)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
+ fq->state == qman_fq_state_retired ||
+ fq->state == qman_fq_state_oos) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /*
+ * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change().
+ */
+ if (res == QM_MCR_RESULT_OK) {
+ ret = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /*
+ * Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI...
+ */
+ union qm_mr_entry msg;
+
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+ msg.fq.contextB = fq_to_tag(fq);
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ ret = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p;
+ int ret = 0;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPAA_CHECKING
+ if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
+ fq->state != qman_fq_state_retired) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *fqd = mcr->queryfq.fqd;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_fq_np(struct qman_fq *fq,
+ struct qm_mcr_queryfq_np *np)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fq->fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *np = mcr->queryfq_np;
+ else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
+ ret = -ERANGE;
+ else
+ ret = -EIO;
+out:
+ put_affine_portal();
+ return ret;
+}
+
+static int qman_query_cgr(struct qman_cgr *cgr,
+ struct qm_mcr_querycgr *cgrd)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ if (mcr->result == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ else {
+ dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
+ mcr_result_str(mcr->result));
+ ret = -EIO;
+ }
+out:
+ put_affine_portal();
+ return ret;
+}
+
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
+{
+ struct qm_mcr_querycgr query_cgr;
+ int err;
+
+ err = qman_query_cgr(cgr, &query_cgr);
+ if (err)
+ return err;
+
+ *result = !!query_cgr.cgr.cs;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr_congested);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags;
+ int ret = -EBUSY;
+
+ local_irq_save(irqflags);
+ if (p->vdqcr_owned)
+ goto out;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto out;
+
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ p->vdqcr_owned = fq;
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ ret = 0;
+out:
+ local_irq_restore(irqflags);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !set_vdqcr(p, fq, vdqcr));
+ else
+ wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
+ return ret;
+}
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if (fq->state != qman_fq_state_parked &&
+ fq->state != qman_fq_state_retired)
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /*
+ * NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue.
+ */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
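+
+/*
+ * Illustrative call sequence (not part of the driver code above): a caller
+ * that wants to pull up to three frames from a parked or retired FQ and
+ * block until the volatile dequeue command has completed could do:
+ *
+ *   ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
+ *                               QMAN_VOLATILE_FLAG_FINISH,
+ *                               QM_VDQCR_NUMFRAMES_SET(3));
+ *
+ * The FQID bits of the vdqcr argument must be left clear; they are filled
+ * in from fq->fqid by qman_volatile_dequeue() itself.
+ */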
+
+static void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags;
+ u8 avail;
+
+ p = get_affine_portal();
+ local_irq_save(irqflags);
+
+ if (p->use_eqcr_ci_stashing) {
+ /*
+		 * The stashing case is easy: only update if we need to, in
+		 * order to try to liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+		 * The non-stashing case is harder: we need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq))
+ goto out;
+
+ eq->fqid = fq->fqid;
+ eq->tag = fq_to_tag(fq);
+ eq->fd = *fd;
+
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
+out:
+ local_irq_restore(irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
+
+static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+ int ret = 0;
+
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ ret = -EIO;
+
+out:
+ put_affine_portal();
+ return ret;
+}
+
+#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
+#define TARG_MASK(n) (BIT(31) >> PORTAL_IDX(n))
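+
+/*
+ * For reference (derived from the two macros above, illustrative only): a
+ * portal whose dedicated channel is QM_CHANNEL_SWPORTAL0 + 3 has
+ * PORTAL_IDX() == 3 and therefore TARG_MASK() == BIT(31) >> 3 == BIT(28),
+ * i.e. per-portal CSCN target bits are assigned from the most significant
+ * bit downwards.
+ */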
+
+static u8 qman_cgr_cpus[CGR_NUM];
+
+void qman_init_cgr_all(void)
+{
+ struct qman_cgr cgr;
+ int err_cnt = 0;
+
+ for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
+ if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
+ err_cnt++;
+ }
+
+ if (err_cnt)
+ pr_err("Warning: %d error%s while initialising CGR h/w\n",
+ err_cnt, (err_cnt > 1) ? "s" : "");
+}
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts = {};
+ int ret;
+ struct qman_portal *p;
+
+ /*
+ * We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ cgr->chan = p->config->channel;
+ spin_lock(&p->cgr_lock);
+
+ if (opts) {
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto out;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto out;
+ }
+
+ list_add(&cgr->node, &p->cgr_cbs);
+
+	/* Determine if the newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success */
+ dev_err(p->config->dev, "CGR HW state partially modified\n");
+ ret = 0;
+ goto out;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en &&
+ qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+out:
+ spin_unlock(&p->cgr_lock);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->channel) {
+ /* attempt to delete from other portal than creator */
+ dev_err(p->config->dev, "CGR not owned by current portal");
+ dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->channel);
+
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list,
+ * update CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if (i->cgrid == cgr->cgrid && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qm_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static int qman_delete_cgr_thread(void *p)
+{
+ struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
+ int ret;
+
+ ret = qman_delete_cgr(cgr_comp->cgr);
+ complete(&cgr_comp->completion);
+
+ return ret;
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ struct task_struct *thread;
+ struct cgr_comp cgr_comp;
+
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ init_completion(&cgr_comp.completion);
+ cgr_comp.cgr = cgr;
+ thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
+ "cgr_del");
+
+ if (IS_ERR(thread))
+ goto out;
+
+ kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
+ wake_up_process(thread);
+ wait_for_completion(&cgr_comp.completion);
+ preempt_enable();
+ return;
+ }
+out:
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+/* Cleanup FQs */
+
+static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
+{
+ const union qm_mr_entry *msg;
+ int found = 0;
+
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
+ found = 1;
+ qm_mr_next(p);
+ qm_mr_cci_consume_to_current(p);
+ qm_mr_pvb_update(p);
+ msg = qm_mr_current(p);
+ }
+ return found;
+}
+
+static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
+ bool wait)
+{
+ const struct qm_dqrr_entry *dqrr;
+ int found = 0;
+
+ do {
+ qm_dqrr_pvb_update(p);
+ dqrr = qm_dqrr_current(p);
+ if (!dqrr)
+ cpu_relax();
+ } while (wait && !dqrr);
+
+ while (dqrr) {
+ if (dqrr->fqid == fqid && (dqrr->stat & s))
+ found = 1;
+ qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
+ qm_dqrr_pvb_update(p);
+ qm_dqrr_next(p);
+ dqrr = qm_dqrr_current(p);
+ }
+ return found;
+}
+
+#define qm_mr_drain(p, V) \
+ _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
+
+#define qm_dqrr_drain(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
+
+#define qm_dqrr_drain_wait(p, f, S) \
+ _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
+
+#define qm_dqrr_drain_nomatch(p) \
+ _qm_dqrr_consume_and_match(p, 0, 0, false)
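+
+/*
+ * Example expansions of the helpers above (illustrative only):
+ *
+ *   qm_mr_drain(p, FQRN)
+ *     == _qm_mr_consume_and_match_verb(p, QM_MR_VERB_FQRN)
+ *   qm_dqrr_drain_wait(p, fqid, FQ_EMPTY)
+ *     == _qm_dqrr_consume_and_match(p, fqid, QM_DQRR_STAT_FQ_EMPTY, true)
+ */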
+
+static int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ struct device *dev;
+ union qm_mc_command *mcc;
+ union qm_mc_result *mcr;
+ int orl_empty, drain = 0, ret = 0;
+ u32 channel, wq, res;
+ u8 state;
+
+ p = get_affine_portal();
+ dev = p->config->dev;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+		goto out; /* Already OOS, no need to do any more checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ /* Need to store these since the MCR gets reused */
+ channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
+ wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ dev_err(dev, "QUERYFQ_NP timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (res == QM_MCR_RESULT_PENDING) {
+ /*
+ * Need to wait for the FQRN in the message ring, which
+ * will only occur once the FQ has been drained. In
+			 * order for the FQ to drain, the portal needs to be set
+			 * to dequeue from the channel the FQ is scheduled on.
+ */
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < qm_channel_pool1 + 15) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
+ fqid, channel);
+ ret = -EBUSY;
+ goto out;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ qm_dqrr_sdqcr_set(&p->p,
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ do {
+				/* Keep draining DQRR while checking the MR */
+ qm_dqrr_drain_nomatch(&p->p);
+ /* Process message ring too */
+ found_fqrn = qm_mr_drain(&p->p, FQRN);
+ cpu_relax();
+ } while (!found_fqrn);
+
+ }
+ if (res != QM_MCR_RESULT_OK &&
+ res != QM_MCR_RESULT_PENDING) {
+ dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
+ fqid, res);
+ ret = -EIO;
+ goto out;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /*
+			 * ORL had no entries, so there is no need to wait
+			 * until the ERNs come in.
+ */
+ orl_empty = 1;
+ }
+ /*
+		 * Retirement succeeded; check whether the FQ needs
+		 * to be drained.
+ */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ do {
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ /*
+ * Wait for a dequeue and process the dequeues,
+ * making sure to empty the ring completely
+ */
+ } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
+ }
+ qm_dqrr_sdqcr_set(&p->p, 0);
+
+ while (!orl_empty) {
+ /* Wait for the ORL to have been completely drained */
+ orl_empty = qm_mr_drain(&p->p, FQRL);
+ cpu_relax();
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = fqid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ if (!qm_mc_result_timeout(&p->p, &mcr)) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
+ fqid, mcr->result);
+ ret = -EIO;
+ goto out;
+ }
+ break;
+
+ case QM_MCR_NP_STATE_OOS:
+ /* Done */
+ break;
+
+ default:
+ ret = -EIO;
+ }
+
+out:
+ put_affine_portal();
+ return ret;
+}
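+
+/*
+ * Summary of the recovery sequence above: depending on the FQ state reported
+ * by QUERYFQ_NP, the FQ is either already OOS (nothing to do), already
+ * retired (issue ALTER_OOS), or still scheduled/parked/active, in which case
+ * it is retired, drained via SDQCR/VDQCR if necessary, its ORL is waited on,
+ * and only then is it moved out-of-service.
+ */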
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->config;
+}
+
+struct gen_pool *qm_fqalloc; /* FQID allocator */
+struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+
+static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
+{
+ unsigned long addr;
+
+ addr = gen_pool_alloc(p, cnt);
+ if (!addr)
+ return -ENOMEM;
+
+ *result = addr & ~DPAA_GENALLOC_OFF;
+
+ return 0;
+}
+
+int qman_alloc_fqid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_fqalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+int qman_alloc_pool_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_qpalloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+int qman_alloc_cgrid_range(u32 *result, u32 count)
+{
+ return qman_alloc_range(qm_cgralloc, result, count);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+int qman_release_fqid(u32 fqid)
+{
+ int ret = qman_shutdown_fq(fqid);
+
+ if (ret) {
+ pr_debug("FQID %d leaked\n", fqid);
+ return ret;
+ }
+
+ gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_fqid);
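+
+/*
+ * Minimal usage sketch for the ID allocators above (illustrative, not part
+ * of the driver):
+ *
+ *   u32 fqid;
+ *
+ *   if (!qman_alloc_fqid_range(&fqid, 1)) {
+ *       ... create and use a frame queue with this FQID ...
+ *       qman_release_fqid(fqid);
+ *   }
+ *
+ * qman_release_fqid() only returns the ID to the pool once
+ * qman_shutdown_fq() has succeeded; otherwise the FQID is reported as leaked.
+ */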
+
+static int qpool_cleanup(u32 qp)
+{
+ /*
+	 * We query all FQDs starting from FQID 1 until we get an
+	 * "invalid FQID" error, looking for non-OOS FQDs whose destination
+	 * channel is the pool-channel being released. When a non-OOS FQD is
+	 * found we attempt to clean it up.
+ */
+ struct qman_fq fq = {
+ .fqid = QM_FQID_RANGE_START
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if (qm_fqd_get_chan(&fqd) == qp) {
+ /* The channel is the FQ's target, clean it */
+ err = qman_shutdown_fq(fq.fqid);
+ if (err)
+ /*
+ * Couldn't shut down the FQ
+ * so the pool must be leaked
+ */
+ return err;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_pool(u32 qp)
+{
+ int ret;
+
+ ret = qpool_cleanup(qp);
+ if (ret) {
+ pr_debug("CHID %d leaked\n", qp);
+ return ret;
+ }
+
+ gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_pool);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /*
+ * query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released
+ */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+
+ do {
+ struct qm_mcr_queryfq_np np;
+
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 0;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+
+ err = qman_query_fq(&fq, &fqd);
+ if (WARN_ON(err))
+ return 0;
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ fqd.cgid == cgrid) {
+ pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return -EIO;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+int qman_release_cgrid(u32 cgrid)
+{
+ int ret;
+
+ ret = cgr_cleanup(cgrid);
+ if (ret) {
+ pr_debug("CGRID %d leaked\n", cgrid);
+ return ret;
+ }
+
+ gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_release_cgrid);
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
new file mode 100644
index 000000000000..0cace9e0077e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -0,0 +1,808 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00
+#define REG_ERR_IER 0x0e04
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) (u8)((v) >> 24)
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+/*
+ * Corenet initiator settings. Stash request queues are 4-deep to match the
+ * cores' ability to snarf. Stash priority is 3, other priorities are 2.
+ */
+#define QM_CI_SCHED_CFG_SRCCIV 4
+#define QM_CI_SCHED_CFG_SRQ_W 3
+#define QM_CI_SCHED_CFG_RW_W 2
+#define QM_CI_SCHED_CFG_BMAN_W 2
+/* write SRCCIV enable */
+#define QM_CI_SCHED_CFG_SRCCIV_EN BIT(31)
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+struct qm_ecir {
+ u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
+};
+
+static bool qm_ecir_is_dcp(const struct qm_ecir *p)
+{
+ return p->info & BIT(29);
+}
+
+static int qm_ecir_get_pnum(const struct qm_ecir *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_ecir_get_fqid(const struct qm_ecir *p)
+{
+ return p->info & (BIT(24) - 1);
+}
+
+struct qm_ecir2 {
+ u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
+};
+
+static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
+{
+ return p->info & BIT(31);
+}
+
+static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
+{
+ return p->info & (BIT(10) - 1);
+}
+
+struct qm_eadr {
+ u32 info; /* memid[24-27], eadr[0-11] */
+ /* v3: memid[24-28], eadr[0-15] */
+};
+
+static int qm_eadr_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0xf;
+}
+
+static int qm_eadr_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(12) - 1);
+}
+
+static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
+{
+ return (p->info >> 24) & 0x1f;
+}
+
+static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
+{
+ return p->info & (BIT(16) - 1);
+}
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
+ { QM_EIRQ_CTDE, "Corenet Target Data Error" },
+ { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
+ { QM_EIRQ_PLWI, "PFDR Low Watermark" },
+ { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
+ { QM_EIRQ_SBEI, "Single-bit ECC Error" },
+ { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
+ { QM_EIRQ_ICVI, "Invalid Command Verb" },
+ { QM_EIRQ_IFSI, "Invalid Flow Control State" },
+ { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
+ { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
+ { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
+ { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
+ { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
+ { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
+ { QM_EIRQ_IESI, "Invalid Enqueue State" },
+ { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
+ { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
+};
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+static const struct qman_error_info_mdata error_mdata[] = {
+ { 0x01FF, 24, "FQD cache tag memory 0" },
+ { 0x01FF, 24, "FQD cache tag memory 1" },
+ { 0x01FF, 24, "FQD cache tag memory 2" },
+ { 0x01FF, 24, "FQD cache tag memory 3" },
+ { 0x0FFF, 512, "FQD cache memory" },
+ { 0x07FF, 128, "SFDR memory" },
+ { 0x01FF, 72, "WQ context memory" },
+ { 0x00FF, 240, "CGR memory" },
+ { 0x00FF, 302, "Internal Order Restoration List memory" },
+ { 0x01FF, 256, "SW portal ring memory" },
+};
+
+#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of QMan registers I have not yet covered:
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Pointer to the start of the QMan's CCSR space */
+static u32 __iomem *qm_ccsr_start;
+/* An SDQCR mask comprising all the available/visible pool channels */
+static u32 qm_pools_sdqcr;
+
+static inline u32 qm_ccsr_in(u32 offset)
+{
+ return ioread32be(qm_ccsr_start + offset/4);
+}
+
+static inline void qm_ccsr_out(u32 offset, u32 val)
+{
+ iowrite32be(val, qm_ccsr_start + offset/4);
+}
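+
+/*
+ * Note: qm_ccsr_start is a u32 pointer, so the byte offsets defined above
+ * are divided by 4 before the pointer arithmetic. For example,
+ * qm_ccsr_in(REG_MCR) reads the 32-bit big-endian register at byte offset
+ * 0x0b00 from the start of the QMan CCSR region.
+ */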
+
+u32 qm_get_pools_sdqcr(void)
+{
+ return qm_pools_sdqcr;
+}
+
+enum qm_dc_portal {
+ qm_dc_portal_fman0 = 0,
+ qm_dc_portal_fman1 = 1
+};
+
+static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
+{
+ DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
+ portal == qm_dc_portal_fman1);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_ccsr_out(REG_DCP_CFG(portal),
+ (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
+ u8 csw5, u8 csw6, u8 csw7)
+{
+ qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
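+
+/*
+ * Field layout written by qm_set_wq_scheduling() (from the shifts above):
+ * cs_elev occupies bits 31-24, and csw2..csw7 occupy 3-bit fields starting
+ * at bit positions 20, 16, 12, 8, 4 and 0 respectively.
+ */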
+
+static void qm_set_hid(void)
+{
+ qm_ccsr_out(REG_HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(void)
+{
+ qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
+ (QM_CI_SCHED_CFG_SRCCIV << 24) |
+ (QM_CI_SCHED_CFG_SRQ_W << 8) |
+ (QM_CI_SCHED_CFG_RW_W << 4) |
+ QM_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = qm_ccsr_in(REG_IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+#define PFDR_AR_EN BIT(31)
+static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+
+ /* choke if size isn't within range */
+ DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPAA_ASSERT(!(ba & (size - 1)));
+ qm_ccsr_out(offset, upper_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
+ qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
+}
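+
+/*
+ * Example of the AR encoding above (illustrative): for a 1 MiB FQD area,
+ * ilog2(size) == 20, so the attribute register is written with
+ * PFDR_AR_EN | 19, i.e. the size is encoded as log2(size) - 1 with the
+ * enable bit set.
+ */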
+
+static void qm_set_pfdr_threshold(u32 th, u8 k)
+{
+ qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
+ qm_ccsr_out(REG_PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(u16 th)
+{
+ qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+
+ DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt)) {
+ dev_crit(dev, "QMAN_MCR isn't idle");
+ WARN_ON(1);
+ }
+
+ /* Write the MCR command params then the verb */
+ qm_ccsr_out(REG_MCP(0), pfdr_start);
+ /*
+ * TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded.
+ */
+ qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
+ dma_wmb();
+ qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENODEV;
+}
+
+/*
+ * Ideally we would use the DMA API to turn rmem->base into a DMA address
+ * (especially if iommu translations ever get involved). Unfortunately, the
+ * DMA API currently does not allow mapping anything that is not backed with
+ * a struct page.
+ */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz, pfdr_sz;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
+
+static unsigned int qm_get_fqid_maxcnt(void)
+{
+ return fqd_sz / 64;
+}
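+
+/*
+ * The division by 64 reflects the size of one frame queue descriptor, so the
+ * number of usable FQIDs is set by the fsl,qman-fqd reservation; e.g. a
+ * 16 MiB reservation supports 262144 frame queues (illustrative figure).
+ */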
+
+/*
+ * Flush this memory range from the data cache so that QMan-originated
+ * transactions for this memory region can be marked non-coherent.
+ */
+static int zero_priv_mem(struct device *dev, struct device_node *node,
+ phys_addr_t addr, size_t sz)
+{
+ /* map as cacheable, non-guarded */
+ void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+
+ memset_io(tmpp, 0, sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + sz);
+ iounmap(tmpp);
+
+ return 0;
+}
+
+static void log_edata_bits(struct device *dev, u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ dev_warn(dev, "ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << bit_count % 32);
+ }
+ j = 16 - i;
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ dev_warn(dev, " 0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
+}
+
+static void log_additional_error_info(struct device *dev, u32 isr_val,
+ u32 ecsr_val)
+{
+ struct qm_ecir ecir_val;
+ struct qm_eadr eadr_val;
+ int memid;
+
+ ecir_val.info = qm_ccsr_in(REG_ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ struct qm_ecir2 ecir2_val;
+
+ ecir2_val.info = qm_ccsr_in(REG_ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
+ qm_ecir2_get_pnum(&ecir2_val));
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_v3_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_v3_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ dev_warn(dev, "ErrInt: %s id %d\n",
+ qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
+ qm_ecir_get_pnum(&ecir_val));
+ }
+ if (ecsr_val & FQID_ECSR_ERR)
+ dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
+ qm_ecir_get_fqid(&ecir_val));
+
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.info = qm_ccsr_in(REG_EADR);
+ memid = qm_eadr_get_memid(&eadr_val);
+ dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[memid].txt,
+ error_mdata[memid].addr_mask
+ & qm_eadr_get_eadr(&eadr_val));
+ log_edata_bits(dev, error_mdata[memid].bits);
+ }
+ }
+}
+
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+ struct device *dev = ptr;
+
+ ier_val = qm_ccsr_in(REG_ERR_IER);
+ isr_val = qm_ccsr_in(REG_ERR_ISR);
+ ecsr_val = qm_ccsr_in(REG_ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ dev_err_ratelimited(dev, "ErrInt: %s\n",
+ qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(dev, isr_mask,
+ ecsr_val);
+ /* Re-arm error capture registers */
+ qm_ccsr_out(REG_ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
+ dev_dbg(dev, "Disabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_ccsr_out(REG_ERR_IER, ier_val);
+ }
+ }
+ }
+ qm_ccsr_out(REG_ERR_ISR, isr_val);
+
+ return IRQ_HANDLED;
+}
+
+static int qman_init_ccsr(struct device *dev)
+{
+ int i, err;
+
+ /* FQD memory */
+ qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
+ err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
+ if (err)
+ return err;
+ /* thresholds */
+ qm_set_pfdr_threshold(512, 64);
+ qm_set_sfdr_threshold(128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator();
+ /* HID settings */
+ qm_set_hid();
+ /* Set scheduling weights to defaults */
+ for (i = qm_wq_first; i <= qm_wq_last; i++)
+ qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm_dc_portal_fman1, 1, 0);
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+void qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
+		/* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
+ }
+}
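+
+/*
+ * Example (illustrative): on rev 3.x parts, cpu_idx values 4 and 5 both
+ * program SDEST = 2, since each pair of vCPUs shares the same stash request
+ * queue, while pre-3.0 parts program the cpu index directly.
+ */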
+
+static int qman_resource_init(struct device *dev)
+{
+ int pool_chan_num, cgrid_num;
+ int ret, i;
+
+ switch (qman_ip_rev >> 8) {
+ case 1:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ case 2:
+ pool_chan_num = 3;
+ cgrid_num = 64;
+ break;
+ case 3:
+ pool_chan_num = 15;
+ cgrid_num = 256;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
+ pool_chan_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
+ return ret;
+ }
+
+ ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
+ return ret;
+ }
+
+ /* parse pool channels into the SDQCR mask */
+ for (i = 0; i < cgrid_num; i++)
+ qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);
+
+ ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
+ qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
+ if (ret) {
+ dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int fsl_qman_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ int ret, err_irq;
+ u16 id;
+ u8 major, minor;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
+ if (!qm_ccsr_start)
+ return -ENXIO;
+
+ qm_get_version(&id, &major, &minor);
+ if (major == 1 && minor == 0) {
+ dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
+ return -ENODEV;
+ } else if (major == 1 && minor == 1)
+ qman_ip_rev = QMAN_REV11;
+ else if (major == 1 && minor == 2)
+ qman_ip_rev = QMAN_REV12;
+ else if (major == 2 && minor == 0)
+ qman_ip_rev = QMAN_REV20;
+ else if (major == 3 && minor == 0)
+ qman_ip_rev = QMAN_REV30;
+ else if (major == 3 && minor == 1)
+ qman_ip_rev = QMAN_REV31;
+ else {
+ dev_err(dev, "Unknown QMan version\n");
+ return -ENODEV;
+ }
+
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30)
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+
+ ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
+ WARN_ON(ret);
+ if (ret)
+ return -ENODEV;
+
+ ret = qman_init_ccsr(dev);
+ if (ret) {
+ dev_err(dev, "CCSR setup failed\n");
+ return ret;
+ }
+
+ err_irq = platform_get_irq(pdev, 0);
+ if (err_irq <= 0) {
+ dev_info(dev, "Can't get %s property 'interrupts'\n",
+ node->full_name);
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
+ dev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
+ ret, node->full_name);
+ return ret;
+ }
+
+ /*
+	 * Write-to-clear any stale bits (e.g. starvation being asserted prior
+ * to resource allocation during driver init).
+ */
+ qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_ccsr_out(REG_ERR_IER, 0xffffffff);
+
+ qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
+ if (IS_ERR(qm_fqalloc)) {
+ ret = PTR_ERR(qm_fqalloc);
+ dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
+ if (IS_ERR(qm_qpalloc)) {
+ ret = PTR_ERR(qm_qpalloc);
+ dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
+ if (IS_ERR(qm_cgralloc)) {
+ ret = PTR_ERR(qm_cgralloc);
+ dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qman_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
+ if (ret)
+ return ret;
+
+ ret = qman_wq_alloc();
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+
+static struct platform_driver fsl_qman_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fsl_qman_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fsl_qman_probe,
+};
+
+builtin_platform_driver(fsl_qman_driver);
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
new file mode 100644
index 000000000000..148614388fca
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -0,0 +1,355 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+/* Enable portal interrupts (as opposed to polling mode) */
+#define CONFIG_FSL_DPA_PIRQ_SLOW 1
+#define CONFIG_FSL_DPA_PIRQ_FAST 1
+
+static struct cpumask portal_cpus;
+/* protect qman global registers and global data shared among portals */
+static DEFINE_SPINLOCK(qman_lock);
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct device *dev = pcfg->dev;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__);
+ goto no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto out_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, dev);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_device_attach() = %d", __func__,
+ ret);
+ goto out_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ dev_err(dev, "%s(): iommu_domain_set_attr() = %d", __func__,
+ ret);
+ goto out_detach_device;
+ }
+
+no_iommu:
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+out_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, NULL);
+out_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+ pcfg->iommu_domain = NULL;
+#endif
+}
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+ u32 irq_sources = 0;
+
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->channel);
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->cpu);
+
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (!p) {
+ dev_crit(pcfg->dev, "%s: Portal failure on cpu %d\n",
+ __func__, pcfg->cpu);
+ return NULL;
+ }
+
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+
+ spin_lock(&qman_lock);
+ if (cpumask_equal(&portal_cpus, cpu_possible_mask)) {
+ /* all assigned portals are initialized now */
+ qman_init_cgr_all();
+ }
+ spin_unlock(&qman_lock);
+
+ dev_info(pcfg->dev, "Portal initialised, cpu %d\n", pcfg->cpu);
+
+ return p;
+}
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU /* TODO */
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ dev_err(pcfg->dev,
+ "Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+ qman_set_sdest(pcfg->channel, cpu);
+}
+
+static void qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+}
+
+static void qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+
+ p = affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+}
+
+static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ qman_online_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ qman_offline_cpu(cpu);
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block qman_hotplug_cpu_notifier = {
+ .notifier_call = qman_hotplug_cpu_callback,
+};
+
+static int qman_portal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct qm_portal_config *pcfg;
+ struct resource *addr_phys[2];
+ const u32 *channel;
+ void __iomem *va;
+ int irq, len, cpu;
+
+ pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg)
+ return -ENOMEM;
+
+ pcfg->dev = dev;
+
+ addr_phys[0] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CE);
+ if (!addr_phys[0]) {
+ dev_err(dev, "Can't get %s property 'reg::CE'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
+ DPAA_PORTAL_CI);
+ if (!addr_phys[1]) {
+ dev_err(dev, "Can't get %s property 'reg::CI'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+
+ channel = of_get_property(node, "cell-index", &len);
+ if (!channel || (len != 4)) {
+ dev_err(dev, "Can't get %s property 'cell-index'\n",
+ node->full_name);
+ return -ENXIO;
+ }
+ pcfg->channel = *channel;
+ pcfg->cpu = -1;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(dev, "Can't get %s IRQ\n", node->full_name);
+ return -ENXIO;
+ }
+ pcfg->irq = irq;
+
+ va = ioremap_prot(addr_phys[0]->start, resource_size(addr_phys[0]), 0);
+ if (!va)
+ goto err_ioremap1;
+
+ pcfg->addr_virt[DPAA_PORTAL_CE] = va;
+
+ va = ioremap_prot(addr_phys[1]->start, resource_size(addr_phys[1]),
+ _PAGE_GUARDED | _PAGE_NO_CACHE);
+ if (!va)
+ goto err_ioremap2;
+
+ pcfg->addr_virt[DPAA_PORTAL_CI] = va;
+
+ pcfg->pools = qm_get_pools_sdqcr();
+
+ spin_lock(&qman_lock);
+ cpu = cpumask_next_zero(-1, &portal_cpus);
+ if (cpu >= nr_cpu_ids) {
+ /* unassigned portal, skip init */
+ spin_unlock(&qman_lock);
+ return 0;
+ }
+
+ cpumask_set_cpu(cpu, &portal_cpus);
+ spin_unlock(&qman_lock);
+ pcfg->cpu = cpu;
+
+ if (!init_pcfg(pcfg))
+ goto err_ioremap2;
+
+ /* clear irq affinity if assigned cpu is offline */
+ if (!cpu_online(cpu))
+ qman_offline_cpu(cpu);
+
+ return 0;
+
+err_ioremap2:
+ iounmap(pcfg->addr_virt[DPAA_PORTAL_CE]);
+err_ioremap1:
+ dev_err(dev, "ioremap failed\n");
+ return -ENXIO;
+}
+
+static const struct of_device_id qman_portal_ids[] = {
+ {
+ .compatible = "fsl,qman-portal",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qman_portal_ids);
+
+static struct platform_driver qman_portal_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = qman_portal_ids,
+ },
+ .probe = qman_portal_probe,
+};
+
+static int __init qman_portal_driver_register(struct platform_driver *drv)
+{
+ int ret;
+
+ ret = platform_driver_register(drv);
+ if (ret < 0)
+ return ret;
+
+ register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
+
+ return 0;
+}
+
+module_driver(qman_portal_driver,
+ qman_portal_driver_register, platform_driver_unregister);
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
new file mode 100644
index 000000000000..5cf821e623a9
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -0,0 +1,371 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "dpaa_sys.h"
+
+#include <soc/fsl/qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+struct qm_mcr_querywq {
+ u8 verb;
+ u8 result;
+ u16 channel_wq; /* ignores wq (3 lsbits): _res[0-2] */
+ u8 __reserved[28];
+ u32 wq_len[8];
+} __packed;
+
+static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq)
+{
+ return wq->channel_wq >> 3;
+}
+
+struct __qm_mcr_querycongestion {
+ u32 state[8];
+};
+
+/* "Query Congestion Group State" */
+struct qm_mcr_querycongestion {
+ u8 verb;
+ u8 result;
+ u8 __reserved[30];
+ /* Access this struct using qman_cgrs_get() */
+ struct __qm_mcr_querycongestion state;
+} __packed;
+
+/* "Query CGR" */
+struct qm_mcr_querycgr {
+ u8 verb;
+ u8 result;
+ u16 __reserved1;
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[6];
+ u8 i_bcnt_hi; /* high 8-bits of 40-bit "Instant" */
+ u32 i_bcnt_lo; /* low 32-bits of 40-bit */
+ u8 __reserved3[3];
+ u8 a_bcnt_hi; /* high 8-bits of 40-bit "Average" */
+ u32 a_bcnt_lo; /* low 32-bits of 40-bit */
+ u32 cscn_targ_swp[4];
+} __packed;
+
+static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->i_bcnt_hi << 32) | (u64)q->i_bcnt_lo;
+}
+static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q)
+{
+ return ((u64)q->a_bcnt_hi << 32) | (u64)q->a_bcnt_lo;
+}
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcc_queryfq_np {
+ u8 _ncw_verb;
+ u8 __reserved1[3];
+ u32 fqid; /* 24-bit */
+ u8 __reserved2[56];
+} __packed;
+
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24)-1,
+ qm_mcr_odp_seq_mask = BIT(14)-1,
+ qm_mcr_orp_nesn_mask = BIT(14)-1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15)-1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24)-1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_hptr_mask = BIT(24)-1,
+ qm_mcr_pfdr_tptr_mask = BIT(24)-1,
+ qm_mcr_is_mask = BIT(1)-1,
+ qm_mcr_frm_cnt_mask = BIT(24)-1,
+};
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
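A hedged sketch of how these masks pair with struct qm_mcr_queryfq_np; the function name and the pr_info() reporting are illustrative only, not defined by this header:

static void example_dump_fq_np(const struct qm_mcr_queryfq_np *np)
{
	u32 frames = qm_mcr_np_get(np, frm_cnt);	/* masked to 24 bits */
	u8 state = np->state & QM_MCR_NP_STATE_MASK;

	pr_info("FQ state %u, %u frames, %u bytes\n",
		state, frames, np->byte_cnt);
}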
+
+/* Congestion Groups */
+
+/*
+ * This wrapper represents a bit-array for the state of the 256 QMan congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we ignore
+ * those that don't concern us. We harness the structure and accessor details
+ * already used in the management command to query congestion groups.
+ */
+#define CGR_BITS_PER_WORD 5
+#define CGR_WORD(x) ((x) >> CGR_BITS_PER_WORD)
+#define CGR_BIT(x) (BIT(31) >> ((x) & 0x1f))
+#define CGR_NUM (sizeof(struct __qm_mcr_querycongestion) << 3)
+
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+
+static inline int qman_cgrs_get(struct qman_cgrs *c, u8 cgr)
+{
+ return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr);
+}
+
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ & *_b++;
+}
+
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.state;
+ const u32 *_a = a->q.state;
+ const u32 *_b = b->q.state;
+
+ for (ret = 0; ret < 8; ret++)
+ *_d++ = *_a++ ^ *_b++;
+}
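As a hedged sketch of how the helpers above might be combined (example_any_of_ours_congested() is illustrative, not part of this header): intersect a queried congestion state with a local mask and test only the CGRs of interest.

static bool example_any_of_ours_congested(const struct qman_cgrs *queried,
					  const struct qman_cgrs *ours)
{
	struct qman_cgrs tmp;
	unsigned int i;

	qman_cgrs_and(&tmp, queried, ours);	/* keep only the CGRs we own */
	for (i = 0; i < CGR_NUM; i++)
		if (qman_cgrs_get(&tmp, i))
			return true;
	return false;
}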
+
+void qman_init_cgr_all(void);
+
+struct qm_portal_config {
+ /*
+ * Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited.
+ */
+ void __iomem *addr_virt[2];
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ /* portal is affined to this cpu */
+ int cpu;
+ /* portal interrupt line */
+ int irq;
+ /*
+	 * the portal's dedicated channel id, used when initialising
+ * frame queues to target this portal when scheduled
+ */
+ u16 channel;
+ /*
+ * mask of pool channels this portal has dequeue access to
+ * (using QM_SDQCR_CHANNELS_POOL(n) for the bitmask)
+ */
+ u32 pools;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+
+#define QM_FQID_RANGE_START 1 /* FQID 0 reserved for internal use */
+extern struct gen_pool *qm_fqalloc; /* FQID allocator */
+extern struct gen_pool *qm_qpalloc; /* pool-channel allocator */
+extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
+u32 qm_get_pools_sdqcr(void);
+
+int qman_wq_alloc(void);
+void qman_liodn_fixup(u16 channel);
+void qman_set_sdest(u16 channel, unsigned int cpu_idx);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+
+/*
+ * qman_query_fq - Queries FQD fields (via h/w query command)
+ * @fq: the frame queue object to be queried
+ * @fqd: storage for the queried FQD fields
+ */
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
+
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * If QMAN_VOLATILE_FLAG_WAIT is specified and the VDQCR is already in use, the
+ * function will block and sleep; otherwise it returns non-zero to indicate failure.
+ * If QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
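A minimal usage sketch, mirroring how the self-test added later in this patch drives the API (example_drain_three() is illustrative): request exactly three frames from a parked FQ and block until the command completes.

static int example_drain_three(struct qman_fq *fq)
{
	u32 vdqcr = QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_EXACT |
		    QM_VDQCR_NUMFRAMES_SET(3);

	return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}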
+
+int qman_alloc_fq_table(u32 num_fqids);
+
+/* QMan s/w corenet portal, low-level i/face */
+
+/*
+ * For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
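For illustration (a sketch, not a value used by this driver), an SDQCR that dequeues exactly one frame from work queue 3 of the portal's dedicated channel could be composed as

	QM_SDQCR_SOURCE_SPECIFICWQ | QM_SDQCR_COUNT_EXACT1 |
	QM_SDQCR_SPECIFICWQ_DEDICATED | QM_SDQCR_SPECIFICWQ_WQ(3)

The CHANNELS form is composed the same way; PORTAL_SDQCR in qman_test_api.c below is a concrete example of it.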
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/*
+ * Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+extern struct qman_portal *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
diff --git a/drivers/soc/fsl/qbman/qman_test.c b/drivers/soc/fsl/qbman/qman_test.c
new file mode 100644
index 000000000000..18f7f0202fa7
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.c
@@ -0,0 +1,62 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("QMan testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ int err = 0;
+
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH
+ err = qman_test_stash();
+ if (err)
+ break;
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_API
+ err = qman_test_api();
+ if (err)
+ break;
+#endif
+ }
+ return err;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
new file mode 100644
index 000000000000..d5f8cb2260dc
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test.h
@@ -0,0 +1,36 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_priv.h"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+int qman_test_stash(void);
+int qman_test_api(void);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
new file mode 100644
index 000000000000..6880ff17f45e
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -0,0 +1,252 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const union qm_mr_entry *);
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *fd)
+{
+ qm_fd_addr_set64(fd, 0xabdeadbeefLLU);
+ qm_fd_set_contig_big(fd, 0x0000ffff);
+ fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *fd)
+{
+ u64 t = qm_fd_addr_get64(fd);
+ int z = t >> 40;
+ unsigned int len, off;
+ enum qm_fd_format fmt;
+
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(fd, t);
+
+ fmt = qm_fd_get_format(fd);
+ off = qm_fd_get_offset(fd);
+ len = qm_fd_get_length(fd);
+ len--;
+ qm_fd_set_param(fd, fmt, off, len);
+
+ fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+
+ if (!r) {
+ enum qm_fd_format fmt_a, fmt_b;
+
+ fmt_a = qm_fd_get_format(a);
+ fmt_b = qm_fd_get_format(b);
+ r = fmt_a - fmt_b;
+ }
+ if (!r)
+ r = a->cfg - b->cfg;
+ if (!r)
+ r = a->cmd - b->cmd;
+ return r;
+}
+
+/* test */
+static int do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ int err = 0;
+
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd)) {
+ pr_crit("qman_enqueue() failed\n");
+ err = -EIO;
+ }
+ fd_inc(&fd);
+ }
+
+ return err;
+}
+
+int qman_test_api(void)
+{
+ unsigned int flags, frmcnt;
+ int err;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("%s(): Starting\n", __func__);
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ err = qman_create_fq(0, FQ_FLAGS, fq);
+ if (err) {
+ pr_crit("qman_create_fq() failed\n");
+ goto failed;
+ }
+ err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
+ if (err) {
+ pr_crit("qman_init_fq() failed\n");
+ goto failed;
+ }
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (till-empty);\n");
+ frmcnt = QM_VDQCR_NUMFRAMES_TILLEMPTY;
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_crit("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ frmcnt = QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL);
+ err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
+ if (err) {
+ pr_err("qman_volatile_dequeue() failed\n");
+ goto failed;
+ }
+
+ err = do_enqueues(fq);
+ if (err)
+ goto failed;
+ pr_info("scheduled dequeue (till-empty)\n");
+ err = qman_schedule_fq(fq);
+ if (err) {
+ pr_crit("qman_schedule_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ err = qman_retire_fq(fq, &flags);
+ if (err < 0) {
+ pr_crit("qman_retire_fq() failed\n");
+ goto failed;
+ }
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS) {
+ err = -EIO;
+ pr_crit("leaking frames\n");
+ goto failed;
+ }
+ err = qman_oos_fq(fq);
+ if (err) {
+ pr_crit("qman_oos_fq() failed\n");
+ goto failed;
+ }
+ qman_destroy_fq(fq);
+ pr_info("%s(): Finished\n", __func__);
+ return 0;
+
+failed:
+ WARN_ON(1);
+ return err;
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (WARN_ON(fd_cmp(&fd_dq, &dq->fd))) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ return qman_cb_dqrr_consume;
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ pr_crit("cb_ern() unimplemented");
+ WARN_ON(1);
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const union qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI)) {
+ pr_crit("unexpected FQS message");
+ WARN_ON(1);
+ return;
+ }
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
new file mode 100644
index 000000000000..43cf66ba42f5
--- /dev/null
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -0,0 +1,617 @@
+/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * i.e. when cache expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
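To make phase 2 concrete, a hedged illustration (assuming two online cpus and HP_PER_CPU == 2, with f1..f4 standing for the FQIDs allocated in order): cpu0.handler0 gets tx=f1; cpu1.handler0 gets rx=f1, tx=f2; cpu0.handler1 gets rx=f2, tx=f3; cpu1.handler1 gets rx=f3, tx=f4; the final fixup then sets cpu0.handler0's rx to f4, closing the ring so every hop moves the frame to the other cpu.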
+
+/*
+ * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive).
+ */
+struct bstrap {
+ int (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *bs)
+{
+ struct bstrap *bstrap = bs;
+ int err;
+
+ atomic_inc(&bstrap->started);
+ err = bstrap->fn();
+ if (err)
+ return err;
+ while (!kthread_should_stop())
+ msleep(20);
+ return 0;
+}
+static int on_all_cpus(int (*fn)(void))
+{
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /*
+ * If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop.
+ */
+ while (!atomic_read(&bstrap.started))
+ msleep(20);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /*
+ * when repeatedly scanning 'hp_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state
+ */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come, first-served order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 words (320 bytes) of frame data, spanning more than one cacheline */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static int allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
+
+ if (!pdev) {
+ pr_crit("platform_device_alloc() failed");
+ return -EIO;
+ }
+ if (platform_device_add(pdev)) {
+ pr_crit("platform_device_add() failed");
+ return -EIO;
+ }
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ if (!__frame_ptr)
+ return -ENOMEM;
+
+ frame_ptr = PTR_ALIGN(__frame_ptr, 64);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+ return 0;
+}
+
+static void deallocate_frame_data(void)
+{
+ kfree(__frame_ptr);
+}
+
+static inline int process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+
+ if (qm_fd_addr_get64(fd) != handler->addr) {
+ pr_crit("bad frame address");
+ return -EIO;
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr) {
+ pr_crit("corrupt frame data");
+ return -EIO;
+ }
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ return 0;
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ if (process_frame_data(handler, &dqrr->fd)) {
+ WARN_ON(1);
+ goto skip;
+ }
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd)) {
+ pr_crit("qman_enqueue() failed");
+ WARN_ON(1);
+ goto skip;
+ }
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+skip:
+ return qman_cb_dqrr_consume;
+}
+
+static int create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler) {
+ pr_crit("kmem_cache_alloc() failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ return 0;
+}
+
+static int destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags = 0;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags) ||
+ (flags & QMAN_FQ_STATE_BLOCKOOS)) {
+ pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
+ WARN_ON(1);
+ return -EIO;
+ }
+ if (qman_oos_fq(&handler->rx)) {
+ pr_crit("qman_oos_fq(rx) failed");
+ WARN_ON(1);
+ return -EIO;
+ }
+ qman_destroy_fq(&handler->rx);
+ qman_destroy_fq(&handler->tx);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ return 0;
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1))
+ / (L1_CACHE_BYTES);
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
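As a worked example (assuming 64-byte cachelines): HP_NUM_WORDS * 4 = 320 bytes of frame data would need five lines, so STASH_DATA_CL is clamped to the three-line maximum, while STASH_CTX_CL covers only the leading portion of struct hp_handler, i.e. the fields declared above fqid_rx that are meant to be stashed on dequeue.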
+
+static int init_handler(void *h)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = h;
+ int err;
+
+ if (handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
+ if (err) {
+ pr_crit("qman_create_fq(rx) failed");
+ goto failed;
+ }
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
+ err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts);
+ if (err) {
+ pr_crit("qman_init_fq(rx) failed");
+ goto failed;
+ }
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx);
+ if (err) {
+ pr_crit("qman_create_fq(tx) failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void init_handler_cb(void *h)
+{
+ if (init_handler(h))
+ WARN_ON(1);
+}
+
+static int init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int err;
+
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ err = qman_alloc_fqid(&fqid);
+ if (err) {
+ pr_crit("qman_alloc_fqid() failed");
+ return err;
+ }
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
+ return 1;
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+ return 0;
+}
+
+static int init_phase3(void)
+{
+ int loop, err;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id()) {
+ err = init_handler(hp_cpu->iterator);
+ if (err)
+ return err;
+ } else {
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler_cb, hp_cpu->iterator, 1);
+ }
+ preempt_enable();
+ }
+ }
+ return 0;
+}
+
+static int send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop, err;
+ struct qm_fd fd;
+
+ if (special_handler->processor_id != smp_processor_id()) {
+ err = -EIO;
+ goto failed;
+ }
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr) {
+ err = -EIO;
+ pr_crit("corrupt frame data");
+ goto failed;
+ }
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ err = qman_enqueue(&special_handler->tx, &fd);
+ if (err) {
+ pr_crit("qman_enqueue() failed");
+ goto failed;
+ }
+
+ return 0;
+failed:
+ return err;
+}
+
+static void send_first_frame_cb(void *ignore)
+{
+ if (send_first_frame(NULL))
+ WARN_ON(1);
+}
+
+int qman_test_stash(void)
+{
+ int err;
+
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("%s(): skip - only 1 CPU\n", __func__);
+ return 0;
+ }
+
+ pr_info("%s(): Starting\n", __func__);
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab) {
+ err = -EIO;
+ pr_crit("kmem_cache_create() failed");
+ goto failed;
+ }
+
+ err = allocate_frame_data();
+ if (err)
+ goto failed;
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ err = init_phase2();
+ if (err)
+ goto failed;
+
+ err = init_phase3();
+ if (err)
+ goto failed;
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id()) {
+ err = send_first_frame(NULL);
+ if (err)
+ goto failed;
+ } else {
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame_cb, NULL, 1);
+ }
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers)) {
+ err = -EIO;
+ pr_crit("on_each_cpu() failed");
+ goto failed;
+ }
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("%s(): Finished\n", __func__);
+
+ return 0;
+failed:
+ WARN_ON(1);
+ return err;
+}
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 333eb2215a57..0aaf429f31d5 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -41,7 +41,8 @@ struct qe_gpio_chip {
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
+ struct qe_gpio_chip *qe_gc =
+ container_of(mm_gc, struct qe_gpio_chip, mm_gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 7026507e6f1d..2707a827261b 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -69,8 +69,8 @@ static phys_addr_t qebase = -1;
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
- int size;
- const u32 *prop;
+ int ret;
+ struct resource res;
if (qebase != -1)
return qebase;
@@ -82,9 +82,9 @@ phys_addr_t get_qe_base(void)
return qebase;
}
- prop = of_get_property(qe, "reg", &size);
- if (prop && size >= sizeof(*prop))
- qebase = of_translate_address(qe, prop);
+ ret = of_address_to_resource(qe, 0, &res);
+ if (!ret)
+ qebase = res.start;
of_node_put(qe);
return qebase;
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 41eff805a904..104e68d9b84f 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -70,6 +70,11 @@ int cpm_muram_init(void)
}
muram_pool = gen_pool_create(0, -1);
+ if (!muram_pool) {
+ pr_err("Cannot allocate memory pool for CPM/QE muram");
+ ret = -ENOMEM;
+ goto out_muram;
+ }
muram_pbase = of_translate_address(np, zero);
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
pr_err("Cannot translate zero through CPM muram node");
@@ -116,6 +121,9 @@ static unsigned long cpm_muram_alloc_common(unsigned long size,
struct muram_block *entry;
unsigned long start;
+ if (!muram_pool && cpm_muram_init())
+ goto out2;
+
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
if (!start)
goto out2;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index 5e48b1470178..a1048b44e6b9 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -99,7 +99,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
utdm->tdm_port = val;
ut_info->uf_info.tdm_num = utdm->tdm_port;
- if (of_get_property(np, "fsl,tdm-internal-loopback", NULL))
+ if (of_property_read_bool(np, "fsl,tdm-internal-loopback"))
utdm->tdm_mode = TDM_INTERNAL_LOOPBACK;
else
utdm->tdm_mode = TDM_NORMAL;
@@ -167,7 +167,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
}
if (siram_init_flag == 0) {
- memset_io(utdm->siram, 0, res->end - res->start + 1);
+ memset_io(utdm->siram, 0, resource_size(res));
siram_init_flag = 1;
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 396ded52ab70..209a8f7ef02b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1187,8 +1187,10 @@ int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
hdata.type = heap->type;
hdata.heap_id = heap->id;
- ret = copy_to_user(&buffer[cnt],
- &hdata, sizeof(hdata));
+ if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+ ret = -EFAULT;
+ goto out;
+ }
cnt++;
if (cnt >= max_cnt)
diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c
index 15bac92b7f04..46b2bb99bfd6 100644
--- a/drivers/staging/android/ion/ion_of.c
+++ b/drivers/staging/android/ion/ion_of.c
@@ -107,7 +107,7 @@ struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
heap_pdev = of_platform_device_create(node, heaps[i].name,
&pdev->dev);
- if (!pdev)
+ if (!heap_pdev)
return ERR_PTR(-ENOMEM);
heap_pdev->dev.platform_data = &heaps[i];
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index e36ee984485b..34307ac3f255 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -128,6 +128,7 @@ int arche_platform_change_state(enum arche_platform_state state,
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("arche-platform device not found\n");
+ of_node_put(np);
return -ENODEV;
}
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
index 071bb1cfd3ae..baab460eeaa3 100644
--- a/drivers/staging/greybus/es2.c
+++ b/drivers/staging/greybus/es2.c
@@ -1548,7 +1548,8 @@ static int ap_probe(struct usb_interface *interface,
INIT_LIST_HEAD(&es2->arpcs);
spin_lock_init(&es2->arpc_lock);
- if (es2_arpc_in_enable(es2))
+ retval = es2_arpc_in_enable(es2);
+ if (retval)
goto error;
retval = gb_hd_add(hd);
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 5e06e4229e42..250caa00de5e 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -702,15 +702,13 @@ static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add irq chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
- dev_err(&connection->bundle->dev,
- "failed to add gpio chip: %d\n", ret);
+ dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
index 69f67ddbd4a3..660b4674a76f 100644
--- a/drivers/staging/greybus/module.c
+++ b/drivers/staging/greybus/module.c
@@ -127,7 +127,7 @@ struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
return module;
err_put_interfaces:
- for (--i; i > 0; --i)
+ for (--i; i >= 0; --i)
gb_interface_put(module->interfaces[i]);
put_device(&module->dev);
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 5ee7954bd9f9..2633d2bfb1b4 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -888,7 +888,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
minor = alloc_minor(gb_tty);
if (minor < 0) {
if (minor == -ENOSPC) {
- dev_err(&connection->bundle->dev,
+ dev_err(&gbphy_dev->dev,
"no more free minor numbers\n");
retval = -ENODEV;
} else {
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index d626125d7af9..564b36d4f648 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -468,6 +468,8 @@ static inline int __sca3000_get_base_freq(struct sca3000_state *st,
case SCA3000_MEAS_MODE_OP_2:
*base_freq = info->option_mode_2_freq;
break;
+ default:
+ ret = -EINVAL;
}
error_ret:
return ret;
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6eae60595905..23fda9d98bff 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -871,12 +871,10 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
-static ssize_t unstable_stats_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
+static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = m->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
long pages;
int mb;
@@ -884,19 +882,21 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_SIZE) >> 20;
- return sprintf(buf, "unstable_check: %8d\n"
- "unstable_pages: %12ld\n"
- "unstable_mb: %8d\n",
- cache->ccc_unstable_check, pages, mb);
+ seq_printf(m,
+ "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
+ cache->ccc_unstable_check, pages, mb);
+
+ return 0;
}
-static ssize_t unstable_stats_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer,
- size_t count)
+static ssize_t ll_unstable_stats_seq_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *off)
{
- struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
- ll_kobj);
+ struct super_block *sb = ((struct seq_file *)file->private_data)->private;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
char kernbuf[128];
int val, rc;
@@ -922,7 +922,7 @@ static ssize_t unstable_stats_store(struct kobject *kobj,
return count;
}
-LUSTRE_RW_ATTR(unstable_stats);
+LPROC_SEQ_FOPS(ll_unstable_stats);
static ssize_t root_squash_show(struct kobject *kobj, struct attribute *attr,
char *buf)
@@ -995,6 +995,7 @@ static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
{ "max_cached_mb", &ll_max_cached_mb_fops, NULL },
{ "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
+ { "unstable_stats", &ll_unstable_stats_fops, NULL },
{ "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
{ .name = "nosquash_nids",
.fops = &ll_nosquash_nids_fops },
@@ -1026,7 +1027,6 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
- &lustre_attr_unstable_stats.attr,
&lustre_attr_root_squash.attr,
NULL,
};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
actual_pages = get_user_pages(task, task->mm,
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
- (type == PAGELIST_READ) /*Write */ ,
- 0 /*Force */ ,
+ (type == PAGELIST_READ) ? FOLL_WRITE : 0,
pages,
NULL /*vmas */);
up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
current->mm, /* mm */
(unsigned long)virt_addr, /* start */
num_pages, /* len */
- 0, /* write */
- 0, /* force */
+ 0, /* gup_flags */
pages, /* pages (array of page pointers) */
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 78f5613e9467..6ab7443eabde 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -3388,7 +3388,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
clients_count++;
- destroy_workqueue(hif_workqueue);
_fail_:
return result;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
* Otherwise, initiator is not expecting a NOPIN is response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
- kref_get(&se_cmd->cmd_kref);
+ if (ack_kref) {
+ if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+ return -EINVAL;
+
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd;
+ struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
int rc;
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
rc = kref_get_unless_zero(&se_cmd->cmd_kref);
if (rc) {
se_cmd->cmd_wait_set = 1;
spin_lock(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
- }
+ } else
+ list_del_init(&se_cmd->se_cmd_list);
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+ },
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 0f173bf7dbac..2b3c8564ace8 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
size_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
- /* Offset of data ring from start of mb */
+ /* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
/*
* We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
*
* Called with ring lock held.
*/
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return true;
}
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
- if ((command_size > (udev->cmdr_size / 2))
- || data_length > udev->data_size)
- pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, data_length,
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+ "cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
+ spin_unlock_irq(&udev->cmdr_lock);
+ return TCM_INVALID_CDB_FIELD;
+ }
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
- return -ETIMEDOUT;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /*
- * Fix up iovecs, and handle if allocation in data ring wrapped.
- */
+ /* Handle allocating space from the data area */
iov = &entry->req.iov[0];
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mod_timer(&udev->timeout,
round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0;
+ return TCM_NO_SENSE;
}
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret < 0) {
+ if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
spin_lock_irq(&udev->commands_lock);
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/*
* cmd has been completed already from timeout, just reclaim
- * data ring space and free cmd
+ * data area space and free cmd
*/
free_data_area(udev, cmd);
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
}
static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
- int ret = tcmu_queue_cmd(se_cmd);
-
- if (ret != 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- else
- return TCM_NO_SENSE;
-}
-
-static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
- return passthrough_parse_cdb(cmd, tcmu_pass_op);
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static const struct target_backend_ops tcmu_ops = {
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
+ * initiator to fall back to normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..ff5de9a96643 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args )
+
static void ft_sess_delete_all(struct ft_tport *);
/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
- pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
kfree(sess);
- return NULL;
+ sess = ERR_PTR(rc);
}
return sess;
}
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
- if (!sess)
- return FC_SPP_RESP_RES;
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
diff --git a/drivers/thermal/intel_pch_thermal.c b/drivers/thermal/intel_pch_thermal.c
index 9b4815e81b0d..19bf2028e508 100644
--- a/drivers/thermal/intel_pch_thermal.c
+++ b/drivers/thermal/intel_pch_thermal.c
@@ -20,10 +20,13 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
#include <linux/thermal.h>
#include <linux/pm.h>
/* Intel PCH thermal Device IDs */
+#define PCH_THERMAL_DID_HSW_1 0x9C24 /* Haswell PCH */
+#define PCH_THERMAL_DID_HSW_2 0x8C24 /* Haswell PCH */
#define PCH_THERMAL_DID_WPT 0x9CA4 /* Wildcat Point */
#define PCH_THERMAL_DID_SKL 0x9D31 /* Skylake PCH */
@@ -66,9 +69,53 @@ struct pch_thermal_device {
unsigned long crt_temp;
int hot_trip_id;
unsigned long hot_temp;
+ int psv_trip_id;
+ unsigned long psv_temp;
bool bios_enabled;
};
+#ifdef CONFIG_ACPI
+
+/*
+ * On some platforms, there is a companion ACPI device, which adds
+ * passive trip temperature using the _PSV method. There is no specific
+ * passive temperature setting in the MMIO interface of this PCI device.
+ */
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ struct acpi_device *adev;
+
+ ptd->psv_trip_id = -1;
+
+ adev = ACPI_COMPANION(&ptd->pdev->dev);
+ if (adev) {
+ unsigned long long r;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
+ &r);
+ if (ACPI_SUCCESS(status)) {
+ unsigned long trip_temp;
+
+ trip_temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+ if (trip_temp) {
+ ptd->psv_temp = trip_temp;
+ ptd->psv_trip_id = *nr_trips;
+ ++(*nr_trips);
+ }
+ }
+ }
+}
+#else
+static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
+ int *nr_trips)
+{
+ ptd->psv_trip_id = -1;
+
+}
+#endif
+
static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
{
u8 tsel;
@@ -119,6 +166,8 @@ read_trips:
++(*nr_trips);
}
+ pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
return 0;
}
@@ -194,6 +243,8 @@ static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
*type = THERMAL_TRIP_CRITICAL;
else if (ptd->hot_trip_id == trip)
*type = THERMAL_TRIP_HOT;
+ else if (ptd->psv_trip_id == trip)
+ *type = THERMAL_TRIP_PASSIVE;
else
return -EINVAL;
@@ -208,6 +259,8 @@ static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *tem
*temp = ptd->crt_temp;
else if (ptd->hot_trip_id == trip)
*temp = ptd->hot_temp;
+ else if (ptd->psv_trip_id == trip)
+ *temp = ptd->psv_temp;
else
return -EINVAL;
@@ -242,6 +295,11 @@ static int intel_pch_thermal_probe(struct pci_dev *pdev,
ptd->ops = &pch_dev_ops_wpt;
dev_name = "pch_skylake";
break;
+ case PCH_THERMAL_DID_HSW_1:
+ case PCH_THERMAL_DID_HSW_2:
+ ptd->ops = &pch_dev_ops_wpt;
+ dev_name = "pch_haswell";
+ break;
default:
dev_err(&pdev->dev, "unknown pch thermal device\n");
return -ENODEV;
@@ -324,6 +382,8 @@ static int intel_pch_thermal_resume(struct device *device)
static struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 0e4dc0afcfd2..7a223074df3d 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
-
static int __init powerclamp_probe(void)
{
- if (!x86_match_cpu(intel_powerclamp_ids)) {
- pr_err("Intel powerclamp does not run on family %d model %d\n",
- boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+ pr_err("CPU does not support MWAIT");
return -ENODEV;
}
diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c
index 886fcf37f291..b9923464599f 100644
--- a/drivers/tty/serial/8250/8250_lpss.c
+++ b/drivers/tty/serial/8250/8250_lpss.c
@@ -213,7 +213,7 @@ static int qrk_serial_setup(struct lpss8250 *lpss, struct uart_port *port)
struct pci_dev *pdev = to_pci_dev(port->dev);
int ret;
- ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 1bfb6fdbaa20..1731b98d2471 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -83,7 +83,8 @@ static const struct serial8250_config uart_config[] = {
.name = "16550A",
.fifo_size = 16,
.tx_loadsz = 16,
- .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index b8d9c8c9d02a..417d9e7038e1 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -99,7 +99,7 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* Divisor latch access bit does not exist. */
- value &= ~(UART_LCR_DLAB << valshift);
+ value &= ~UART_LCR_DLAB;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
@@ -199,7 +199,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
- dev_err(dev, "failed to get memory resource");
+ dev_err(dev, "failed to get memory resource\n");
return -EINVAL;
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c7831407a882..25c1d7bc0100 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1625,6 +1625,7 @@ config SERIAL_SPRD_CONSOLE
config SERIAL_STM32
tristate "STMicroelectronics STM32 serial port support"
select SERIAL_CORE
+ depends on HAS_DMA
depends on ARM || COMPILE_TEST
help
This driver is for the on-chip Serial Controller on
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fd8aa1f4ba78..168b10cad47b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2132,11 +2132,29 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {
/* RS232 with hardware handshake (RTS/CTS) */
- if (atmel_use_dma_rx(port) && !atmel_use_fifo(port)) {
- dev_info(port->dev, "not enabling hardware flow control because DMA is used");
- termios->c_cflag &= ~CRTSCTS;
- } else {
+ if (atmel_use_fifo(port) &&
+ !mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) {
+ /*
+ * with ATMEL_US_USMODE_HWHS set, the controller will
+ * be able to drive the RTS pin high/low when the RX
+ * FIFO is above RXFTHRES/below RXFTHRES2.
+ * It will also disable the transmitter when the CTS
+ * pin is high.
+ * This mode is not activated if CTS pin is a GPIO
+ * because in this case, the transmitter is always
+ * disabled (there must be an internal pull-up
+ * responsible for this behaviour).
+ * If the RTS pin is a GPIO, the controller won't be
+ * able to drive it according to the FIFO thresholds,
+ * but it will be handled by the driver.
+ */
mode |= ATMEL_US_USMODE_HWHS;
+ } else {
+ /*
+ * For platforms without FIFO, the flow control is
+ * handled by the driver.
+ */
+ mode |= ATMEL_US_USMODE_NORMAL;
}
} else {
/* RS232 without hardware handshake */
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index de9d5107c00a..76103f2c4a80 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -328,7 +328,7 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head) {
+ if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes);
} else {
@@ -359,7 +359,6 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
sport->dma_tx_in_progress = true;
sport->dma_tx_cookie = dmaengine_submit(sport->dma_tx_desc);
dma_async_issue_pending(sport->dma_tx_chan);
-
}
static void lpuart_dma_tx_complete(void *arg)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d391650b82e7..42caccb5e87e 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -419,6 +419,7 @@ static struct dmi_system_id pch_uart_dmi_table[] = {
},
(void *)MINNOW_UARTCLK,
},
+ { }
};
/* Return UART clock, checking for board specific clocks. */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 2675792a8f59..fb0672554123 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1130,9 +1130,13 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
{
struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
+ u8 state = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
- sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
- val ? BIT(offset) : 0);
+ if (val)
+ state |= BIT(offset);
+ else
+ state &= ~BIT(offset);
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 6e4f63627479..f2303f390345 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -111,7 +111,7 @@ void uart_write_wakeup(struct uart_port *port)
* closed. No cookie for you.
*/
BUG_ON(!state);
- tty_wakeup(state->port.tty);
+ tty_port_tty_wakeup(&state->port);
}
static void uart_stop(struct tty_struct *tty)
@@ -632,7 +632,7 @@ static void uart_flush_buffer(struct tty_struct *tty)
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
uart_port_unlock(port, flags);
- tty_wakeup(tty);
+ tty_port_tty_wakeup(&state->port);
}
/*
@@ -2746,8 +2746,6 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uport->cons = drv->cons;
uport->minor = drv->tty_driver->minor_start + uport->line;
- port->console = uart_console(uport);
-
/*
* If this port is a console, then the spinlock is already
* initialised.
@@ -2761,6 +2759,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
uart_configure_port(drv, state, uport);
+ port->console = uart_console(uport);
+
num_groups = 2;
if (uport->attr_group)
num_groups++;
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 41d974923102..cd97ceb76e4f 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -31,7 +31,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG ~0
+#define UNDEF_REG 0xff
/* Register offsets */
struct stm32_usart_info stm32f4_info = {
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f37edaa5ac75..dd4c02fa4820 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1200,6 +1200,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
OF_EARLYCON_DECLARE(cdns, "xlnx,xuartps", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p8", cdns_early_console_setup);
OF_EARLYCON_DECLARE(cdns, "cdns,uart-r1p12", cdns_early_console_setup);
+OF_EARLYCON_DECLARE(cdns, "xlnx,zynqmp-uart", cdns_early_console_setup);
/**
* cdns_uart_console_write - perform write operation
@@ -1438,6 +1439,7 @@ static const struct of_device_id cdns_uart_of_match[] = {
{ .compatible = "xlnx,xuartps", },
{ .compatible = "cdns,uart-r1p8", },
{ .compatible = "cdns,uart-r1p12", .data = &zynqmp_uart_def },
+ { .compatible = "xlnx,zynqmp-uart", .data = &zynqmp_uart_def },
{}
};
MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 06fb39c1d6dd..8c3bf3d613c0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
+ if (new_screen_size > (4 << 20))
+ return -EINVAL;
newscreen = kmalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
+ if (vc == sel_cons)
+ clear_selection();
+
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
@@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
break;
case 3: /* erase scroll-back buffer (and whole display) */
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
- vc->vc_screenbuf_size >> 1);
+ vc->vc_screenbuf_size);
set_origin(vc);
if (con_is_visible(vc))
update_screen(vc);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 96ae69502c86..111b0e0b8698 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -188,6 +188,8 @@ static void host_stop(struct ci_hdrc *ci)
if (hcd) {
usb_remove_hcd(hcd);
+ ci->role = CI_ROLE_END;
+ synchronize_irq(ci->irq);
usb_put_hcd(hcd);
if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
(ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index fa9b26b91507..4c0fa0b17353 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -463,9 +463,18 @@ static void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg)
*/
void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
{
+ bool ret;
+
switch (hsotg->dr_mode) {
case USB_DR_MODE_HOST:
- dwc2_force_mode(hsotg, true);
+ ret = dwc2_force_mode(hsotg, true);
+ /*
+ * NOTE: This is required for some Rockchip SoC-based
+ * platforms on their host-only dwc2.
+ */
+ if (!ret)
+ msleep(50);
+
break;
case USB_DR_MODE_PERIPHERAL:
dwc2_force_mode(hsotg, false);
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index aad4107ef927..2a21a0414b1d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -259,6 +259,13 @@ enum dwc2_lx_state {
DWC2_L3, /* Off state */
};
+/*
+ * Gadget periodic tx fifo sizes as used by legacy driver
+ * EP0 is not included
+ */
+#define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \
+ 768, 0, 0, 0, 0, 0, 0, 0}
+
/* Gadget ep0 states */
enum dwc2_ep0_state {
DWC2_EP0_SETUP,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4cd6403a7566..24fbebc9b409 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -186,10 +186,9 @@ static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg,
*/
static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
{
- unsigned int fifo;
+ unsigned int ep;
unsigned int addr;
int timeout;
- u32 dptxfsizn;
u32 val;
/* Reset fifo map if not correctly cleared during previous session */
@@ -217,16 +216,16 @@ static void dwc2_hsotg_init_fifo(struct dwc2_hsotg *hsotg)
* them to endpoints dynamically according to maxpacket size value of
* given endpoint.
*/
- for (fifo = 1; fifo < MAX_EPS_CHANNELS; fifo++) {
- dptxfsizn = dwc2_readl(hsotg->regs + DPTXFSIZN(fifo));
-
- val = (dptxfsizn & FIFOSIZE_DEPTH_MASK) | addr;
- addr += dptxfsizn >> FIFOSIZE_DEPTH_SHIFT;
-
- if (addr > hsotg->fifo_mem)
- break;
+ for (ep = 1; ep < MAX_EPS_CHANNELS; ep++) {
+ if (!hsotg->g_tx_fifo_sz[ep])
+ continue;
+ val = addr;
+ val |= hsotg->g_tx_fifo_sz[ep] << FIFOSIZE_DEPTH_SHIFT;
+ WARN_ONCE(addr + hsotg->g_tx_fifo_sz[ep] > hsotg->fifo_mem,
+ "insufficient fifo memory");
+ addr += hsotg->g_tx_fifo_sz[ep];
- dwc2_writel(val, hsotg->regs + DPTXFSIZN(fifo));
+ dwc2_writel(val, hsotg->regs + DPTXFSIZN(ep));
}
/*
@@ -3807,10 +3806,36 @@ static void dwc2_hsotg_dump(struct dwc2_hsotg *hsotg)
static void dwc2_hsotg_of_probe(struct dwc2_hsotg *hsotg)
{
struct device_node *np = hsotg->dev->of_node;
+ u32 len = 0;
+ u32 i = 0;
/* Enable dma if requested in device tree */
hsotg->g_using_dma = of_property_read_bool(np, "g-use-dma");
+ /*
+ * Register TX periodic fifo size per endpoint.
+ * EP0 is excluded since it has no fifo configuration.
+ */
+ if (!of_find_property(np, "g-tx-fifo-size", &len))
+ goto rx_fifo;
+
+ len /= sizeof(u32);
+
+ /* Read tx fifo sizes other than ep0 */
+ if (of_property_read_u32_array(np, "g-tx-fifo-size",
+ &hsotg->g_tx_fifo_sz[1], len))
+ goto rx_fifo;
+
+ /* Add ep0 */
+ len++;
+
+ /* Make remaining TX fifos unavailable */
+ if (len < MAX_EPS_CHANNELS) {
+ for (i = len; i < MAX_EPS_CHANNELS; i++)
+ hsotg->g_tx_fifo_sz[i] = 0;
+ }
+
+rx_fifo:
/* Register RX fifo size */
of_property_read_u32(np, "g-rx-fifo-size", &hsotg->g_rx_fifo_sz);
@@ -3832,10 +3857,13 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
struct device *dev = hsotg->dev;
int epnum;
int ret;
+ int i;
+ u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE;
/* Initialize to legacy fifo configuration values */
hsotg->g_rx_fifo_sz = 2048;
hsotg->g_np_g_tx_fifo_sz = 1024;
+ memcpy(&hsotg->g_tx_fifo_sz[1], p_tx_fifo, sizeof(p_tx_fifo));
/* Device tree specific probe */
dwc2_hsotg_of_probe(hsotg);
@@ -3853,6 +3881,9 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
dev_dbg(dev, "NonPeriodic TXFIFO size: %d\n",
hsotg->g_np_g_tx_fifo_sz);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->g_rx_fifo_sz);
+ for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ dev_dbg(dev, "Periodic TXFIFO%2d size: %d\n", i,
+ hsotg->g_tx_fifo_sz[i]);
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07cc8929f271..1dfa56a5f1c5 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -783,6 +783,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
req->first_trb_index = dep->trb_enqueue;
+ dep->queued_requests++;
}
dwc3_ep_inc_enq(dep);
@@ -833,8 +834,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
- dep->queued_requests++;
-
trace_dwc3_prepare_trb(dep, trb);
}
@@ -1074,9 +1073,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
list_add_tail(&req->list, &dep->pending_list);
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- dep->flags & DWC3_EP_PENDING_REQUEST) {
- if (list_empty(&dep->started_list)) {
+ /*
+ * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
+ * wait for a XferNotReady event so we will know what's the current
+ * (micro-)frame number.
+ *
+ * Without this trick, we are very, very likely to get Bus Expiry
+ * errors which will force us to issue an EndTransfer command.
+ */
+ if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
+ if ((dep->flags & DWC3_EP_PENDING_REQUEST) &&
+ list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -1861,8 +1868,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
unsigned int s_pkt = 0;
unsigned int trb_status;
- dep->queued_requests--;
dwc3_ep_inc_deq(dep);
+
+ if (req->trb == trb)
+ dep->queued_requests--;
+
trace_dwc3_complete_trb(dep, trb);
/*
@@ -2980,7 +2990,7 @@ err3:
kfree(dwc->setup_buf);
err2:
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
err1:
@@ -3005,7 +3015,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
kfree(dwc->setup_buf);
kfree(dwc->zlp_buf);
- dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+ dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
dwc->ep0_trb, dwc->ep0_trb_addr);
dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 54ad100af35b..e40d47d47d82 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -136,8 +136,60 @@ struct ffs_epfile {
/*
* Buffer for holding data from partial reads which may happen since
* we’re rounding user read requests to a multiple of a max packet size.
+ *
+ * The pointer is initialised with NULL value and may be set by
+ * __ffs_epfile_read_data function to point to a temporary buffer.
+ *
+ * In normal operation, calls to __ffs_epfile_read_buffered will consume
+ * data from said buffer and eventually free it. Importantly, while the
+ * function is using the buffer, it sets the pointer to NULL. This is
+ * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
+ * can never run concurrently (they are synchronised by epfile->mutex)
+ * so the latter will not assign a new value to the pointer.
+ *
+ * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
+ * valid) and sets the pointer to the READ_BUFFER_DROP value. This special
+ * value is the crux of the synchronisation between ffs_func_eps_disable and
+ * __ffs_epfile_read_data.
+ *
+ * Once __ffs_epfile_read_data is about to finish it will try to set the
+ * pointer back to its old value (as described above), but seeing as the
+ * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
+ * the buffer.
+ *
+ * == State transitions ==
+ *
+ * • ptr == NULL: (initial state)
+ * â—¦ __ffs_epfile_read_buffer_free: go to ptr == DROP
+ * â—¦ __ffs_epfile_read_buffered: nop
+ * â—¦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == DROP:
+ * â—¦ __ffs_epfile_read_buffer_free: nop
+ * â—¦ __ffs_epfile_read_buffered: go to ptr == NULL
+ * â—¦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == buf:
+ * â—¦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
+ * â—¦ __ffs_epfile_read_buffered: go to ptr == NULL and reading
+ * â—¦ __ffs_epfile_read_data: n/a, __ffs_epfile_read_buffered
+ * is always called first
+ * ◦ reading finishes: n/a, not in ‘and reading’ state
+ * • ptr == NULL and reading:
+ * â—¦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
+ * â—¦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * â—¦ __ffs_epfile_read_data: n/a, mutex is held
+ * ◦ reading finishes and …
+ * … all data read: free buf, go to ptr == NULL
+ * … otherwise: go to ptr == buf and reading
+ * • ptr == DROP and reading:
+ * â—¦ __ffs_epfile_read_buffer_free: nop
+ * â—¦ __ffs_epfile_read_buffered: n/a, mutex is held
+ * â—¦ __ffs_epfile_read_data: n/a, mutex is held
+ * â—¦ reading finishes: free buf, go to ptr == DROP
*/
- struct ffs_buffer *read_buffer; /* P: epfile->mutex */
+ struct ffs_buffer *read_buffer;
+#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
char name[5];
@@ -736,25 +788,47 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
schedule_work(&io_data->work);
}
+static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
+{
+ /*
+ * See comment in struct ffs_epfile for full read_buffer pointer
+ * synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
+ if (buf && buf != READ_BUFFER_DROP)
+ kfree(buf);
+}
+
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
struct iov_iter *iter)
{
- struct ffs_buffer *buf = epfile->read_buffer;
+ /*
+ * Null out epfile->read_buffer so ffs_func_eps_disable does not free
+ * the buffer while we are using it. See comment in struct ffs_epfile
+ * for full read_buffer pointer synchronisation story.
+ */
+ struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
ssize_t ret;
- if (!buf)
+ if (!buf || buf == READ_BUFFER_DROP)
return 0;
ret = copy_to_iter(buf->data, buf->length, iter);
if (buf->length == ret) {
kfree(buf);
- epfile->read_buffer = NULL;
- } else if (unlikely(iov_iter_count(iter))) {
+ return ret;
+ }
+
+ if (unlikely(iov_iter_count(iter))) {
ret = -EFAULT;
} else {
buf->length -= ret;
buf->data += ret;
}
+
+ if (cmpxchg(&epfile->read_buffer, NULL, buf))
+ kfree(buf);
+
return ret;
}
@@ -783,7 +857,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
buf->length = data_len;
buf->data = buf->storage;
memcpy(buf->storage, data + ret, data_len);
- epfile->read_buffer = buf;
+
+ /*
+ * At this point read_buffer is NULL or READ_BUFFER_DROP (if
+ * ffs_func_eps_disable has been called in the meanwhile). See comment
+ * in struct ffs_epfile for full read_buffer pointer synchronisation
+ * story.
+ */
+ if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
+ kfree(buf);
return ret;
}
@@ -1097,8 +1179,7 @@ ffs_epfile_release(struct inode *inode, struct file *file)
ENTER();
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
+ __ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
return 0;
@@ -1724,24 +1805,20 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
- if (epfile)
- mutex_lock(&epfile->mutex);
- spin_lock_irqsave(&func->ffs->eps_lock, flags);
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
++ep;
- spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
if (epfile) {
epfile->ep = NULL;
- kfree(epfile->read_buffer);
- epfile->read_buffer = NULL;
- mutex_unlock(&epfile->mutex);
+ __ffs_epfile_read_buffer_free(epfile);
++epfile;
}
} while (--count);
+ spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
static int ffs_func_eps_enable(struct ffs_function *func)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 39a6df1e2ded..686067dd8d2c 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -581,8 +581,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
/* throttle high/super speed IRQ rate back slightly */
if (gadget_is_dualspeed(dev->gadget))
- req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
- dev->gadget->speed == USB_SPEED_SUPER)
+ req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)) &&
+ !list_empty(&dev->tx_reqs))
? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
: 0;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index bb1f6c8f0f01..45bc997d0711 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = name;
+ ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1e5f529d51a2..063064801ceb 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1308,11 +1308,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_mv_driver
#endif
-#ifdef CONFIG_MIPS_SEAD3
-#include "ehci-sead3.c"
-#define PLATFORM_DRIVER ehci_hcd_sead3_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 876dca4fc216..a268d9e8d6cf 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,7 +39,7 @@
#define DRIVER_DESC "EHCI generic platform driver"
#define EHCI_MAX_CLKS 4
-#define EHCI_MAX_RSTS 3
+#define EHCI_MAX_RSTS 4
#define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
struct ehci_platform_priv {
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
deleted file mode 100644
index 3d86cc2ffe68..000000000000
--- a/drivers/usb/host/ehci-sead3.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * MIPS CI13320A EHCI Host Controller driver
- * Based on "ehci-au1xxx.c" by K.Boge <karsten.boge@amd.com>
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/err.h>
-#include <linux/platform_device.h>
-
-static int ehci_sead3_setup(struct usb_hcd *hcd)
-{
- int ret;
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-
- ehci->caps = hcd->regs + 0x100;
-
-#ifdef __BIG_ENDIAN
- ehci->big_endian_mmio = 1;
- ehci->big_endian_desc = 1;
-#endif
-
- ret = ehci_setup(hcd);
- if (ret)
- return ret;
-
- ehci->need_io_watchdog = 0;
-
- /* Set burst length to 16 words. */
- ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]);
-
- return ret;
-}
-
-const struct hc_driver ehci_sead3_hc_driver = {
- .description = hcd_name,
- .product_desc = "SEAD-3 EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- *
- */
- .reset = ehci_sead3_setup,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
- .relinquish_port = ehci_relinquish_port,
- .port_handed_over = ehci_port_handed_over,
-
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-};
-
-static int ehci_hcd_sead3_drv_probe(struct platform_device *pdev)
-{
- struct usb_hcd *hcd;
- struct resource *res;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- pr_debug("resource[1] is not IORESOURCE_IRQ");
- return -ENOMEM;
- }
- hcd = usb_create_hcd(&ehci_sead3_hc_driver, &pdev->dev, "SEAD-3");
- if (!hcd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err1;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- /* Root hub has integrated TT. */
- hcd->has_tt = 1;
-
- ret = usb_add_hcd(hcd, pdev->resource[1].start,
- IRQF_SHARED);
- if (ret == 0) {
- platform_set_drvdata(pdev, hcd);
- device_wakeup_enable(hcd->self.controller);
- return ret;
- }
-
-err1:
- usb_put_hcd(hcd);
- return ret;
-}
-
-static int ehci_hcd_sead3_drv_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- usb_remove_hcd(hcd);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ehci_hcd_sead3_drv_suspend(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
- bool do_wakeup = device_may_wakeup(dev);
-
- return ehci_suspend(hcd, do_wakeup);
-}
-
-static int ehci_hcd_sead3_drv_resume(struct device *dev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(dev);
-
- ehci_resume(hcd, false);
- return 0;
-}
-
-static const struct dev_pm_ops sead3_ehci_pmops = {
- .suspend = ehci_hcd_sead3_drv_suspend,
- .resume = ehci_hcd_sead3_drv_resume,
-};
-
-#define SEAD3_EHCI_PMOPS (&sead3_ehci_pmops)
-
-#else
-#define SEAD3_EHCI_PMOPS NULL
-#endif
-
-static struct platform_driver ehci_hcd_sead3_driver = {
- .probe = ehci_hcd_sead3_drv_probe,
- .remove = ehci_hcd_sead3_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .driver = {
- .name = "sead3-ehci",
- .pm = SEAD3_EHCI_PMOPS,
- }
-};
-
-MODULE_ALIAS("platform:sead3-ehci");
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 5b5880c0ae19..b38a228134df 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -221,6 +221,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
+ /*
+ * The RemoteWakeupConnected bit has to be set explicitly
+ * before calling ohci_run. The reset value of this bit is 0.
+ */
+ ohci->hc_control = OHCI_CTRL_RWC;
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0) {
device_wakeup_enable(hcd->self.controller);
@@ -677,9 +683,6 @@ ohci_hcd_at91_drv_suspend(struct device *dev)
* REVISIT: some boards will be able to turn VBUS off...
*/
if (!ohci_at91->wakeup) {
- ohci->hc_control = ohci_readl(ohci, &ohci->regs->control);
- ohci->hc_control &= OHCI_CTRL_RWC;
- ohci_writel(ohci, ohci->hc_control, &ohci->regs->control);
ohci->rh_state = OHCI_RH_HALTED;
/* flush the writes */
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1700908b84ef..86612ac3fda2 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -72,7 +72,7 @@
static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
-#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
+#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
#include "ohci.h"
#include "pci-quirks.h"
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 730b9fd26685..0ef16900efed 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
@@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+/*
+ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
+ * Warm reset a USB3 device stuck in polling or compliance mode after resume.
+ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
+ */
+static bool xhci_port_missing_cas_quirk(int port_index,
+ __le32 __iomem **port_array)
+{
+ u32 portsc;
+
+ portsc = readl(port_array[port_index]);
+
+ /* if any of these are set we are not stuck */
+ if (portsc & (PORT_CONNECT | PORT_CAS))
+ return false;
+
+ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
+ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
+ return false;
+
+ /* clear wakeup/change bits, and do a warm port reset */
+ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ portsc |= PORT_WR;
+ writel(portsc, port_array[port_index]);
+ /* flush write */
+ readl(port_array[port_index]);
+ return true;
+}
+
int xhci_bus_resume(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
u32 temp;
temp = readl(port_array[port_index]);
+
+ /* warm reset CAS limited ports stuck in polling/compliance */
+ if ((xhci->quirks & XHCI_MISSING_CAS) &&
+ (hcd->speed >= HCD_USB3) &&
+ xhci_port_missing_cas_quirk(port_index, port_array)) {
+ xhci_dbg(xhci, "reset stuck port %d\n", port_index);
+ continue;
+ }
if (DEV_SUPERSPEED_ANY(temp))
temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
else
@@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
if (need_usb2_u3_exit) {
spin_unlock_irqrestore(&xhci->lock, flags);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index d7b0f97abbad..e96ae80d107e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -45,11 +45,13 @@
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
+#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
static const char hcd_name[] = "xhci_hcd";
@@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
xhci->quirks |= XHCI_SPURIOUS_REBOOT;
xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
@@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
}
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ xhci->quirks |= XHCI_MISSING_CAS;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index b2c1dc5dc0f3..f945380035d0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -314,6 +314,8 @@ struct xhci_op_regs {
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
@@ -1653,6 +1655,7 @@ struct xhci_hcd {
#define XHCI_MTK_HOST (1 << 21)
#define XHCI_SSIC_PORT_UNUSED (1 << 22)
#define XHCI_NO_64BIT_SUPPORT (1 << 23)
+#define XHCI_MISSING_CAS (1 << 24)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index bff4869a57cd..4042ea017985 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1255,6 +1255,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
map_dma_buffer(request, musb, musb_ep);
+ pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
@@ -1275,6 +1276,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+
return status;
}
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 1ab6973d4f61..cc1225485509 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -287,6 +287,7 @@ static int omap2430_musb_init(struct musb *musb)
}
musb->isr = omap2430_musb_interrupt;
phy_init(musb->phy);
+ phy_power_on(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -323,8 +324,6 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
- if (!WARN_ON(!musb->phy))
- phy_power_on(musb->phy);
switch (glue->status) {
@@ -361,9 +360,6 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- if (!WARN_ON(!musb->phy))
- phy_power_off(musb->phy);
-
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
@@ -375,6 +371,7 @@ static int omap2430_musb_exit(struct musb *musb)
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
+ phy_power_off(musb->phy);
phy_exit(musb->phy);
musb->phy = NULL;
cancel_work_sync(&glue->omap_musb_mailbox_work);
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index 1d70add926f0..d544b331c9f2 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/delay.h>
#include <linux/io.h>
#include "common.h"
#include "rcar3.h"
@@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
- if (enable)
+ if (enable) {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
- else
+ /* The controller on R-Car Gen3 needs to wait up to 45 usec */
+ udelay(45);
+ } else {
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
+ }
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 54a4de0efdba..f61477bed3a8 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
u8 control;
int result;
- cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
+ if (result)
+ return result;
result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b2d767e743fc..0ff7f38d7800 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
/* ekey Devices */
{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
- { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
/* GE Healthcare devices */
{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
/* Active Research (Actisense) devices */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f87a938cf005..21011c0a4c64 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -626,8 +626,9 @@
/*
* Infineon Technologies
*/
-#define INFINEON_VID 0x058b
-#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID 0x058b
+#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
* Acton Research Corp.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index d213cf44a7e4..4a037b4a79cf 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
serial->disconnected = 0;
- usb_serial_console_init(serial->port[0]->minor);
+ if (num_ports > 0)
+ usb_serial_console_init(serial->port[0]->minor);
exit:
module_put(type->driver.owner);
return 0;
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79b2b628066d..79451f7ef1b7 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -133,6 +133,13 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
bo[itr] = bi1[itr] ^ bi2[itr];
}
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+};
+
/*
* CC-MAC function WUSB1.0[6.5]
*
@@ -197,16 +204,15 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
- struct crypto_cipher *tfm_aes, void *mic,
+ struct crypto_cipher *tfm_aes,
+ struct wusb_mac_scratch *scratch,
+ void *mic,
const struct aes_ccm_nonce *n,
const struct aes_ccm_label *a, const void *b,
size_t blen)
{
int result = 0;
SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
- struct aes_ccm_b0 b0;
- struct aes_ccm_b1 b1;
- struct aes_ccm_a ax;
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -218,16 +224,17 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* These checks should be compile time optimized out
* ensure @a fills b1's mac_header and following fields
*/
- WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
- WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
- WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+ WARN_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+ WARN_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
result = -ENOMEM;
zero_padding = blen % sizeof(struct aes_ccm_block);
if (zero_padding)
zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
- dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
+ dst_size = blen + sizeof(scratch->b0) + sizeof(scratch->b1) +
+ zero_padding;
dst_buf = kzalloc(dst_size, GFP_KERNEL);
if (!dst_buf)
goto error_dst_buf;
@@ -235,9 +242,9 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
memset(iv, 0, sizeof(iv));
/* Setup B0 */
- b0.flags = 0x59; /* Format B0 */
- b0.ccm_nonce = *n;
- b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+ scratch->b0.flags = 0x59; /* Format B0 */
+ scratch->b0.ccm_nonce = *n;
+ scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
/* Setup B1
*
@@ -246,12 +253,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* 14'--after clarification, it means to use A's contents
* for MAC Header, EO, sec reserved and padding.
*/
- b1.la = cpu_to_be16(blen + 14);
- memcpy(&b1.mac_header, a, sizeof(*a));
+ scratch->b1.la = cpu_to_be16(blen + 14);
+ memcpy(&scratch->b1.mac_header, a, sizeof(*a));
sg_init_table(sg, ARRAY_SIZE(sg));
- sg_set_buf(&sg[0], &b0, sizeof(b0));
- sg_set_buf(&sg[1], &b1, sizeof(b1));
+ sg_set_buf(&sg[0], &scratch->b0, sizeof(scratch->b0));
+ sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
sg_set_buf(&sg[3], bzero, zero_padding);
@@ -276,11 +283,12 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
* POS Crypto API: size is assumed to be AES's block size.
* Thanks for documenting it -- tip taken from airo.c
*/
- ax.flags = 0x01; /* as per WUSB 1.0 spec */
- ax.ccm_nonce = *n;
- ax.counter = 0;
- crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
- bytewise_xor(mic, &ax, iv, 8);
+ scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ scratch->ax.ccm_nonce = *n;
+ scratch->ax.counter = 0;
+ crypto_cipher_encrypt_one(tfm_aes, (void *)&scratch->ax,
+ (void *)&scratch->ax);
+ bytewise_xor(mic, &scratch->ax, iv, 8);
result = 8;
error_cbc_crypt:
kfree(dst_buf);
@@ -303,6 +311,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
struct aes_ccm_nonce n = *_n;
struct crypto_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
+ struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
@@ -329,17 +338,25 @@ ssize_t wusb_prf(void *out, size_t out_size,
printk(KERN_ERR "E: can't set AES key: %d\n", (int)result);
goto error_setkey_aes;
}
+ scratch = kmalloc(sizeof(*scratch), GFP_KERNEL);
+ if (!scratch) {
+ result = -ENOMEM;
+ goto error_alloc_scratch;
+ }
for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
sfn_le = cpu_to_le64(sfn++);
memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
- result = wusb_ccm_mac(tfm_cbc, tfm_aes, out + bytes,
+ result = wusb_ccm_mac(tfm_cbc, tfm_aes, scratch, out + bytes,
&n, a, b, blen);
if (result < 0)
goto error_ccm_mac;
bytes += result;
}
result = bytes;
+
+ kfree(scratch);
+error_alloc_scratch:
error_ccm_mac:
error_setkey_aes:
crypto_free_cipher(tfm_aes);
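The crypto.c hunks above move the three AES-CCM scratch blocks (b0, b1, ax) out of wusb_ccm_mac()'s stack frame into a single wusb_mac_scratch structure that the caller allocates once with kmalloc() and reuses for every block of the PRF loop. A minimal user-space sketch of that caller-owned-scratch pattern follows; the names (mac_scratch, compute_mac) and the stubbed-out cipher work are illustrative, not the kernel's.

#include <stdlib.h>
#include <string.h>

struct mac_scratch {                    /* stand-in for wusb_mac_scratch */
	unsigned char b0[16];
	unsigned char b1[16];
	unsigned char ax[16];
};

/* callee: works out of caller-provided scratch, no large stack buffers */
static int compute_mac(struct mac_scratch *scratch,
		       const unsigned char *msg, size_t len,
		       unsigned char *mic)
{
	memset(scratch, 0, sizeof(*scratch));
	/* ... fill b0/b1 from msg, run the cipher into ax (elided) ... */
	(void)msg; (void)len;
	memcpy(mic, scratch->ax, 8);
	return 8;
}

int main(void)
{
	struct mac_scratch *scratch = malloc(sizeof(*scratch));
	unsigned char mic[8];
	int i;

	if (!scratch)
		return 1;
	/* one heap allocation reused across every block, freed exactly once */
	for (i = 0; i < 4; i++)
		compute_mac(scratch, (const unsigned char *)"data", 4, mic);
	free(scratch);
	return 0;
}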
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index af2f117208f1..5d3b0db5ce0a 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2187,7 +2187,7 @@ config FB_GOLDFISH
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
- depends on FB && (MIPS_COBALT || MIPS_SEAD3)
+ depends on FB && MIPS_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6f323e..2d3b691f3fc4 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -63,7 +63,6 @@
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
-#ifdef CONFIG_MIPS_COBALT
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
@@ -83,47 +82,6 @@ static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
-#else
-
-#define LCD_CTL 0x00
-#define LCD_DATA 0x08
-#define CPLD_STATUS 0x10
-#define CPLD_DATA 0x18
-
-static inline void cpld_wait(struct fb_info *info)
-{
- do {
- } while (readl(info->screen_base + CPLD_STATUS) & 1);
-}
-
-static inline void lcd_write_control(struct fb_info *info, u8 control)
-{
- cpld_wait(info);
- writel(control, info->screen_base + LCD_CTL);
-}
-
-static inline u8 lcd_read_control(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_CTL);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-
-static inline void lcd_write_data(struct fb_info *info, u8 data)
-{
- cpld_wait(info);
- writel(data, info->screen_base + LCD_DATA);
-}
-
-static inline u8 lcd_read_data(struct fb_info *info)
-{
- cpld_wait(info);
- readl(info->screen_base + LCD_DATA);
- cpld_wait(info);
- return readl(info->screen_base + CPLD_DATA) & 0xff;
-}
-#endif
static int lcd_busy_wait(struct fb_info *info)
{
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
- 0, pages);
+ ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+ FOLL_WRITE);
if (ret < nr_pages) {
nr_pages = ret;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(param.local_vaddr - lb_offset,
- num_pages, (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
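This hunk and the pvr2fb.c one above follow the same get_user_pages() calling-convention change: the separate write/force arguments collapse into a single gup_flags word, so each call site states its intent with FOLL_WRITE (or 0) instead of positional integers. A rough non-kernel sketch of why the flags form reads better at the call site; FOLL_WRITE here is a local stand-in constant and the pinning itself is faked.

#include <stdio.h>

#define FOLL_WRITE 0x01   /* local stand-in for the kernel flag */

/* old style: two positional ints, easy to transpose at call sites */
static int pin_pages_old(unsigned long start, int nr, int write, int force)
{
	(void)start; (void)write; (void)force;
	return nr;                        /* pretend every page was pinned */
}

/* new style: a single gup_flags argument names the intent */
static int pin_pages_new(unsigned long start, int nr, unsigned int gup_flags)
{
	(void)start; (void)gup_flags;
	return nr;
}

int main(void)
{
	/* before: pin_pages_old(addr, 8, 1, 0) -- "1, 0" is opaque */
	pin_pages_old(0x1000, 8, 1, 0);
	/* after: the writable intent is explicit at the call site */
	printf("pinned %d pages\n", pin_pages_new(0x1000, 8, FOLL_WRITE));
	return 0;
}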
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 15b64076bc26..bdbadaa47ef3 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -156,12 +156,16 @@ size_t vme_get_size(struct vme_resource *resource)
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
&aspace, &cycle, &dwidth);
+ if (retval)
+ return 0;
return size;
break;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
&buf_base, &aspace, &cycle);
+ if (retval)
+ return 0;
return size;
break;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e473e3b23720..6d1fbda0f461 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
return ret;
+
+ ret = wdat_wdt_ping(&wdat->wdd);
+ if (ret)
+ return ret;
}
return wdat_wdt_start(&wdat->wdd);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index e12bd3635f83..26e5e8507f03 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -168,7 +168,9 @@ out:
#endif /* CONFIG_HIBERNATE_CALLBACKS */
struct shutdown_handler {
- const char *command;
+#define SHUTDOWN_CMD_SIZE 11
+ const char command[SHUTDOWN_CMD_SIZE];
+ bool flag;
void (*cb)(void);
};
@@ -206,22 +208,22 @@ static void do_reboot(void)
ctrl_alt_del();
}
+static struct shutdown_handler shutdown_handlers[] = {
+ { "poweroff", true, do_poweroff },
+ { "halt", false, do_poweroff },
+ { "reboot", true, do_reboot },
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ { "suspend", true, do_suspend },
+#endif
+};
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
- static struct shutdown_handler handlers[] = {
- { "poweroff", do_poweroff },
- { "halt", do_poweroff },
- { "reboot", do_reboot },
-#ifdef CONFIG_HIBERNATE_CALLBACKS
- { "suspend", do_suspend },
-#endif
- {NULL, NULL},
- };
- static struct shutdown_handler *handler;
+ int idx;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -238,13 +240,13 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- for (handler = &handlers[0]; handler->command; handler++) {
- if (strcmp(str, handler->command) == 0)
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (strcmp(str, shutdown_handlers[idx].command) == 0)
break;
}
/* Only acknowledge commands which we are prepared to handle. */
- if (handler->cb)
+ if (idx < ARRAY_SIZE(shutdown_handlers))
xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
@@ -253,8 +255,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (handler->cb) {
- handler->cb();
+ if (idx < ARRAY_SIZE(shutdown_handlers)) {
+ shutdown_handlers[idx].cb();
} else {
pr_info("Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -310,6 +312,9 @@ static struct notifier_block xen_reboot_nb = {
static int setup_shutdown_watcher(void)
{
int err;
+ int idx;
+#define FEATURE_PATH_SIZE (SHUTDOWN_CMD_SIZE + sizeof("feature-"))
+ char node[FEATURE_PATH_SIZE];
err = register_xenbus_watch(&shutdown_watch);
if (err) {
@@ -326,6 +331,14 @@ static int setup_shutdown_watcher(void)
}
#endif
+ for (idx = 0; idx < ARRAY_SIZE(shutdown_handlers); idx++) {
+ if (!shutdown_handlers[idx].flag)
+ continue;
+ snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+ shutdown_handlers[idx].command);
+ xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ }
+
return 0;
}
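The manage.c rework above replaces the NULL-terminated, function-local handler list with a file-scope table walked by index up to ARRAY_SIZE(), and uses the new per-entry flag to advertise "feature-<command>" nodes when the watcher is set up. A self-contained sketch of that table-driven dispatch; the names and the printed output are illustrative only.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define CMD_SIZE 11

struct handler {
	const char command[CMD_SIZE];
	bool advertise;
	void (*cb)(void);
};

static void do_poweroff(void) { puts("poweroff"); }
static void do_reboot(void)   { puts("reboot"); }

static const struct handler handlers[] = {
	{ "poweroff", true,  do_poweroff },
	{ "halt",     false, do_poweroff },
	{ "reboot",   true,  do_reboot },
};

static void dispatch(const char *cmd)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(handlers); idx++)
		if (strcmp(cmd, handlers[idx].command) == 0)
			break;

	/* only act on commands we are prepared to handle */
	if (idx < ARRAY_SIZE(handlers))
		handlers[idx].cb();
	else
		printf("ignoring unknown command: %s\n", cmd);
}

int main(void)
{
	size_t idx;

	/* advertise supported commands, as setup_shutdown_watcher() does */
	for (idx = 0; idx < ARRAY_SIZE(handlers); idx++)
		if (handlers[idx].advertise)
			printf("feature-%s\n", handlers[idx].command);

	dispatch("reboot");
	dispatch("hibernate");
	return 0;
}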
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c1010f018bd8..1e8be12ebb55 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -364,7 +364,7 @@ out:
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
- struct watch_adapter *watch, *tmp_watch;
+ struct watch_adapter *watch;
char *path, *token;
int err, rc;
LIST_HEAD(staging_q);
@@ -399,7 +399,7 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
}
list_add(&watch->list, &u->watches);
} else {
- list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
+ list_for_each_entry(watch, &u->watches, list) {
if (!strcmp(watch->token, token) &&
!strcmp(watch->watch.node, path)) {
unregister_xenbus_watch(&watch->watch);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 611a23119675..6d40a972ffb2 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -335,7 +335,9 @@ static int backend_state;
static void xenbus_reset_backend_state_changed(struct xenbus_watch *w,
const char **v, unsigned int l)
{
- xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i", &backend_state);
+ if (xenbus_scanf(XBT_NIL, v[XS_WATCH_PATH], "", "%i",
+ &backend_state) != 1)
+ backend_state = XenbusStateUnknown;
printk(KERN_DEBUG "XENBUS: backend %s %s\n",
v[XS_WATCH_PATH], xenbus_strstate(backend_state));
wake_up(&backend_state_wq);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 2037e7a77a37..d764236072b1 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -91,11 +91,9 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
*/
bool afs_cm_incoming_call(struct afs_call *call)
{
- u32 operation_id = ntohl(call->operation_ID);
+ _enter("{CB.OP %u}", call->operation_ID);
- _enter("{CB.OP %u}", operation_id);
-
- switch (operation_id) {
+ switch (call->operation_ID) {
case CBCallBack:
call->type = &afs_SRXCBCallBack;
return true;
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 96f4d764d1a6..31c616ab9b40 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -364,7 +364,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
buffer = kmap(page);
ret = afs_extract_data(call, buffer,
call->count, true);
- kunmap(buffer);
+ kunmap(page);
if (ret < 0)
return ret;
}
@@ -397,7 +397,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
page = call->reply3;
buffer = kmap(page);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
- kunmap(buffer);
+ kunmap(page);
}
_leave(" = 0 [done]");
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5497c8496055..535a38d2c1d0 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -112,7 +112,7 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
u16 service_id; /* RxRPC service ID to call */
__be16 port; /* target UDP port */
- __be32 operation_ID; /* operation ID for an incoming call */
+ u32 operation_ID; /* operation ID for an incoming call */
u32 count; /* count for use in unmarshalling */
__be32 tmp; /* place to extract temporary data */
afs_dataversion_t store_version; /* updated version expected from store */
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 477928b25940..25f05a8d21b1 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -676,10 +676,11 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
ASSERTCMP(call->offset, <, 4);
/* the operation ID forms the first four bytes of the request data */
- ret = afs_extract_data(call, &call->operation_ID, 4, true);
+ ret = afs_extract_data(call, &call->tmp, 4, true);
if (ret < 0)
return ret;
+ call->operation_ID = ntohl(call->tmp);
call->state = AFS_CALL_AWAIT_REQUEST;
call->offset = 0;
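Together with the internal.h hunk above, this makes call->operation_ID a host-order u32: the wire value is extracted into the __be32 scratch field and converted with ntohl() exactly once, so later comparisons use plain integers. A small user-space illustration of the same convert-once pattern, using made-up struct and function names.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct call {
	uint32_t tmp_be;        /* raw big-endian scratch, like call->tmp */
	uint32_t operation_id;  /* host order, like call->operation_ID */
};

static void deliver_op_id(struct call *call, const unsigned char *wire)
{
	/* the operation ID forms the first four bytes of the request data */
	memcpy(&call->tmp_be, wire, sizeof(call->tmp_be));
	call->operation_id = ntohl(call->tmp_be);
}

int main(void)
{
	/* 0x000000cc on the wire (network byte order) */
	const unsigned char wire[4] = { 0x00, 0x00, 0x00, 0xcc };
	struct call c;

	deliver_op_id(&c, wire);
	if (c.operation_id == 0xcc)
		puts("operation ID decoded in host order");
	return 0;
}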
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index e0f59263a96d..c6bad51d8ec7 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -43,7 +43,10 @@ struct befs_sb_info {
u32 ag_shift;
u32 num_ags;
- /* jornal log entry */
+ /* State of the superblock */
+ u32 flags;
+
+ /* Journal log entry */
befs_block_run log_blocks;
befs_off_t log_start;
befs_off_t log_end;
@@ -79,7 +82,7 @@ enum befs_err {
BEFS_BT_END,
BEFS_BT_EMPTY,
BEFS_BT_MATCH,
- BEFS_BT_PARMATCH,
+ BEFS_BT_OVERFLOW,
BEFS_BT_NOT_FOUND
};
@@ -140,18 +143,6 @@ befs_iaddrs_per_block(struct super_block *sb)
return BEFS_SB(sb)->block_size / sizeof (befs_disk_inode_addr);
}
-static inline int
-befs_iaddr_is_empty(const befs_inode_addr *iaddr)
-{
- return (!iaddr->allocation_group) && (!iaddr->start) && (!iaddr->len);
-}
-
-static inline size_t
-befs_brun_size(struct super_block *sb, befs_block_run run)
-{
- return BEFS_SB(sb)->block_size * run.len;
-}
-
#include "endian.h"
#endif /* _LINUX_BEFS_H */
diff --git a/fs/befs/btree.c b/fs/befs/btree.c
index 307645f9e284..7e135ea73fdd 100644
--- a/fs/befs/btree.c
+++ b/fs/befs/btree.c
@@ -85,7 +85,7 @@ struct befs_btree_node {
};
/* local constants */
-static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL;
+static const befs_off_t BEFS_BT_INVAL = 0xffffffffffffffffULL;
/* local functions */
static int befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds,
@@ -156,8 +156,6 @@ befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds,
sup->max_depth = fs32_to_cpu(sb, od_sup->max_depth);
sup->data_type = fs32_to_cpu(sb, od_sup->data_type);
sup->root_node_ptr = fs64_to_cpu(sb, od_sup->root_node_ptr);
- sup->free_node_ptr = fs64_to_cpu(sb, od_sup->free_node_ptr);
- sup->max_size = fs64_to_cpu(sb, od_sup->max_size);
brelse(bh);
if (sup->magic != BEFS_BTREE_MAGIC) {
@@ -183,8 +181,8 @@ befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds,
* Calls befs_read_datastream to read in the indicated btree node and
* makes sure its header fields are in cpu byteorder, byteswapping if
* necessary.
- * Note: node->bh must be NULL when this function called first
- * time. Don't forget brelse(node->bh) after last call.
+ * Note: node->bh must be NULL when this function is called the first time.
+ * Don't forget brelse(node->bh) after last call.
*
* On success, returns BEFS_OK and *@node contains the btree node that
* starts at @node_off, with the node->head fields in cpu byte order.
@@ -244,7 +242,7 @@ befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds,
* Read the superblock and rootnode of the b+tree.
* Drill down through the interior nodes using befs_find_key().
* Once at the correct leaf node, use befs_find_key() again to get the
- * actuall value stored with the key.
+ * actual value stored with the key.
*/
int
befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
@@ -283,9 +281,9 @@ befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
while (!befs_leafnode(this_node)) {
res = befs_find_key(sb, this_node, key, &node_off);
- if (res == BEFS_BT_NOT_FOUND)
+ /* if no key set, try the overflow node */
+ if (res == BEFS_BT_OVERFLOW)
node_off = this_node->head.overflow;
- /* if no match, go to overflow node */
if (befs_bt_read_node(sb, ds, this_node, node_off) != BEFS_OK) {
befs_error(sb, "befs_btree_find() failed to read "
"node at %llu", node_off);
@@ -293,15 +291,15 @@ befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
}
}
- /* at the correct leaf node now */
-
+ /* at a leaf node now, check if it is correct */
res = befs_find_key(sb, this_node, key, value);
brelse(this_node->bh);
kfree(this_node);
if (res != BEFS_BT_MATCH) {
- befs_debug(sb, "<--- %s Key %s not found", __func__, key);
+ befs_error(sb, "<--- %s Key %s not found", __func__, key);
+ befs_debug(sb, "<--- %s ERROR", __func__);
*value = 0;
return BEFS_BT_NOT_FOUND;
}
@@ -324,16 +322,12 @@ befs_btree_find(struct super_block *sb, const befs_data_stream *ds,
* @findkey: Keystring to search for
* @value: If key is found, the value stored with the key is put here
*
- * finds exact match if one exists, and returns BEFS_BT_MATCH
- * If no exact match, finds first key in node that is greater
- * (alphabetically) than the search key and returns BEFS_BT_PARMATCH
- * (for partial match, I guess). Can you think of something better to
- * call it?
- *
- * If no key was a match or greater than the search key, return
- * BEFS_BT_NOT_FOUND.
+ * Finds exact match if one exists, and returns BEFS_BT_MATCH.
+ * If there is no match and node's value array is too small for key, return
+ * BEFS_BT_OVERFLOW.
+ * If no match and the node should contain this key, return BEFS_BT_NOT_FOUND.
*
- * Use binary search instead of a linear.
+ * Uses a binary search instead of a linear one.
*/
static int
befs_find_key(struct super_block *sb, struct befs_btree_node *node,
@@ -348,18 +342,16 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
befs_debug(sb, "---> %s %s", __func__, findkey);
- *value = 0;
-
findkey_len = strlen(findkey);
- /* if node can not contain key, just skeep this node */
+ /* if node can not contain key, just skip this node */
last = node->head.all_key_count - 1;
thiskey = befs_bt_get_key(sb, node, last, &keylen);
eq = befs_compare_strings(thiskey, keylen, findkey, findkey_len);
if (eq < 0) {
- befs_debug(sb, "<--- %s %s not found", __func__, findkey);
- return BEFS_BT_NOT_FOUND;
+ befs_debug(sb, "<--- node can't contain %s", findkey);
+ return BEFS_BT_OVERFLOW;
}
valarray = befs_bt_valarray(node);
@@ -387,12 +379,15 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
else
first = mid + 1;
}
+
+ /* return an existing value so the caller can reach a leaf node */
if (eq < 0)
*value = fs64_to_cpu(sb, valarray[mid + 1]);
else
*value = fs64_to_cpu(sb, valarray[mid]);
- befs_debug(sb, "<--- %s found %s at %d", __func__, thiskey, mid);
- return BEFS_BT_PARMATCH;
+ befs_error(sb, "<--- %s %s not found", __func__, findkey);
+ befs_debug(sb, "<--- %s ERROR", __func__);
+ return BEFS_BT_NOT_FOUND;
}
/**
@@ -405,7 +400,7 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node,
* @keysize: Length of the returned key
* @value: Value stored with the returned key
*
- * Heres how it works: Key_no is the index of the key/value pair to
+ * Here's how it works: Key_no is the index of the key/value pair to
* return in keybuf/value.
* Bufsize is the size of keybuf (BEFS_NAME_LEN+1 is a good size). Keysize is
* the number of characters in the key (just a convenience).
@@ -422,7 +417,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
{
struct befs_btree_node *this_node;
befs_btree_super bt_super;
- befs_off_t node_off = 0;
+ befs_off_t node_off;
int cur_key;
fs64 *valarray;
char *keystart;
@@ -467,7 +462,7 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
while (key_sum + this_node->head.all_key_count <= key_no) {
/* no more nodes to look in: key_no is too large */
- if (this_node->head.right == befs_bt_inval) {
+ if (this_node->head.right == BEFS_BT_INVAL) {
*keysize = 0;
*value = 0;
befs_debug(sb,
@@ -541,7 +536,6 @@ befs_btree_read(struct super_block *sb, const befs_data_stream *ds,
* @node_off: Pointer to offset of current node within datastream. Modified
* by the function.
*
- *
* Helper function for btree traverse. Moves the current position to the
* start of the first leaf node.
*
@@ -608,7 +602,7 @@ static int
befs_leafnode(struct befs_btree_node *node)
{
/* all interior nodes (and only interior nodes) have an overflow node */
- if (node->head.overflow == befs_bt_inval)
+ if (node->head.overflow == BEFS_BT_INVAL)
return 1;
else
return 0;
@@ -715,7 +709,7 @@ befs_bt_get_key(struct super_block *sb, struct befs_btree_node *node,
*
* Returns 0 if @key1 and @key2 are equal.
* Returns >0 if @key1 is greater.
- * Returns <0 if @key2 is greater..
+ * Returns <0 if @key2 is greater.
*/
static int
befs_compare_strings(const void *key1, int keylen1,
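The reworked befs_find_key() above has three outcomes: BEFS_BT_OVERFLOW when the node's largest key is still smaller than the search key (so the caller follows the overflow link), BEFS_BT_MATCH on an exact hit, and BEFS_BT_NOT_FOUND with a neighbouring value handed back so interior-node traversal can keep descending. A simplified sketch of that contract with integer keys; the real code compares length-prefixed strings and reads keys out of the node buffer.

#include <stdio.h>

enum { BT_MATCH, BT_OVERFLOW, BT_NOT_FOUND };

static int find_key(const int *keys, const long *vals, int nkeys,
		    int want, long *value)
{
	int first = 0, last = nkeys - 1, mid = 0, eq = -1;

	if (keys[last] < want)
		return BT_OVERFLOW;          /* node cannot contain the key */

	while (last >= first) {
		mid = (last + first) / 2;
		eq = keys[mid] - want;
		if (eq == 0) {
			*value = vals[mid];
			return BT_MATCH;
		}
		if (eq < 0)
			first = mid + 1;
		else
			last = mid - 1;
	}

	/* pass back an existing value so the caller can descend */
	*value = (eq < 0) ? vals[mid + 1] : vals[mid];
	return BT_NOT_FOUND;
}

int main(void)
{
	const int keys[]  = { 2, 5, 9, 14 };
	const long vals[] = { 20, 50, 90, 140 };
	long v = 0;

	printf("find 9:  %d\n", find_key(keys, vals, 4, 9, &v));   /* MATCH */
	printf("find 7:  %d (descend via %ld)\n",
	       find_key(keys, vals, 4, 7, &v), v);                 /* NOT_FOUND */
	printf("find 99: %d\n", find_key(keys, vals, 4, 99, &v));  /* OVERFLOW */
	return 0;
}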
diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c
index af1bc19b7c85..b4c7ba013c0d 100644
--- a/fs/befs/datastream.c
+++ b/fs/befs/datastream.c
@@ -22,22 +22,22 @@ const befs_inode_addr BAD_IADDR = { 0, 0, 0 };
static int befs_find_brun_direct(struct super_block *sb,
const befs_data_stream *data,
- befs_blocknr_t blockno, befs_block_run * run);
+ befs_blocknr_t blockno, befs_block_run *run);
static int befs_find_brun_indirect(struct super_block *sb,
const befs_data_stream *data,
befs_blocknr_t blockno,
- befs_block_run * run);
+ befs_block_run *run);
static int befs_find_brun_dblindirect(struct super_block *sb,
const befs_data_stream *data,
befs_blocknr_t blockno,
- befs_block_run * run);
+ befs_block_run *run);
/**
* befs_read_datastream - get buffer_head containing data, starting from pos.
* @sb: Filesystem superblock
- * @ds: datastrem to find data with
+ * @ds: datastream to find data with
* @pos: start of data
* @off: offset of data in buffer_head->b_data
*
@@ -46,7 +46,7 @@ static int befs_find_brun_dblindirect(struct super_block *sb,
*/
struct buffer_head *
befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
- befs_off_t pos, uint * off)
+ befs_off_t pos, uint *off)
{
struct buffer_head *bh;
befs_block_run run;
@@ -75,7 +75,13 @@ befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
return bh;
}
-/*
+/**
+ * befs_fblock2brun - give back block run for fblock
+ * @sb: the superblock
+ * @data: datastream to read from
+ * @fblock: the blocknumber with the file position to find
+ * @run: The found run is passed back through this pointer
+ *
* Takes a file position and gives back a brun who's starting block
* is block number fblock of the file.
*
@@ -88,7 +94,7 @@ befs_read_datastream(struct super_block *sb, const befs_data_stream *ds,
*/
int
befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
- befs_blocknr_t fblock, befs_block_run * run)
+ befs_blocknr_t fblock, befs_block_run *run)
{
int err;
befs_off_t pos = fblock << BEFS_SB(sb)->block_shift;
@@ -115,7 +121,7 @@ befs_fblock2brun(struct super_block *sb, const befs_data_stream *data,
/**
* befs_read_lsmylink - read long symlink from datastream.
* @sb: Filesystem superblock
- * @ds: Datastrem to read from
+ * @ds: Datastream to read from
* @buff: Buffer in which to place long symlink data
* @len: Length of the long symlink in bytes
*
@@ -128,6 +134,7 @@ befs_read_lsymlink(struct super_block *sb, const befs_data_stream *ds,
befs_off_t bytes_read = 0; /* bytes readed */
u16 plen;
struct buffer_head *bh;
+
befs_debug(sb, "---> %s length: %llu", __func__, len);
while (bytes_read < len) {
@@ -183,13 +190,13 @@ befs_count_blocks(struct super_block *sb, const befs_data_stream *ds)
metablocks += ds->indirect.len;
/*
- Double indir block, plus all the indirect blocks it mapps
- In the double-indirect range, all block runs of data are
- BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know
- how many data block runs are in the double-indirect region,
- and from that we know how many indirect blocks it takes to
- map them. We assume that the indirect blocks are also
- BEFS_DBLINDIR_BRUN_LEN blocks long.
+ * Double indir block, plus all the indirect blocks it maps.
+ * In the double-indirect range, all block runs of data are
+ * BEFS_DBLINDIR_BRUN_LEN blocks long. Therefore, we know
+ * how many data block runs are in the double-indirect region,
+ * and from that we know how many indirect blocks it takes to
+ * map them. We assume that the indirect blocks are also
+ * BEFS_DBLINDIR_BRUN_LEN blocks long.
*/
if (ds->size > ds->max_indirect_range && ds->max_indirect_range != 0) {
uint dbl_bytes;
@@ -212,58 +219,50 @@ befs_count_blocks(struct super_block *sb, const befs_data_stream *ds)
return blocks;
}
-/*
- Finds the block run that starts at file block number blockno
- in the file represented by the datastream data, if that
- blockno is in the direct region of the datastream.
-
- sb: the superblock
- data: the datastream
- blockno: the blocknumber to find
- run: The found run is passed back through this pointer
-
- Return value is BEFS_OK if the blockrun is found, BEFS_ERR
- otherwise.
-
- Algorithm:
- Linear search. Checks each element of array[] to see if it
- contains the blockno-th filesystem block. This is necessary
- because the block runs map variable amounts of data. Simply
- keeps a count of the number of blocks searched so far (sum),
- incrementing this by the length of each block run as we come
- across it. Adds sum to *count before returning (this is so
- you can search multiple arrays that are logicaly one array,
- as in the indirect region code).
-
- When/if blockno is found, if blockno is inside of a block
- run as stored on disk, we offset the start and length members
- of the block run, so that blockno is the start and len is
- still valid (the run ends in the same place).
-
- 2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_direct - find a direct block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the direct region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * Linear search. Checks each element of array[] to see if it
+ * contains the blockno-th filesystem block. This is necessary
+ * because the block runs map variable amounts of data. Simply
+ * keeps a count of the number of blocks searched so far (sum),
+ * incrementing this by the length of each block run as we come
+ * across it. Adds sum to *count before returning (this is so
+ * you can search multiple arrays that are logically one array,
+ * as in the indirect region code).
+ *
+ * When/if blockno is found, if blockno is inside of a block
+ * run as stored on disk, we offset the start and length members
+ * of the block run, so that blockno is the start and len is
+ * still valid (the run ends in the same place).
+ */
static int
befs_find_brun_direct(struct super_block *sb, const befs_data_stream *data,
- befs_blocknr_t blockno, befs_block_run * run)
+ befs_blocknr_t blockno, befs_block_run *run)
{
int i;
const befs_block_run *array = data->direct;
befs_blocknr_t sum;
- befs_blocknr_t max_block =
- data->max_direct_range >> BEFS_SB(sb)->block_shift;
befs_debug(sb, "---> %s, find %lu", __func__, (unsigned long)blockno);
- if (blockno > max_block) {
- befs_error(sb, "%s passed block outside of direct region",
- __func__);
- return BEFS_ERR;
- }
-
for (i = 0, sum = 0; i < BEFS_NUM_DIRECT_BLOCKS;
sum += array[i].len, i++) {
if (blockno >= sum && blockno < sum + (array[i].len)) {
int offset = blockno - sum;
+
run->allocation_group = array[i].allocation_group;
run->start = array[i].start + offset;
run->len = array[i].len - offset;
@@ -275,38 +274,39 @@ befs_find_brun_direct(struct super_block *sb, const befs_data_stream *data,
}
}
+ befs_error(sb, "%s failed to find file block %lu", __func__,
+ (unsigned long)blockno);
befs_debug(sb, "---> %s ERROR", __func__);
return BEFS_ERR;
}
-/*
- Finds the block run that starts at file block number blockno
- in the file represented by the datastream data, if that
- blockno is in the indirect region of the datastream.
-
- sb: the superblock
- data: the datastream
- blockno: the blocknumber to find
- run: The found run is passed back through this pointer
-
- Return value is BEFS_OK if the blockrun is found, BEFS_ERR
- otherwise.
-
- Algorithm:
- For each block in the indirect run of the datastream, read
- it in and search through it for search_blk.
-
- XXX:
- Really should check to make sure blockno is inside indirect
- region.
-
- 2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_indirect - find a block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the indirect region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * For each block in the indirect run of the datastream, read
+ * it in and search through it for search_blk.
+ *
+ * XXX:
+ * Really should check to make sure blockno is inside indirect
+ * region.
+ */
static int
befs_find_brun_indirect(struct super_block *sb,
const befs_data_stream *data,
befs_blocknr_t blockno,
- befs_block_run * run)
+ befs_block_run *run)
{
int i, j;
befs_blocknr_t sum = 0;
@@ -326,11 +326,12 @@ befs_find_brun_indirect(struct super_block *sb,
/* Examine blocks of the indirect run one at a time */
for (i = 0; i < indirect.len; i++) {
- indirblock = befs_bread(sb, indirblockno + i);
+ indirblock = sb_bread(sb, indirblockno + i);
if (indirblock == NULL) {
- befs_debug(sb, "---> %s failed to read "
+ befs_error(sb, "---> %s failed to read "
"disk block %lu from the indirect brun",
__func__, (unsigned long)indirblockno + i);
+ befs_debug(sb, "<--- %s ERROR", __func__);
return BEFS_ERR;
}
@@ -370,52 +371,51 @@ befs_find_brun_indirect(struct super_block *sb,
return BEFS_ERR;
}
-/*
- Finds the block run that starts at file block number blockno
- in the file represented by the datastream data, if that
- blockno is in the double-indirect region of the datastream.
-
- sb: the superblock
- data: the datastream
- blockno: the blocknumber to find
- run: The found run is passed back through this pointer
-
- Return value is BEFS_OK if the blockrun is found, BEFS_ERR
- otherwise.
-
- Algorithm:
- The block runs in the double-indirect region are different.
- They are always allocated 4 fs blocks at a time, so each
- block run maps a constant amount of file data. This means
- that we can directly calculate how many block runs into the
- double-indirect region we need to go to get to the one that
- maps a particular filesystem block.
-
- We do this in two stages. First we calculate which of the
- inode addresses in the double-indirect block will point us
- to the indirect block that contains the mapping for the data,
- then we calculate which of the inode addresses in that
- indirect block maps the data block we are after.
-
- Oh, and once we've done that, we actually read in the blocks
- that contain the inode addresses we calculated above. Even
- though the double-indirect run may be several blocks long,
- we can calculate which of those blocks will contain the index
- we are after and only read that one. We then follow it to
- the indirect block and perform a similar process to find
- the actual block run that maps the data block we are interested
- in.
-
- Then we offset the run as in befs_find_brun_array() and we are
- done.
-
- 2001-11-15 Will Dyson
-*/
+/**
+ * befs_find_brun_dblindirect - find a block run in the datastream
+ * @sb: the superblock
+ * @data: the datastream
+ * @blockno: the blocknumber to find
+ * @run: The found run is passed back through this pointer
+ *
+ * Finds the block run that starts at file block number blockno
+ * in the file represented by the datastream data, if that
+ * blockno is in the double-indirect region of the datastream.
+ *
+ * Return value is BEFS_OK if the blockrun is found, BEFS_ERR
+ * otherwise.
+ *
+ * Algorithm:
+ * The block runs in the double-indirect region are different.
+ * They are always allocated 4 fs blocks at a time, so each
+ * block run maps a constant amount of file data. This means
+ * that we can directly calculate how many block runs into the
+ * double-indirect region we need to go to get to the one that
+ * maps a particular filesystem block.
+ *
+ * We do this in two stages. First we calculate which of the
+ * inode addresses in the double-indirect block will point us
+ * to the indirect block that contains the mapping for the data,
+ * then we calculate which of the inode addresses in that
+ * indirect block maps the data block we are after.
+ *
+ * Oh, and once we've done that, we actually read in the blocks
+ * that contain the inode addresses we calculated above. Even
+ * though the double-indirect run may be several blocks long,
+ * we can calculate which of those blocks will contain the index
+ * we are after and only read that one. We then follow it to
+ * the indirect block and perform a similar process to find
+ * the actual block run that maps the data block we are interested
+ * in.
+ *
+ * Then we offset the run as in befs_find_brun_array() and we are
+ * done.
+ */
static int
befs_find_brun_dblindirect(struct super_block *sb,
const befs_data_stream *data,
befs_blocknr_t blockno,
- befs_block_run * run)
+ befs_block_run *run)
{
int dblindir_indx;
int indir_indx;
@@ -430,10 +430,9 @@ befs_find_brun_dblindirect(struct super_block *sb,
struct buffer_head *indir_block;
befs_block_run indir_run;
befs_disk_inode_addr *iaddr_array;
- struct befs_sb_info *befs_sb = BEFS_SB(sb);
befs_blocknr_t indir_start_blk =
- data->max_indirect_range >> befs_sb->block_shift;
+ data->max_indirect_range >> BEFS_SB(sb)->block_shift;
off_t dbl_indir_off = blockno - indir_start_blk;
@@ -471,7 +470,7 @@ befs_find_brun_dblindirect(struct super_block *sb,
}
dbl_indir_block =
- befs_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
+ sb_bread(sb, iaddr2blockno(sb, &data->double_indirect) +
dbl_which_block);
if (dbl_indir_block == NULL) {
befs_error(sb, "%s couldn't read the "
@@ -479,7 +478,6 @@ befs_find_brun_dblindirect(struct super_block *sb,
(unsigned long)
iaddr2blockno(sb, &data->double_indirect) +
dbl_which_block);
- brelse(dbl_indir_block);
return BEFS_ERR;
}
@@ -499,12 +497,11 @@ befs_find_brun_dblindirect(struct super_block *sb,
}
indir_block =
- befs_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
+ sb_bread(sb, iaddr2blockno(sb, &indir_run) + which_block);
if (indir_block == NULL) {
befs_error(sb, "%s couldn't read the indirect block "
"at blockno %lu", __func__, (unsigned long)
iaddr2blockno(sb, &indir_run) + which_block);
- brelse(indir_block);
return BEFS_ERR;
}
diff --git a/fs/befs/debug.c b/fs/befs/debug.c
index 4de7cffcd662..85c13392e9e8 100644
--- a/fs/befs/debug.c
+++ b/fs/befs/debug.c
@@ -169,6 +169,7 @@ befs_dump_super_block(const struct super_block *sb, befs_super_block * sup)
befs_debug(sb, " num_blocks %llu", fs64_to_cpu(sb, sup->num_blocks));
befs_debug(sb, " used_blocks %llu", fs64_to_cpu(sb, sup->used_blocks));
+ befs_debug(sb, " inode_size %u", fs32_to_cpu(sb, sup->inode_size));
befs_debug(sb, " magic2 %08x", fs32_to_cpu(sb, sup->magic2));
befs_debug(sb, " blocks_per_ag %u",
diff --git a/fs/befs/io.c b/fs/befs/io.c
index 523c8af2d770..b4a558126ee1 100644
--- a/fs/befs/io.c
+++ b/fs/befs/io.c
@@ -27,7 +27,7 @@ struct buffer_head *
befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
{
struct buffer_head *bh;
- befs_blocknr_t block = 0;
+ befs_blocknr_t block;
struct befs_sb_info *befs_sb = BEFS_SB(sb);
befs_debug(sb, "---> Enter %s "
@@ -59,27 +59,3 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr)
befs_debug(sb, "<--- %s ERROR", __func__);
return NULL;
}
-
-struct buffer_head *
-befs_bread(struct super_block *sb, befs_blocknr_t block)
-{
- struct buffer_head *bh;
-
- befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block);
-
- bh = sb_bread(sb, block);
-
- if (bh == NULL) {
- befs_error(sb, "Failed to read block %lu",
- (unsigned long)block);
- goto error;
- }
-
- befs_debug(sb, "<--- %s", __func__);
-
- return bh;
-
- error:
- befs_debug(sb, "<--- %s ERROR", __func__);
- return NULL;
-}
diff --git a/fs/befs/io.h b/fs/befs/io.h
index 9b78266b6aa5..78d7bc6e60de 100644
--- a/fs/befs/io.h
+++ b/fs/befs/io.h
@@ -5,5 +5,3 @@
struct buffer_head *befs_bread_iaddr(struct super_block *sb,
befs_inode_addr iaddr);
-struct buffer_head *befs_bread(struct super_block *sb, befs_blocknr_t block);
-
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index bfe9f9994935..647a276eba56 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -120,7 +120,7 @@ befs_get_block(struct inode *inode, sector_t block,
struct super_block *sb = inode->i_sb;
befs_data_stream *ds = &BEFS_I(inode)->i_data.ds;
befs_block_run run = BAD_IADDR;
- int res = 0;
+ int res;
ulong disk_off;
befs_debug(sb, "---> befs_get_block() for inode %lu, block %ld",
@@ -179,15 +179,16 @@ befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
kfree(utfname);
} else {
- ret = befs_btree_find(sb, ds, dentry->d_name.name, &offset);
+ ret = befs_btree_find(sb, ds, name, &offset);
}
if (ret == BEFS_BT_NOT_FOUND) {
befs_debug(sb, "<--- %s %pd not found", __func__, dentry);
+ d_add(dentry, NULL);
return ERR_PTR(-ENOENT);
} else if (ret != BEFS_OK || offset == 0) {
- befs_warning(sb, "<--- %s Error", __func__);
+ befs_error(sb, "<--- %s Error", __func__);
return ERR_PTR(-ENODATA);
}
@@ -211,56 +212,55 @@ befs_readdir(struct file *file, struct dir_context *ctx)
befs_off_t value;
int result;
size_t keysize;
- unsigned char d_type;
char keybuf[BEFS_NAME_LEN + 1];
befs_debug(sb, "---> %s name %pD, inode %ld, ctx->pos %lld",
__func__, file, inode->i_ino, ctx->pos);
-more:
- result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
- keybuf, &keysize, &value);
+ while (1) {
+ result = befs_btree_read(sb, ds, ctx->pos, BEFS_NAME_LEN + 1,
+ keybuf, &keysize, &value);
- if (result == BEFS_ERR) {
- befs_debug(sb, "<--- %s ERROR", __func__);
- befs_error(sb, "IO error reading %pD (inode %lu)",
- file, inode->i_ino);
- return -EIO;
-
- } else if (result == BEFS_BT_END) {
- befs_debug(sb, "<--- %s END", __func__);
- return 0;
-
- } else if (result == BEFS_BT_EMPTY) {
- befs_debug(sb, "<--- %s Empty directory", __func__);
- return 0;
- }
+ if (result == BEFS_ERR) {
+ befs_debug(sb, "<--- %s ERROR", __func__);
+ befs_error(sb, "IO error reading %pD (inode %lu)",
+ file, inode->i_ino);
+ return -EIO;
- d_type = DT_UNKNOWN;
+ } else if (result == BEFS_BT_END) {
+ befs_debug(sb, "<--- %s END", __func__);
+ return 0;
- /* Convert to NLS */
- if (BEFS_SB(sb)->nls) {
- char *nlsname;
- int nlsnamelen;
- result =
- befs_utf2nls(sb, keybuf, keysize, &nlsname, &nlsnamelen);
- if (result < 0) {
- befs_debug(sb, "<--- %s ERROR", __func__);
- return result;
+ } else if (result == BEFS_BT_EMPTY) {
+ befs_debug(sb, "<--- %s Empty directory", __func__);
+ return 0;
}
- if (!dir_emit(ctx, nlsname, nlsnamelen,
- (ino_t) value, d_type)) {
+
+ /* Convert to NLS */
+ if (BEFS_SB(sb)->nls) {
+ char *nlsname;
+ int nlsnamelen;
+
+ result =
+ befs_utf2nls(sb, keybuf, keysize, &nlsname,
+ &nlsnamelen);
+ if (result < 0) {
+ befs_debug(sb, "<--- %s ERROR", __func__);
+ return result;
+ }
+ if (!dir_emit(ctx, nlsname, nlsnamelen,
+ (ino_t) value, DT_UNKNOWN)) {
+ kfree(nlsname);
+ return 0;
+ }
kfree(nlsname);
- return 0;
+ } else {
+ if (!dir_emit(ctx, keybuf, keysize,
+ (ino_t) value, DT_UNKNOWN))
+ return 0;
}
- kfree(nlsname);
- } else {
- if (!dir_emit(ctx, keybuf, keysize,
- (ino_t) value, d_type))
- return 0;
+ ctx->pos++;
}
- ctx->pos++;
- goto more;
}
static struct inode *
@@ -299,7 +299,6 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
struct befs_sb_info *befs_sb = BEFS_SB(sb);
struct befs_inode_info *befs_ino;
struct inode *inode;
- long ret = -EIO;
befs_debug(sb, "---> %s inode = %lu", __func__, ino);
@@ -318,7 +317,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
befs_ino->i_inode_num.allocation_group,
befs_ino->i_inode_num.start, befs_ino->i_inode_num.len);
- bh = befs_bread(sb, inode->i_ino);
+ bh = sb_bread(sb, inode->i_ino);
if (!bh) {
befs_error(sb, "unable to read inode block - "
"inode = %lu", inode->i_ino);
@@ -421,7 +420,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
unacquire_none:
iget_failed(inode);
befs_debug(sb, "<--- %s - Bad inode", __func__);
- return ERR_PTR(ret);
+ return ERR_PTR(-EIO);
}
/* Initialize the inode cache. Called at fs setup.
@@ -436,10 +435,9 @@ befs_init_inodecache(void)
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
- if (befs_inode_cachep == NULL) {
- pr_err("%s: Couldn't initialize inode slabcache\n", __func__);
+ if (befs_inode_cachep == NULL)
return -ENOMEM;
- }
+
return 0;
}
@@ -524,8 +522,6 @@ befs_utf2nls(struct super_block *sb, const char *in,
*out = result = kmalloc(maxlen, GFP_NOFS);
if (!*out) {
- befs_error(sb, "%s cannot allocate memory", __func__);
- *out_len = 0;
return -ENOMEM;
}
@@ -604,7 +600,6 @@ befs_nls2utf(struct super_block *sb, const char *in,
*out = result = kmalloc(maxlen, GFP_NOFS);
if (!*out) {
- befs_error(sb, "%s cannot allocate memory", __func__);
*out_len = 0;
return -ENOMEM;
}
@@ -637,10 +632,6 @@ befs_nls2utf(struct super_block *sb, const char *in,
return -EILSEQ;
}
-/**
- * Use the
- *
- */
enum {
Opt_uid, Opt_gid, Opt_charset, Opt_debug, Opt_err,
};
@@ -760,19 +751,19 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
long ret = -EINVAL;
const unsigned long sb_block = 0;
const off_t x86_sb_off = 512;
+ int blocksize;
save_mount_options(sb, data);
sb->s_fs_info = kzalloc(sizeof(*befs_sb), GFP_KERNEL);
- if (sb->s_fs_info == NULL) {
- pr_err("(%s): Unable to allocate memory for private "
- "portion of superblock. Bailing.\n", sb->s_id);
+ if (sb->s_fs_info == NULL)
goto unacquire_none;
- }
+
befs_sb = BEFS_SB(sb);
if (!parse_options((char *) data, &befs_sb->mount_opts)) {
- befs_error(sb, "cannot parse mount options");
+ if (!silent)
+ befs_error(sb, "cannot parse mount options");
goto unacquire_priv_sbp;
}
@@ -793,10 +784,16 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
* least 1k to get the second 512 bytes of the volume.
* -WD 10-26-01
*/
- sb_min_blocksize(sb, 1024);
+ blocksize = sb_min_blocksize(sb, 1024);
+ if (!blocksize) {
+ if (!silent)
+ befs_error(sb, "unable to set blocksize");
+ goto unacquire_priv_sbp;
+ }
if (!(bh = sb_bread(sb, sb_block))) {
- befs_error(sb, "unable to read superblock");
+ if (!silent)
+ befs_error(sb, "unable to read superblock");
goto unacquire_priv_sbp;
}
@@ -820,9 +817,9 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
if( befs_sb->num_blocks > ~((sector_t)0) ) {
- befs_error(sb, "blocks count: %llu "
- "is larger than the host can use",
- befs_sb->num_blocks);
+ if (!silent)
+ befs_error(sb, "blocks count: %llu is larger than the host can use",
+ befs_sb->num_blocks);
goto unacquire_priv_sbp;
}
@@ -841,7 +838,8 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
}
sb->s_root = d_make_root(root);
if (!sb->s_root) {
- befs_error(sb, "get root inode failed");
+ if (!silent)
+ befs_error(sb, "get root inode failed");
goto unacquire_priv_sbp;
}
@@ -870,9 +868,9 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
unacquire_priv_sbp:
kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
unacquire_none:
- sb->s_fs_info = NULL;
return ret;
}
diff --git a/fs/befs/super.c b/fs/befs/super.c
index aeafc4d84278..7c50025c99d8 100644
--- a/fs/befs/super.c
+++ b/fs/befs/super.c
@@ -13,24 +13,20 @@
#include "befs.h"
#include "super.h"
-/**
- * load_befs_sb -- Read from disk and properly byteswap all the fields
+/*
+ * befs_load_sb -- Read from disk and properly byteswap all the fields
* of the befs superblock
- *
- *
- *
- *
*/
int
-befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
+befs_load_sb(struct super_block *sb, befs_super_block *disk_sb)
{
struct befs_sb_info *befs_sb = BEFS_SB(sb);
/* Check the byte order of the filesystem */
if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_LE)
- befs_sb->byte_order = BEFS_BYTESEX_LE;
+ befs_sb->byte_order = BEFS_BYTESEX_LE;
else if (disk_sb->fs_byte_order == BEFS_BYTEORDER_NATIVE_BE)
- befs_sb->byte_order = BEFS_BYTESEX_BE;
+ befs_sb->byte_order = BEFS_BYTESEX_BE;
befs_sb->magic1 = fs32_to_cpu(sb, disk_sb->magic1);
befs_sb->magic2 = fs32_to_cpu(sb, disk_sb->magic2);
@@ -45,6 +41,8 @@ befs_load_sb(struct super_block *sb, befs_super_block * disk_sb)
befs_sb->ag_shift = fs32_to_cpu(sb, disk_sb->ag_shift);
befs_sb->num_ags = fs32_to_cpu(sb, disk_sb->num_ags);
+ befs_sb->flags = fs32_to_cpu(sb, disk_sb->flags);
+
befs_sb->log_blocks = fsrun_to_cpu(sb, disk_sb->log_blocks);
befs_sb->log_start = fs64_to_cpu(sb, disk_sb->log_start);
befs_sb->log_end = fs64_to_cpu(sb, disk_sb->log_end);
@@ -84,15 +82,15 @@ befs_check_sb(struct super_block *sb)
}
if (befs_sb->block_size > PAGE_SIZE) {
- befs_error(sb, "blocksize(%u) cannot be larger"
+ befs_error(sb, "blocksize(%u) cannot be larger "
"than system pagesize(%lu)", befs_sb->block_size,
PAGE_SIZE);
return BEFS_ERR;
}
/*
- * block_shift and block_size encode the same information
- * in different ways as a consistency check.
+ * block_shift and block_size encode the same information
+ * in different ways as a consistency check.
*/
if ((1 << befs_sb->block_shift) != befs_sb->block_size) {
@@ -101,10 +99,18 @@ befs_check_sb(struct super_block *sb)
return BEFS_ERR;
}
- if (befs_sb->log_start != befs_sb->log_end) {
+
+ /* ag_shift also encodes the same information as blocks_per_ag in a
+ * different way, non-fatal consistency check
+ */
+ if ((1 << befs_sb->ag_shift) != befs_sb->blocks_per_ag)
+ befs_error(sb, "ag_shift disagrees with blocks_per_ag.");
+
+ if (befs_sb->log_start != befs_sb->log_end ||
+ befs_sb->flags == BEFS_DIRTY) {
befs_error(sb, "Filesystem not clean! There are blocks in the "
- "journal. You must boot into BeOS and mount this volume "
- "to make it clean.");
+ "journal. You must boot into BeOS and mount this "
+ "volume to make it clean.");
return BEFS_ERR;
}
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ccc70d96958d..d4d8b7e36b2f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
- bio->bi_error = ret;
+ comp_bio->bi_error = ret;
bio_endio(comp_bio);
}
@@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
- bio->bi_error = ret;
+ comp_bio->bi_error = ret;
bio_endio(comp_bio);
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c21bad26a27..0b8ce2b9f7d0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -252,7 +252,8 @@ struct btrfs_super_block {
#define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP \
- (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
+ (BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE | \
+ BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID)
#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e720d3e6ec20..3a57f99d96aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2586,6 +2586,7 @@ int open_ctree(struct super_block *sb,
int num_backups_tried = 0;
int backup_index = 0;
int max_active;
+ int clear_free_space_tree = 0;
tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -3148,6 +3149,26 @@ retry_root_backup:
if (sb->s_flags & MS_RDONLY)
return 0;
+ if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
+ btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+ clear_free_space_tree = 1;
+ } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+ !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
+ btrfs_warn(fs_info, "free space tree is invalid");
+ clear_free_space_tree = 1;
+ }
+
+ if (clear_free_space_tree) {
+ btrfs_info(fs_info, "clearing free space tree");
+ ret = btrfs_clear_free_space_tree(fs_info);
+ if (ret) {
+ btrfs_warn(fs_info,
+ "failed to clear free space tree: %d", ret);
+ close_ctree(tree_root);
+ return ret;
+ }
+ }
+
if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree");
@@ -3185,18 +3206,6 @@ retry_root_backup:
btrfs_qgroup_rescan_resume(fs_info);
- if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
- btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
- btrfs_info(fs_info, "clearing free space tree");
- ret = btrfs_clear_free_space_tree(fs_info);
- if (ret) {
- btrfs_warn(fs_info,
- "failed to clear free space tree: %d", ret);
- close_ctree(tree_root);
- return ret;
- }
- }
-
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ee40384c394d..66a755150056 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5558,17 +5558,45 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
}
}
-/*
- * The extent buffer bitmap operations are done with byte granularity because
- * bitmap items are not guaranteed to be aligned to a word and therefore a
- * single word in a bitmap may straddle two pages in the extent buffer.
- */
-#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
-#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
-#define BITMAP_FIRST_BYTE_MASK(start) \
- ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
-#define BITMAP_LAST_BYTE_MASK(nbits) \
- (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+void le_bitmap_set(u8 *map, unsigned int start, int len)
+{
+ u8 *p = map + BIT_BYTE(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_BYTE - (start % BITS_PER_BYTE);
+ u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_BYTE;
+ mask_to_set = ~(u8)0;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+void le_bitmap_clear(u8 *map, unsigned int start, int len)
+{
+ u8 *p = map + BIT_BYTE(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_BYTE - (start % BITS_PER_BYTE);
+ u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_BYTE;
+ mask_to_clear = ~(u8)0;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
/*
* eb_bitmap_offset() - calculate the page and offset of the byte containing the
@@ -5612,7 +5640,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
unsigned long nr)
{
- char *kaddr;
+ u8 *kaddr;
struct page *page;
unsigned long i;
size_t offset;
@@ -5634,13 +5662,13 @@ int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
- char *kaddr;
+ u8 *kaddr;
struct page *page;
unsigned long i;
size_t offset;
const unsigned int size = pos + len;
int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
+ u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
eb_bitmap_offset(eb, start, pos, &i, &offset);
page = eb->pages[i];
@@ -5651,7 +5679,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
kaddr[offset] |= mask_to_set;
len -= bits_to_set;
bits_to_set = BITS_PER_BYTE;
- mask_to_set = ~0U;
+ mask_to_set = ~(u8)0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
@@ -5676,13 +5704,13 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
- char *kaddr;
+ u8 *kaddr;
struct page *page;
unsigned long i;
size_t offset;
const unsigned int size = pos + len;
int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
- unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
+ u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
eb_bitmap_offset(eb, start, pos, &i, &offset);
page = eb->pages[i];
@@ -5693,7 +5721,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
kaddr[offset] &= ~mask_to_clear;
len -= bits_to_clear;
bits_to_clear = BITS_PER_BYTE;
- mask_to_clear = ~0U;
+ mask_to_clear = ~(u8)0;
if (++offset >= PAGE_SIZE && len > 0) {
offset = 0;
page = eb->pages[++i];
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4a094f1dc7ef..ab31d145227e 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -59,6 +59,28 @@
*/
#define EXTENT_PAGE_PRIVATE 1
+/*
+ * The extent buffer bitmap operations are done with byte granularity instead of
+ * word granularity for two reasons:
+ * 1. The bitmaps must be little-endian on disk.
+ * 2. Bitmap items are not guaranteed to be aligned to a word and therefore a
+ * single word in a bitmap may straddle two pages in the extent buffer.
+ */
+#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BITMAP_FIRST_BYTE_MASK(start) \
+ ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+#define BITMAP_LAST_BYTE_MASK(nbits) \
+ (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+
+static inline int le_test_bit(int nr, const u8 *addr)
+{
+ return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE-1)));
+}
+
+extern void le_bitmap_set(u8 *map, unsigned int start, int len);
+extern void le_bitmap_clear(u8 *map, unsigned int start, int len);
+
struct extent_state;
struct btrfs_root;
struct btrfs_io_bio;
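The byte-granularity masks added above are easy to sanity-check in isolation. A short user-space demo follows: the macros and le_test_bit() are copied from the hunk, while the surrounding demo (setting bits [3, 11) across a byte boundary, so only the two boundary masks are needed) is illustrative.

#include <stdio.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

static int le_test_bit(int nr, const unsigned char *addr)
{
	return 1U & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
	unsigned char map[2] = { 0, 0 };
	unsigned int start = 3, len = 8;          /* bits 3..10 inclusive */

	/* first byte keeps bits below 'start', last byte keeps bits above */
	map[0] |= BITMAP_FIRST_BYTE_MASK(start);       /* 0xf8 */
	map[1] |= BITMAP_LAST_BYTE_MASK(start + len);  /* 0x07 */

	printf("byte0=0x%02x byte1=0x%02x bit5=%d bit11=%d\n",
	       map[0], map[1], le_test_bit(5, map), le_test_bit(11, map));
	return 0;
}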
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index e4a42a8e4f84..57401b474ec6 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -151,7 +151,7 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}
-static unsigned long *alloc_bitmap(u32 bitmap_size)
+static u8 *alloc_bitmap(u32 bitmap_size)
{
void *mem;
@@ -180,8 +180,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
struct btrfs_free_space_info *info;
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
- unsigned long *bitmap;
- char *bitmap_cursor;
+ u8 *bitmap, *bitmap_cursor;
u64 start, end;
u64 bitmap_range, i;
u32 bitmap_size, flags, expected_extent_count;
@@ -231,7 +230,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
block_group->sectorsize);
last = div_u64(found_key.objectid + found_key.offset - start,
block_group->sectorsize);
- bitmap_set(bitmap, first, last - first);
+ le_bitmap_set(bitmap, first, last - first);
extent_count++;
nr++;
@@ -270,7 +269,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
goto out;
}
- bitmap_cursor = (char *)bitmap;
+ bitmap_cursor = bitmap;
bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
i = start;
while (i < end) {
@@ -319,7 +318,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
struct btrfs_free_space_info *info;
struct btrfs_key key, found_key;
struct extent_buffer *leaf;
- unsigned long *bitmap;
+ u8 *bitmap;
u64 start, end;
/* Initialize to silence GCC. */
u64 extent_start = 0;
@@ -363,7 +362,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
break;
} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
unsigned long ptr;
- char *bitmap_cursor;
+ u8 *bitmap_cursor;
u32 bitmap_pos, data_size;
ASSERT(found_key.objectid >= start);
@@ -373,7 +372,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
bitmap_pos = div_u64(found_key.objectid - start,
block_group->sectorsize *
BITS_PER_BYTE);
- bitmap_cursor = ((char *)bitmap) + bitmap_pos;
+ bitmap_cursor = bitmap + bitmap_pos;
data_size = free_space_bitmap_size(found_key.offset,
block_group->sectorsize);
@@ -410,7 +409,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
offset = start;
bitnr = 0;
while (offset < end) {
- bit = !!test_bit(bitnr, bitmap);
+ bit = !!le_test_bit(bitnr, bitmap);
if (prev_bit == 0 && bit == 1) {
extent_start = offset;
} else if (prev_bit == 1 && bit == 0) {
@@ -1185,6 +1184,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
}
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+ btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
ret = btrfs_commit_transaction(trans, tree_root);
@@ -1253,6 +1253,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
return PTR_ERR(trans);
btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
+ btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
fs_info->free_space_root = NULL;
ret = clear_free_space_tree(trans, free_space_root);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 01bc36cec26e..71261b459863 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5805,6 +5805,64 @@ static int changed_extent(struct send_ctx *sctx,
int ret = 0;
if (sctx->cur_ino != sctx->cmp_key->objectid) {
+
+ if (result == BTRFS_COMPARE_TREE_CHANGED) {
+ struct extent_buffer *leaf_l;
+ struct extent_buffer *leaf_r;
+ struct btrfs_file_extent_item *ei_l;
+ struct btrfs_file_extent_item *ei_r;
+
+ leaf_l = sctx->left_path->nodes[0];
+ leaf_r = sctx->right_path->nodes[0];
+ ei_l = btrfs_item_ptr(leaf_l,
+ sctx->left_path->slots[0],
+ struct btrfs_file_extent_item);
+ ei_r = btrfs_item_ptr(leaf_r,
+ sctx->right_path->slots[0],
+ struct btrfs_file_extent_item);
+
+ /*
+ * We may have found an extent item that has changed
+ * only its disk_bytenr field and the corresponding
+ * inode item was not updated. This case happens due to
+ * very specific timings during relocation when a leaf
+ * that contains file extent items is COWed while
+ * relocation is ongoing and it's in the stage where it
+ * updates data pointers. So when this happens we can
+ * safely ignore it since we know it's the same extent,
+ * but just at different logical and physical locations
+ * (when an extent is fully replaced with a new one, we
+ * know the generation number must have changed too,
+ * since snapshot creation implies committing the current
+ * transaction, and the inode item must have been updated
+ * as well).
+ * This replacement of the disk_bytenr happens at
+ * relocation.c:replace_file_extents() through
+ * relocation.c:btrfs_reloc_cow_block().
+ */
+ if (btrfs_file_extent_generation(leaf_l, ei_l) ==
+ btrfs_file_extent_generation(leaf_r, ei_r) &&
+ btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
+ btrfs_file_extent_compression(leaf_l, ei_l) ==
+ btrfs_file_extent_compression(leaf_r, ei_r) &&
+ btrfs_file_extent_encryption(leaf_l, ei_l) ==
+ btrfs_file_extent_encryption(leaf_r, ei_r) &&
+ btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
+ btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
+ btrfs_file_extent_type(leaf_l, ei_l) ==
+ btrfs_file_extent_type(leaf_r, ei_r) &&
+ btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
+ btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
+ btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
+ btrfs_file_extent_offset(leaf_l, ei_l) ==
+ btrfs_file_extent_offset(leaf_r, ei_r) &&
+ btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
+ btrfs_file_extent_num_bytes(leaf_r, ei_r))
+ return 0;
+ }
+
inconsistent_snapshot_error(sctx, result, "extent");
return -EIO;
}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index d19ab0317283..caad80bb9bd0 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -273,20 +273,37 @@ out:
return ret;
}
-/**
- * test_bit_in_byte - Determine whether a bit is set in a byte
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline int test_bit_in_byte(int nr, const u8 *addr)
+static int check_eb_bitmap(unsigned long *bitmap, struct extent_buffer *eb,
+ unsigned long len)
{
- return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
+ unsigned long i;
+
+ for (i = 0; i < len * BITS_PER_BYTE; i++) {
+ int bit, bit1;
+
+ bit = !!test_bit(i, bitmap);
+ bit1 = !!extent_buffer_test_bit(eb, 0, i);
+ if (bit1 != bit) {
+ test_msg("Bits do not match\n");
+ return -EINVAL;
+ }
+
+ bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
+ i % BITS_PER_BYTE);
+ if (bit1 != bit) {
+ test_msg("Offset bits do not match\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
unsigned long len)
{
- unsigned long i, x;
+ unsigned long i, j;
+ u32 x;
+ int ret;
memset(bitmap, 0, len);
memset_extent_buffer(eb, 0, 0, len);
@@ -297,16 +314,18 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ ret = check_eb_bitmap(bitmap, eb, len);
+ if (ret) {
test_msg("Setting all bits failed\n");
- return -EINVAL;
+ return ret;
}
bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ ret = check_eb_bitmap(bitmap, eb, len);
+ if (ret) {
test_msg("Clearing all bits failed\n");
- return -EINVAL;
+ return ret;
}
/* Straddling pages test */
@@ -316,9 +335,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
sizeof(long) * BITS_PER_BYTE);
extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ ret = check_eb_bitmap(bitmap, eb, len);
+ if (ret) {
test_msg("Setting straddling pages failed\n");
- return -EINVAL;
+ return ret;
}
bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
@@ -328,9 +348,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
sizeof(long) * BITS_PER_BYTE);
- if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
+ ret = check_eb_bitmap(bitmap, eb, len);
+ if (ret) {
test_msg("Clearing straddling pages failed\n");
- return -EINVAL;
+ return ret;
}
}
@@ -339,28 +360,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
* something repetitive that could miss some hypothetical off-by-n bug.
*/
x = 0;
- for (i = 0; i < len / sizeof(long); i++) {
- x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
- bitmap[i] = x;
- }
- write_extent_buffer(eb, bitmap, 0, len);
-
- for (i = 0; i < len * BITS_PER_BYTE; i++) {
- int bit, bit1;
-
- bit = !!test_bit_in_byte(i, (u8 *)bitmap);
- bit1 = !!extent_buffer_test_bit(eb, 0, i);
- if (bit1 != bit) {
- test_msg("Testing bit pattern failed\n");
- return -EINVAL;
+ bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
+ extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
+ for (i = 0; i < len * BITS_PER_BYTE / 32; i++) {
+ x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffU;
+ for (j = 0; j < 32; j++) {
+ if (x & (1U << j)) {
+ bitmap_set(bitmap, i * 32 + j, 1);
+ extent_buffer_bitmap_set(eb, 0, i * 32 + j, 1);
+ }
}
+ }
- bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
- i % BITS_PER_BYTE);
- if (bit1 != bit) {
- test_msg("Testing bit pattern with offset failed\n");
- return -EINVAL;
- }
+ ret = check_eb_bitmap(bitmap, eb, len);
+ if (ret) {
+ test_msg("Random bit pattern failed\n");
+ return ret;
}
return 0;
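The reworked test above seeds both copies of the bitmap from the same 32-bit linear congruential generator, setting one bit at a time so the comparison exercises the byte-granular extent-buffer helpers rather than word-sized writes. A standalone sketch of that generator loop (constants taken from the test; the 4096-bit size is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 0;
	unsigned int i, j, set = 0;
	unsigned char bits[4096 / 8] = { 0 };	/* 4096 bits as an example */

	for (i = 0; i < 4096 / 32; i++) {
		/* Same LCG step as the test: x = 0x19660d * x + 0x3c6ef35f (mod 2^32) */
		x = (uint32_t)(0x19660dULL * x + 0x3c6ef35fULL);
		for (j = 0; j < 32; j++)
			if (x & (1U << j)) {
				bits[(i * 32 + j) / 8] |= 1U << ((i * 32 + j) % 8);
				set++;
			}
	}
	printf("set %u of 4096 bits\n", set);	/* deterministic, roughly half */
	return 0;
}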
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index 7508d3b42780..6e144048a72e 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -24,20 +24,15 @@
#include "../transaction.h"
struct free_space_extent {
- u64 start, length;
+ u64 start;
+ u64 length;
};
-/*
- * The test cases align their operations to this in order to hit some of the
- * edge cases in the bitmap code.
- */
-#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE)
-
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
struct btrfs_path *path,
- struct free_space_extent *extents,
+ const struct free_space_extent * const extents,
unsigned int num_extents)
{
struct btrfs_free_space_info *info;
@@ -126,7 +121,7 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
struct btrfs_path *path,
- struct free_space_extent *extents,
+ const struct free_space_extent * const extents,
unsigned int num_extents)
{
struct btrfs_free_space_info *info;
@@ -168,9 +163,10 @@ static int check_free_space_extents(struct btrfs_trans_handle *trans,
static int test_empty_block_group(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
+ const struct free_space_extent extents[] = {
{cache->key.objectid, cache->key.offset},
};
@@ -181,9 +177,10 @@ static int test_empty_block_group(struct btrfs_trans_handle *trans,
static int test_remove_all(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {};
+ const struct free_space_extent extents[] = {};
int ret;
ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
@@ -201,16 +198,17 @@ static int test_remove_all(struct btrfs_trans_handle *trans,
static int test_remove_beginning(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid + BITMAP_RANGE,
- cache->key.offset - BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid + alignment,
+ cache->key.offset - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid, BITMAP_RANGE);
+ cache->key.objectid, alignment);
if (ret) {
test_msg("Could not remove free space\n");
return ret;
@@ -224,17 +222,18 @@ static int test_remove_beginning(struct btrfs_trans_handle *trans,
static int test_remove_end(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid, cache->key.offset - BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid, cache->key.offset - alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
cache->key.objectid +
- cache->key.offset - BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.offset - alignment,
+ alignment);
if (ret) {
test_msg("Could not remove free space\n");
return ret;
@@ -247,18 +246,19 @@ static int test_remove_end(struct btrfs_trans_handle *trans,
static int test_remove_middle(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid, BITMAP_RANGE},
- {cache->key.objectid + 2 * BITMAP_RANGE,
- cache->key.offset - 2 * BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid, alignment},
+ {cache->key.objectid + 2 * alignment,
+ cache->key.offset - 2 * alignment},
};
int ret;
ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + alignment,
+ alignment);
if (ret) {
test_msg("Could not remove free space\n");
return ret;
@@ -271,10 +271,11 @@ static int test_remove_middle(struct btrfs_trans_handle *trans,
static int test_merge_left(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid, 2 * BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid, 2 * alignment},
};
int ret;
@@ -287,15 +288,15 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid, BITMAP_RANGE);
+ cache->key.objectid, alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
@@ -308,10 +309,11 @@ static int test_merge_left(struct btrfs_trans_handle *trans,
static int test_merge_right(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid + BITMAP_RANGE, 2 * BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid + alignment, 2 * alignment},
};
int ret;
@@ -324,16 +326,16 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + 2 * BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + 2 * alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
@@ -346,10 +348,11 @@ static int test_merge_right(struct btrfs_trans_handle *trans,
static int test_merge_both(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid, 3 * BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid, 3 * alignment},
};
int ret;
@@ -362,23 +365,23 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid, BITMAP_RANGE);
+ cache->key.objectid, alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + 2 * BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + 2 * alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
@@ -391,12 +394,13 @@ static int test_merge_both(struct btrfs_trans_handle *trans,
static int test_merge_none(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *cache,
- struct btrfs_path *path)
+ struct btrfs_path *path,
+ u32 alignment)
{
- struct free_space_extent extents[] = {
- {cache->key.objectid, BITMAP_RANGE},
- {cache->key.objectid + 2 * BITMAP_RANGE, BITMAP_RANGE},
- {cache->key.objectid + 4 * BITMAP_RANGE, BITMAP_RANGE},
+ const struct free_space_extent extents[] = {
+ {cache->key.objectid, alignment},
+ {cache->key.objectid + 2 * alignment, alignment},
+ {cache->key.objectid + 4 * alignment, alignment},
};
int ret;
@@ -409,23 +413,23 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid, BITMAP_RANGE);
+ cache->key.objectid, alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + 4 * BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + 4 * alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
}
ret = __add_to_free_space_tree(trans, fs_info, cache, path,
- cache->key.objectid + 2 * BITMAP_RANGE,
- BITMAP_RANGE);
+ cache->key.objectid + 2 * alignment,
+ alignment);
if (ret) {
test_msg("Could not add free space\n");
return ret;
@@ -438,10 +442,11 @@ static int test_merge_none(struct btrfs_trans_handle *trans,
typedef int (*test_func_t)(struct btrfs_trans_handle *,
struct btrfs_fs_info *,
struct btrfs_block_group_cache *,
- struct btrfs_path *);
+ struct btrfs_path *,
+ u32 alignment);
-static int run_test(test_func_t test_func, int bitmaps,
- u32 sectorsize, u32 nodesize)
+static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ u32 nodesize, u32 alignment)
{
struct btrfs_fs_info *fs_info;
struct btrfs_root *root = NULL;
@@ -480,7 +485,7 @@ static int run_test(test_func_t test_func, int bitmaps,
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
- cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize);
+ cache = btrfs_alloc_dummy_block_group(8 * alignment, sectorsize);
if (!cache) {
test_msg("Couldn't allocate dummy block group cache\n");
ret = -ENOMEM;
@@ -514,7 +519,7 @@ static int run_test(test_func_t test_func, int bitmaps,
}
}
- ret = test_func(&trans, root->fs_info, cache, path);
+ ret = test_func(&trans, root->fs_info, cache, path, alignment);
if (ret)
goto out;
@@ -539,15 +544,27 @@ out:
return ret;
}
-static int run_test_both_formats(test_func_t test_func,
- u32 sectorsize, u32 nodesize)
+static int run_test_both_formats(test_func_t test_func, u32 sectorsize,
+ u32 nodesize, u32 alignment)
{
+ int test_ret = 0;
int ret;
- ret = run_test(test_func, 0, sectorsize, nodesize);
- if (ret)
- return ret;
- return run_test(test_func, 1, sectorsize, nodesize);
+ ret = run_test(test_func, 0, sectorsize, nodesize, alignment);
+ if (ret) {
+ test_msg("%pf failed with extents, sectorsize=%u, nodesize=%u, alignment=%u\n",
+ test_func, sectorsize, nodesize, alignment);
+ test_ret = ret;
+ }
+
+ ret = run_test(test_func, 1, sectorsize, nodesize, alignment);
+ if (ret) {
+ test_msg("%pf failed with bitmaps, sectorsize=%u, nodesize=%u, alignment=%u\n",
+ test_func, sectorsize, nodesize, alignment);
+ test_ret = ret;
+ }
+
+ return test_ret;
}
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
@@ -563,18 +580,30 @@ int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
test_merge_both,
test_merge_none,
};
+ u32 bitmap_alignment;
+ int test_ret = 0;
int i;
+ /*
+ * Align some operations to a page to flush out bugs in the extent
+ * buffer bitmap handling of highmem.
+ */
+ bitmap_alignment = BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE;
+
test_msg("Running free space tree tests\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
- int ret = run_test_both_formats(tests[i], sectorsize,
- nodesize);
- if (ret) {
- test_msg("%pf : sectorsize %u failed\n",
- tests[i], sectorsize);
- return ret;
- }
+ int ret;
+
+ ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+ sectorsize);
+ if (ret)
+ test_ret = ret;
+
+ ret = run_test_both_formats(tests[i], sectorsize, nodesize,
+ bitmap_alignment);
+ if (ret)
+ test_ret = ret;
}
- return 0;
+ return test_ret;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 528cae123dc9..3d33c4e41e5f 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
int index, int error)
{
struct btrfs_log_ctx *ctx;
+ struct btrfs_log_ctx *safe;
- if (!error) {
- INIT_LIST_HEAD(&root->log_ctxs[index]);
- return;
- }
-
- list_for_each_entry(ctx, &root->log_ctxs[index], list)
+ list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+ list_del_init(&ctx->list);
ctx->log_ret = error;
+ }
INIT_LIST_HEAD(&root->log_ctxs[index]);
}
@@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&root->log_mutex);
out_wake_log_root:
- /*
- * We needn't get log_mutex here because we are sure all
- * the other tasks are blocked.
- */
+ mutex_lock(&log_root_tree->log_mutex);
btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
- mutex_lock(&log_root_tree->log_mutex);
log_root_tree->log_transid_committed++;
atomic_set(&log_root_tree->log_commit[index2], 0);
mutex_unlock(&log_root_tree->log_mutex);
@@ -2978,10 +2972,8 @@ out_wake_log_root:
if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
wake_up(&log_root_tree->log_commit_wait[index2]);
out:
- /* See above. */
- btrfs_remove_all_log_ctxs(root, index1, ret);
-
mutex_lock(&root->log_mutex);
+ btrfs_remove_all_log_ctxs(root, index1, ret);
root->log_transid_committed++;
atomic_set(&root->log_commit[index1], 0);
mutex_unlock(&root->log_mutex);
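The tree-log change above makes btrfs_remove_all_log_ctxs() always walk the context list under log_mutex and detach each entry before publishing the error, since a waiter may free its ctx as soon as it sees log_ret; that is why the _safe iterator is required. A reduced user-space sketch (hypothetical types) of why the successor has to be fetched before the current node is handed back to its owner:

#include <stdlib.h>

struct ctx {
	int log_ret;
	struct ctx *next;
};

int main(void)
{
	struct ctx *head = NULL, *c, *next;
	int i;

	for (i = 0; i < 3; i++) {		/* build a small list */
		c = malloc(sizeof(*c));
		c->log_ret = 0;
		c->next = head;
		head = c;
	}

	for (c = head; c; c = next) {
		next = c->next;			/* grab the successor first */
		c->log_ret = -5;		/* stand-in for the error code */
		free(c);			/* owner may reclaim it immediately */
	}
	return 0;
}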
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7bf08825cc11..18630e800208 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1272,7 +1272,8 @@ again:
statret = __ceph_do_getattr(inode, page,
CEPH_STAT_CAP_INLINE_DATA, !!page);
if (statret < 0) {
- __free_page(page);
+ if (page)
+ __free_page(page);
if (statret == -ENODATA) {
BUG_ON(retry_op != READ_INLINE);
goto again;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index bca1b49c1c4b..ef4d04647325 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1511,7 +1511,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
}
- if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
+ if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
+ !(rinfo->hash_order && req->r_path2)) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a29ffce98187..b382e5910eea 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -845,6 +845,8 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
err = ceph_fs_debugfs_init(fsc);
if (err < 0)
goto fail;
+ } else {
+ root = dget(fsc->sb->s_root);
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 40b703217977..febc28f9e2c2 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -16,7 +16,7 @@
static int __remove_xattr(struct ceph_inode_info *ci,
struct ceph_inode_xattr *xattr);
-const struct xattr_handler ceph_other_xattr_handler;
+static const struct xattr_handler ceph_other_xattr_handler;
/*
* List of handlers for synthetic system.* attributes. Other
@@ -1086,7 +1086,7 @@ static int ceph_set_xattr_handler(const struct xattr_handler *handler,
return __ceph_setxattr(inode, name, value, size, flags);
}
-const struct xattr_handler ceph_other_xattr_handler = {
+static const struct xattr_handler ceph_other_xattr_handler = {
.prefix = "", /* match any name => handlers called with full name */
.get = ceph_get_xattr_handler,
.set = ceph_set_xattr_handler,
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 6c58e13fed2f..3d03e48a9213 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -152,6 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
tcp_ses_list);
+ seq_printf(m, "\nNumber of credits: %d", server->credits);
i++;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 1418daa03d95..07ed81cf1552 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -49,6 +49,7 @@
#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible
* root mountable
*/
+#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 0065256881d8..57ff0756e30c 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -36,7 +36,15 @@ struct smb_mnt_fs_info {
__u64 cifs_posix_caps;
} __packed;
+struct smb_snapshot_array {
+ __u32 number_of_snapshots;
+ __u32 number_of_snapshots_returned;
+ __u32 snapshot_array_size;
+ /* snapshots[]; */
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
+#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
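The new CIFS_ENUMERATE_SNAPSHOTS ioctl exchanges a struct smb_snapshot_array with the caller: user space passes the size of the buffer it allocated in snapshot_array_size and reads back the snapshot counts (and, with a larger buffer, the snapshot strings following the header). A hedged user-space sketch of the expected calling convention (the mount-point path is illustrative, and the exact buffer protocol should be checked against the driver):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

struct smb_snapshot_array {
	uint32_t number_of_snapshots;
	uint32_t number_of_snapshots_returned;
	uint32_t snapshot_array_size;
	/* snapshot strings follow in larger buffers */
} __attribute__((packed));

#define CIFS_IOCTL_MAGIC	0xCF
#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)

int main(void)
{
	struct smb_snapshot_array snap = { .snapshot_array_size = sizeof(snap) };
	int fd = open("/mnt/cifs/somefile", O_RDONLY);	/* a file on a CIFS mount */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, CIFS_ENUMERATE_SNAPSHOTS, &snap) < 0) {
		perror("CIFS_ENUMERATE_SNAPSHOTS");
		close(fd);
		return 1;
	}
	printf("%u snapshots available\n", snap.number_of_snapshots);
	close(fd);
	return 0;
}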
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 71e8a56e9479..15bac390dff9 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -42,6 +42,35 @@ static const struct cifs_sid sid_authusers = {
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
+/* S-1-22-1 Unmapped Unix users */
+static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+ {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-22-2 Unmapped Unix groups */
+static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+ {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/*
+ * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+ */
+
+/* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+
+/* S-1-5-88-1 Unix uid */
+static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-2 Unix gid */
+static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+/* S-1-5-88-3 Unix mode */
+static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
static const struct cred *root_cred;
static int
@@ -183,6 +212,62 @@ compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
return 0; /* sids compare/match */
}
+static bool
+is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
+{
+ int i;
+ int num_subauth;
+ const struct cifs_sid *pwell_known_sid;
+
+ if (!psid || (puid == NULL))
+ return false;
+
+ num_subauth = psid->num_subauth;
+
+ /* check if Mac (or Windows NFS) vs. Samba format for Unix owner SID */
+ if (num_subauth == 2) {
+ if (is_group)
+ pwell_known_sid = &sid_unix_groups;
+ else
+ pwell_known_sid = &sid_unix_users;
+ } else if (num_subauth == 3) {
+ if (is_group)
+ pwell_known_sid = &sid_unix_NFS_groups;
+ else
+ pwell_known_sid = &sid_unix_NFS_users;
+ } else
+ return false;
+
+ /* compare the revision */
+ if (psid->revision != pwell_known_sid->revision)
+ return false;
+
+ /* compare all of the six auth values */
+ for (i = 0; i < NUM_AUTHS; ++i) {
+ if (psid->authority[i] != pwell_known_sid->authority[i]) {
+ cifs_dbg(FYI, "auth %d did not match\n", i);
+ return false;
+ }
+ }
+
+ if (num_subauth == 2) {
+ if (psid->sub_auth[0] != pwell_known_sid->sub_auth[0])
+ return false;
+
+ *puid = le32_to_cpu(psid->sub_auth[1]);
+ } else /* 3 subauths, ie Windows/Mac style */ {
+ *puid = le32_to_cpu(psid->sub_auth[0]);
+ if ((psid->sub_auth[0] != pwell_known_sid->sub_auth[0]) ||
+ (psid->sub_auth[1] != pwell_known_sid->sub_auth[1]))
+ return false;
+
+ *puid = le32_to_cpu(psid->sub_auth[2]);
+ }
+
+ cifs_dbg(FYI, "Unix UID %d returned from SID\n", *puid);
+ return true; /* well known sid found, uid returned */
+}
+
static void
cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
{
@@ -276,6 +361,43 @@ sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
return -EIO;
}
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL) {
+ uint32_t unix_id;
+ bool is_group;
+
+ if (sidtype != SIDOWNER)
+ is_group = true;
+ else
+ is_group = false;
+
+ if (is_well_known_sid(psid, &unix_id, is_group) == false)
+ goto try_upcall_to_get_id;
+
+ if (is_group) {
+ kgid_t gid;
+ gid_t id;
+
+ id = (gid_t)unix_id;
+ gid = make_kgid(&init_user_ns, id);
+ if (gid_valid(gid)) {
+ fgid = gid;
+ goto got_valid_id;
+ }
+ } else {
+ kuid_t uid;
+ uid_t id;
+
+ id = (uid_t)unix_id;
+ uid = make_kuid(&init_user_ns, id);
+ if (uid_valid(uid)) {
+ fuid = uid;
+ goto got_valid_id;
+ }
+ }
+ /* If unable to find uid/gid easily from SID try via upcall */
+ }
+
+try_upcall_to_get_id:
sidstr = sid_to_key_str(psid, sidtype);
if (!sidstr)
return -ENOMEM;
@@ -329,6 +451,7 @@ out_revert_creds:
* Note that we return 0 here unconditionally. If the mapping
* fails then we just fall back to using the mnt_uid/mnt_gid.
*/
+got_valid_id:
if (sidtype == SIDOWNER)
fattr->cf_uid = fuid;
else
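The idsfromsid path above recognizes the two SID encodings servers commonly use for unmapped Unix owners: Samba-style S-1-22-1-<uid> / S-1-22-2-<gid> with two sub-authorities, and the Windows NFS / Mac style S-1-5-88-1-<uid> / S-1-5-88-2-<gid> with three, taking the numeric id from the last sub-authority. A simplified sketch of that decoding for the Samba form (the struct layout is reduced for illustration; the kernel stores sub-authorities little-endian):

#include <stdio.h>
#include <stdint.h>

struct simple_sid {
	uint8_t revision;
	uint8_t num_subauth;
	uint8_t authority;		/* last byte of the 6-byte authority */
	uint32_t sub_auth[3];
};

static int sid_to_unix_id(const struct simple_sid *sid, uint32_t *id)
{
	if (sid->revision != 1 || sid->authority != 22 || sid->num_subauth != 2)
		return 0;
	if (sid->sub_auth[0] != 1 && sid->sub_auth[0] != 2)
		return 0;		/* 1 = users, 2 = groups */
	*id = sid->sub_auth[1];
	return 1;
}

int main(void)
{
	struct simple_sid s = { 1, 2, 22, { 1, 1000, 0 } };	/* S-1-22-1-1000 */
	uint32_t uid;

	if (sid_to_unix_id(&s, &uid))
		printf("unix uid %u\n", uid);	/* prints: unix uid 1000 */
	return 0;
}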
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index cca04e710421..15261ba464c5 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,15 +64,15 @@ unsigned int global_secflags = CIFSSEC_DEF;
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
-module_param(CIFSMaxBufSize, uint, 0);
+module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
"Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
-module_param(cifs_min_rcv, uint, 0);
+module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
"1 to 64");
unsigned int cifs_min_small = 30;
-module_param(cifs_min_small, uint, 0);
+module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
"Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
@@ -271,7 +271,7 @@ cifs_alloc_inode(struct super_block *sb)
cifs_inode->createtime = 0;
cifs_inode->epoch = 0;
#ifdef CONFIG_CIFS_SMB2
- get_random_bytes(cifs_inode->lease_key, SMB2_LEASE_KEY_SIZE);
+ generate_random_uuid(cifs_inode->lease_key);
#endif
/*
* Can not set i_flags here - they get immediately overwritten to zero
@@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
seq_puts(s, ",setuids");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
+ seq_puts(s, ",idsfromsid");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
seq_puts(s, ",serverino");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
@@ -1262,7 +1264,6 @@ init_cifs(void)
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
spin_lock_init(&cifs_tcp_ses_lock);
- spin_lock_init(&cifs_file_list_lock);
spin_lock_init(&GlobalMid_Lock);
get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 8f1d8c1e72be..1f17f6bd7a60 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -75,6 +75,18 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
+/*
+ * Default number of credits to keep available for SMB3.
+ * This value is chosen somewhat arbitrarily. The Windows client
+ * defaults to 128 credits, the Windows server allows clients up to
+ * 512 credits (or 8K for later versions), and the NetApp server
+ * does not limit clients at all. Choose a high enough default value
+ * such that the client shouldn't limit performance, but allow mount
+ * to override (up to about 64K, where we limit credits to 65000
+ * to reduce the possibility of seeing more server credit overflow bugs).
+ */
+#define SMB2_MAX_CREDITS_AVAILABLE 32000
+
#include "cifspdu.h"
#ifndef XATTR_DOS_ATTRIB
@@ -376,6 +388,8 @@ struct smb_version_operations {
int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
struct cifsFileInfo *src_file);
+ int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *src_file, void __user *);
int (*query_mf_symlink)(unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const unsigned char *,
char *, unsigned int *);
@@ -464,6 +478,7 @@ struct smb_vol {
bool retry:1;
bool intr:1;
bool setuids:1;
+ bool setuidfromacl:1;
bool override_uid:1;
bool override_gid:1;
bool dynperm:1;
@@ -510,6 +525,7 @@ struct smb_vol {
struct sockaddr_storage srcaddr; /* allow binding to a local IP */
struct nls_table *local_nls;
unsigned int echo_interval; /* echo interval in secs */
+ unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
};
#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
@@ -567,7 +583,8 @@ struct TCP_Server_Info {
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
bool tcp_nodelay;
- int credits; /* send no more requests at once */
+ unsigned int credits; /* send no more requests at once */
+ unsigned int max_credits; /* can override large 32000 default at mnt */
unsigned int in_flight; /* number of requests on the wire to server */
spinlock_t req_lock; /* protect the two values above */
struct mutex srv_mutex;
@@ -833,6 +850,7 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head openFileList;
+ spinlock_t open_file_lock; /* protects list above */
struct cifs_ses *ses; /* pointer to session associated with */
char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
@@ -889,7 +907,7 @@ struct cifs_tcon {
#endif /* CONFIG_CIFS_STATS2 */
__u64 bytes_read;
__u64 bytes_written;
- spinlock_t stat_lock;
+ spinlock_t stat_lock; /* protects the two fields above */
#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
@@ -1040,20 +1058,24 @@ struct cifs_fid_locks {
};
struct cifsFileInfo {
+ /* following two lists are protected by tcon->open_file_lock */
struct list_head tlist; /* pointer to next fid owned by tcon */
struct list_head flist; /* next fid (file instance) for this inode */
+ /* lock list below protected by cifsi->lock_sem */
struct cifs_fid_locks *llist; /* brlocks held by this fid */
kuid_t uid; /* allows finding which FileInfo structure */
__u32 pid; /* process id who opened file */
struct cifs_fid fid; /* file id from remote */
+ struct list_head rlist; /* reconnect list */
/* BB add lock scope info here if needed */ ;
/* lock scope id (0 if none) */
struct dentry *dentry;
- unsigned int f_flags;
struct tcon_link *tlink;
+ unsigned int f_flags;
bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1;
- int count; /* refcount protected by cifs_file_list_lock */
+ int count;
+ spinlock_t file_info_lock; /* protects four flag/count fields above */
struct mutex fh_mutex; /* prevents reopen race after dead ses*/
struct cifs_search_info srch_inf;
struct work_struct oplock_break; /* work for oplock breaks */
@@ -1120,7 +1142,7 @@ struct cifs_writedata {
/*
* Take a reference on the file private data. Must be called with
- * cifs_file_list_lock held.
+ * cfile->file_info_lock held.
*/
static inline void
cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
@@ -1514,8 +1536,10 @@ require use of the stronger protocol */
* GlobalMid_Lock protects:
* list operations on pending_mid_q and oplockQ
* updates to XID counters, multiplex id and SMB sequence numbers
- * cifs_file_list_lock protects:
- * list operations on tcp and SMB session lists and tCon lists
+ * tcp_ses_lock protects:
+ * list operations on tcp and SMB session lists
+ * tcon->open_file_lock protects the list of open files hanging off the tcon
+ * cfile->file_info_lock protects counters and fields in cifs file struct
* f_owner.lock protects certain per file struct operations
* mapping->page_lock protects certain per page operations
*
@@ -1547,18 +1571,12 @@ GLOBAL_EXTERN struct list_head cifs_tcp_ses_list;
* tcp session, and the list of tcon's per smb session. It also protects
* the reference counters for the server, smb session, and tcon. Finally,
* changes to the tcon->tidStatus should be done while holding this lock.
+ * generally the locks should be taken in order tcp_ses_lock before
+ * tcon->open_file_lock and that before file->file_info_lock since the
+ * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
*/
GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock;
-/*
- * This lock protects the cifs_file->llist and cifs_file->flist
- * list operations, and updates to some flags (cifs_file->invalidHandle)
- * It will be moved to either use the tcon->stat_lock or equivalent later.
- * If cifs_tcp_ses_lock and the lock below are both needed to be held, then
- * the cifs_tcp_ses_lock must be grabbed first and released last.
- */
-GLOBAL_EXTERN spinlock_t cifs_file_list_lock;
-
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
/* Outstanding dir notify requests */
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
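The comment changes above document the lock hierarchy this series moves to: cifs_tcp_ses_lock for the global session and tcon lists, then tcon->open_file_lock for a tree connection's open-file list, then cfile->file_info_lock for one file's flags and refcount, mirroring the socket -> session -> tcon -> file containment. A schematic pthread sketch of acquiring nested locks in that fixed order (stand-in mutexes, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tcp_ses_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t open_file_lock = PTHREAD_MUTEX_INITIALIZER;	/* per tcon */
static pthread_mutex_t file_info_lock = PTHREAD_MUTEX_INITIALIZER;	/* per file */

int main(void)
{
	/* Always nest in the same order to avoid ABBA deadlocks. */
	pthread_mutex_lock(&tcp_ses_lock);
	pthread_mutex_lock(&open_file_lock);
	pthread_mutex_lock(&file_info_lock);

	printf("order: tcp_ses_lock -> open_file_lock -> file_info_lock\n");

	pthread_mutex_unlock(&file_info_lock);	/* release in reverse order */
	pthread_mutex_unlock(&open_file_lock);
	pthread_mutex_unlock(&tcp_ses_lock);
	return 0;
}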
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4ead72a001f9..ced0e42ce460 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -193,6 +193,8 @@ extern struct smb_vol *cifs_get_volume_info(char *mount_data,
extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *);
extern void cifs_umount(struct cifs_sb_info *);
extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
+extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
+
extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
__u64 length, __u8 type,
struct cifsLockInfo **conf_lock,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index f82d2823622f..3f3185febc58 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -98,13 +98,13 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
struct list_head *tmp1;
/* list all files open on tree connection and mark them invalid */
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
open_file = list_entry(tmp, struct cifsFileInfo, tlist);
open_file->invalidHandle = true;
open_file->oplock_break_cancelled = true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
* to this tcon.
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 2e4f4bad8b1e..aab5227979e2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -63,7 +63,6 @@ extern mempool_t *cifs_req_poolp;
#define TLINK_IDLE_EXPIRE (600 * HZ)
enum {
-
/* Mount options that take no arguments */
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
@@ -76,7 +75,7 @@ enum {
Opt_noposixpaths, Opt_nounix,
Opt_nocase,
Opt_brl, Opt_nobrl,
- Opt_forcemandatorylock, Opt_setuids,
+ Opt_forcemandatorylock, Opt_setuidfromacl, Opt_setuids,
Opt_nosetuids, Opt_dynperm, Opt_nodynperm,
Opt_nohard, Opt_nosoft,
Opt_nointr, Opt_intr,
@@ -95,7 +94,7 @@ enum {
Opt_cruid, Opt_gid, Opt_file_mode,
Opt_dirmode, Opt_port,
Opt_rsize, Opt_wsize, Opt_actimeo,
- Opt_echo_interval,
+ Opt_echo_interval, Opt_max_credits,
/* Mount options which take string value */
Opt_user, Opt_pass, Opt_ip,
@@ -148,6 +147,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_forcemandatorylock, "forcemand" },
{ Opt_setuids, "setuids" },
{ Opt_nosetuids, "nosetuids" },
+ { Opt_setuidfromacl, "idsfromsid" },
{ Opt_dynperm, "dynperm" },
{ Opt_nodynperm, "nodynperm" },
{ Opt_nohard, "nohard" },
@@ -190,6 +190,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_wsize, "wsize=%s" },
{ Opt_actimeo, "actimeo=%s" },
{ Opt_echo_interval, "echo_interval=%s" },
+ { Opt_max_credits, "max_credits=%s" },
{ Opt_blank_user, "user=" },
{ Opt_blank_user, "username=" },
@@ -1376,6 +1377,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_nosetuids:
vol->setuids = 0;
break;
+ case Opt_setuidfromacl:
+ vol->setuidfromacl = 1;
+ break;
case Opt_dynperm:
vol->dynperm = true;
break;
@@ -1586,6 +1590,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
vol->echo_interval = option;
break;
+ case Opt_max_credits:
+ if (get_option_ul(args, &option) || (option < 20) ||
+ (option > 60000)) {
+ cifs_dbg(VFS, "%s: Invalid max_credits value\n",
+ __func__);
+ goto cifs_parse_mount_err;
+ }
+ vol->max_credits = option;
+ break;
/* String Arguments */
@@ -2163,7 +2176,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
sizeof(tcp_ses->dstaddr));
#ifdef CONFIG_CIFS_SMB2
- get_random_bytes(tcp_ses->client_guid, SMB2_CLIENT_GUID_SIZE);
+ generate_random_uuid(tcp_ses->client_guid);
#endif
/*
* at this point we are the only ones with the pointer
@@ -3270,6 +3283,8 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
if (pvolume_info->setuids)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID;
+ if (pvolume_info->setuidfromacl)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL;
if (pvolume_info->server_ino)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM;
if (pvolume_info->remap)
@@ -3598,7 +3613,11 @@ try_mount_again:
bdi_destroy(&cifs_sb->bdi);
goto out;
}
-
+ if ((volume_info->max_credits < 20) ||
+ (volume_info->max_credits > 60000))
+ server->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
+ else
+ server->max_credits = volume_info->max_credits;
/* get a reference to a SMB session */
ses = cifs_get_smb_ses(server, volume_info);
if (IS_ERR(ses)) {
@@ -3688,14 +3707,16 @@ remote_path_check:
goto mount_fail_check;
}
- rc = cifs_are_all_path_components_accessible(server,
+ if (rc != -EREMOTE) {
+ rc = cifs_are_all_path_components_accessible(server,
xid, tcon, cifs_sb,
full_path);
- if (rc != 0) {
- cifs_dbg(VFS, "cannot query dirs between root and final path, "
- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
- cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
- rc = 0;
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = 0;
+ }
}
kfree(full_path);
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index a95fe8b1afe9..7f5f6176c6f1 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -305,6 +305,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
cfile->tlink = cifs_get_tlink(tlink);
INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
mutex_init(&cfile->fh_mutex);
+ spin_lock_init(&cfile->file_info_lock);
cifs_sb_active(inode->i_sb);
@@ -317,7 +318,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
oplock = 0;
}
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
oplock = fid->pending_open->oplock;
list_del(&fid->pending_open->olist);
@@ -326,12 +327,13 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
server->ops->set_fid(cfile, fid, oplock);
list_add(&cfile->tlist, &tcon->openFileList);
+
/* if readable file instance put first in list*/
if (file->f_mode & FMODE_READ)
list_add(&cfile->flist, &cinode->openFileList);
else
list_add_tail(&cfile->flist, &cinode->openFileList);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
if (fid->purge_cache)
cifs_zap_mapping(inode);
@@ -343,16 +345,16 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cifs_file->file_info_lock);
cifsFileInfo_get_locked(cifs_file);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_file->file_info_lock);
return cifs_file;
}
/*
* Release a reference on the file private data. This may involve closing
* the filehandle out on the server. Must be called without holding
- * cifs_file_list_lock.
+ * tcon->open_file_lock and cifs_file->file_info_lock.
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
@@ -367,11 +369,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
struct cifs_pending_open open;
bool oplock_break_cancelled;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
+
+ spin_lock(&cifs_file->file_info_lock);
if (--cifs_file->count > 0) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cifs_file->file_info_lock);
+ spin_unlock(&tcon->open_file_lock);
return;
}
+ spin_unlock(&cifs_file->file_info_lock);
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
@@ -395,7 +401,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
cifs_set_oplock_level(cifsi, 0);
}
- spin_unlock(&cifs_file_list_lock);
+
+ spin_unlock(&tcon->open_file_lock);
oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
@@ -732,6 +739,15 @@ reopen_success:
* to the server to get the new inode info.
*/
+ /*
+ * If the server returned a read oplock and we have mandatory brlocks,
+ * set oplock level to None.
+ */
+ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
+ cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
+ oplock = 0;
+ }
+
server->ops->set_fid(cfile, &cfile->fid, oplock);
if (oparms.reconnect)
cifs_relock_file(cfile);
@@ -753,6 +769,36 @@ int cifs_close(struct inode *inode, struct file *file)
return 0;
}
+void
+cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
+{
+ struct cifsFileInfo *open_file;
+ struct list_head *tmp;
+ struct list_head *tmp1;
+ struct list_head tmp_list;
+
+ cifs_dbg(FYI, "Reopen persistent handles");
+ INIT_LIST_HEAD(&tmp_list);
+
+ /* list all files open on tree connection, reopen resilient handles */
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ open_file = list_entry(tmp, struct cifsFileInfo, tlist);
+ if (!open_file->invalidHandle)
+ continue;
+ cifsFileInfo_get(open_file);
+ list_add_tail(&open_file->rlist, &tmp_list);
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_safe(tmp, tmp1, &tmp_list) {
+ open_file = list_entry(tmp, struct cifsFileInfo, rlist);
+ cifs_reopen_file(open_file, false /* do not flush */);
+ list_del_init(&open_file->rlist);
+ cifsFileInfo_put(open_file);
+ }
+}
+
int cifs_closedir(struct inode *inode, struct file *file)
{
int rc = 0;
@@ -772,10 +818,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
server = tcon->ses->server;
cifs_dbg(FYI, "Freeing private data in close dir\n");
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cfile->file_info_lock);
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (server->ops->close_dir)
rc = server->ops->close_dir(xid, tcon, &cfile->fid);
else
@@ -784,7 +830,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
/* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
buf = cfile->srch_inf.ntwrk_buf_start;
if (buf) {
@@ -1728,12 +1774,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file = NULL;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
/* we could simply get the first_list_entry since write-only entries
are always at the end of the list but since the first entry might
have a close pending, we go through the whole list */
@@ -1744,8 +1791,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
if (!open_file->invalidHandle) {
/* found a good file */
/* lock it so it will not be closed on us */
- cifsFileInfo_get_locked(open_file);
- spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_get(open_file);
+ spin_unlock(&tcon->open_file_lock);
return open_file;
} /* else might as well continue, and look for
another, or simply have the caller reopen it
@@ -1753,7 +1800,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
} else /* write only file */
break; /* write only files are last so must be done */
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return NULL;
}
@@ -1762,6 +1809,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
+ struct cifs_tcon *tcon;
bool any_available = false;
int rc;
unsigned int refind = 0;
@@ -1777,15 +1825,16 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+ tcon = cifs_sb_master_tcon(cifs_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
refind_writable:
if (refind > MAX_REOPEN_ATT) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return NULL;
}
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1796,8 +1845,8 @@ refind_writable:
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
- cifsFileInfo_get_locked(open_file);
- spin_unlock(&cifs_file_list_lock);
+ cifsFileInfo_get(open_file);
+ spin_unlock(&tcon->open_file_lock);
return open_file;
} else {
if (!inv_file)
@@ -1813,24 +1862,24 @@ refind_writable:
if (inv_file) {
any_available = false;
- cifsFileInfo_get_locked(inv_file);
+ cifsFileInfo_get(inv_file);
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
if (inv_file) {
rc = cifs_reopen_file(inv_file, false);
if (!rc)
return inv_file;
else {
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_move_tail(&inv_file->flist,
&cifs_inode->openFileList);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
cifsFileInfo_put(inv_file);
- spin_lock(&cifs_file_list_lock);
++refind;
inv_file = NULL;
+ spin_lock(&tcon->open_file_lock);
goto refind_writable;
}
}
@@ -3612,15 +3661,17 @@ static int cifs_readpage(struct file *file, struct page *page)
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
struct cifsFileInfo *open_file;
+ struct cifs_tcon *tcon =
+ cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return 1;
}
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
return 0;
}
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 7a3b84e300f8..9f51b81119f2 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -189,7 +189,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
xid = get_xid();
cifs_sb = CIFS_SB(inode->i_sb);
-
+ cifs_dbg(VFS, "cifs ioctl 0x%x\n", command);
switch (command) {
case FS_IOC_GETFLAGS:
if (pSMBFile == NULL)
@@ -267,11 +267,23 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
tcon = tlink_tcon(pSMBFile->tlink);
rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
break;
+ case CIFS_ENUMERATE_SNAPSHOTS:
+ if (arg == 0) {
+ rc = -EINVAL;
+ goto cifs_ioc_exit;
+ }
+ tcon = tlink_tcon(pSMBFile->tlink);
+ if (tcon->ses->server->ops->enum_snapshots)
+ rc = tcon->ses->server->ops->enum_snapshots(xid, tcon,
+ pSMBFile, (void __user *)arg);
+ else
+ rc = -EOPNOTSUPP;
+ break;
default:
cifs_dbg(FYI, "unsupported ioctl\n");
break;
}
-
+cifs_ioc_exit:
free_xid(xid);
return rc;
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 813fe13c2ae1..c6729156f9a0 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -120,6 +120,7 @@ tconInfoAlloc(void)
++ret_buf->tc_count;
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
+ spin_lock_init(&ret_buf->open_file_lock);
#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
#endif
@@ -465,7 +466,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
continue;
cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
netfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
@@ -495,11 +496,11 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
&netfile->oplock_break);
netfile->oplock_break_cancelled = false;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "No matching file for oplock break\n");
return true;
@@ -613,9 +614,9 @@ backup_cred(struct cifs_sb_info *cifs_sb)
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
list_del(&open->olist);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
void
@@ -635,7 +636,7 @@ void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
struct cifs_pending_open *open)
{
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tlink_tcon(tlink)->open_file_lock);
cifs_add_pending_open_locked(fid, tlink, open);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tlink_tcon(tlink)->open_file_lock);
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 65cf85dcda09..8f6a2a5863b9 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -597,14 +597,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
is_dir_changed(file)) || (index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cifs_dbg(FYI, "search backing up - close and restart search\n");
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&cfile->file_info_lock);
if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (server->ops->close_dir)
server->ops->close_dir(xid, tcon, &cfile->fid);
} else
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&cfile->file_info_lock);
if (cfile->srch_inf.ntwrk_buf_start) {
cifs_dbg(FYI, "freeing SMB ff cache buf on search rewind\n");
if (cfile->srch_inf.smallBuf)
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 4f0231e685a9..1238cd3552f9 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -266,9 +266,15 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
struct tcon_link *tlink;
int rc;
+ if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
+ (buf->Attributes == 0))
+ return 0; /* would be a no op, no sense sending this */
+
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
+
rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
SMB2_OP_SET_INFO);
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 389fb9f8c84e..3d383489b9cf 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -549,19 +549,19 @@ smb2_is_valid_lease_break(char *buffer)
list_for_each(tmp1, &server->smb_ses_list) {
ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
- spin_lock(&cifs_file_list_lock);
list_for_each(tmp2, &ses->tcon_list) {
tcon = list_entry(tmp2, struct cifs_tcon,
tcon_list);
+ spin_lock(&tcon->open_file_lock);
cifs_stats_inc(
&tcon->stats.cifs_stats.num_oplock_brks);
if (smb2_tcon_has_lease(tcon, rsp, lw)) {
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
+ spin_unlock(&tcon->open_file_lock);
}
- spin_unlock(&cifs_file_list_lock);
}
}
spin_unlock(&cifs_tcp_ses_lock);
@@ -603,7 +603,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
- spin_lock(&cifs_file_list_lock);
+ spin_lock(&tcon->open_file_lock);
list_for_each(tmp2, &tcon->openFileList) {
cfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
@@ -615,7 +615,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
cifs_dbg(FYI, "file id match, oplock break\n");
cinode = CIFS_I(d_inode(cfile->dentry));
-
+ spin_lock(&cfile->file_info_lock);
if (!CIFS_CACHE_WRITE(cinode) &&
rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
cfile->oplock_break_cancelled = true;
@@ -637,14 +637,14 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
clear_bit(
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
&cinode->flags);
-
+ spin_unlock(&cfile->file_info_lock);
queue_work(cifsiod_wq, &cfile->oplock_break);
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
return true;
}
- spin_unlock(&cifs_file_list_lock);
+ spin_unlock(&tcon->open_file_lock);
spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "No matching file for oplock break\n");
return true;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d203c0329626..5d456ebb3813 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -28,6 +28,7 @@
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
+#include "cifs_ioctl.h"
static int
change_conf(struct TCP_Server_Info *server)
@@ -70,6 +71,10 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
*val += add;
+ if (*val > 65000) {
+ *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
+ printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
+ }
server->in_flight--;
if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
rc = change_conf(server);
@@ -287,7 +292,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
cifs_dbg(FYI, "Link Speed %lld\n",
le64_to_cpu(out_buf->LinkSpeed));
}
-
+ kfree(out_buf);
return rc;
}
#endif /* STATS2 */
@@ -541,6 +546,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
server->ops->set_oplock_level(cinode, oplock, fid->epoch,
&fid->purge_cache);
cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
+ memcpy(cfile->fid.create_guid, fid->create_guid, 16);
}
static void
@@ -699,6 +705,7 @@ smb2_clone_range(const unsigned int xid,
cchunk_out:
kfree(pcchunk);
+ kfree(retbuf);
return rc;
}
@@ -823,7 +830,6 @@ smb2_duplicate_extents(const unsigned int xid,
{
int rc;
unsigned int ret_data_len;
- char *retbuf = NULL;
struct duplicate_extents_to_file dup_ext_buf;
struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
@@ -849,7 +855,7 @@ smb2_duplicate_extents(const unsigned int xid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
true /* is_fsctl */, (char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file),
- (char **)&retbuf,
+ NULL,
&ret_data_len);
if (ret_data_len > 0)
@@ -872,7 +878,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
struct cifsFileInfo *cfile)
{
struct fsctl_set_integrity_information_req integr_info;
- char *retbuf = NULL;
unsigned int ret_data_len;
integr_info.ChecksumAlgorithm = cpu_to_le16(CHECKSUM_TYPE_UNCHANGED);
@@ -884,9 +889,53 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
FSCTL_SET_INTEGRITY_INFORMATION,
true /* is_fsctl */, (char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req),
+ NULL,
+ &ret_data_len);
+
+}
+
+static int
+smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifsFileInfo *cfile, void __user *ioc_buf)
+{
+ char *retbuf = NULL;
+ unsigned int ret_data_len = 0;
+ int rc;
+ struct smb_snapshot_array snapshot_in;
+
+ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
+ cfile->fid.volatile_fid,
+ FSCTL_SRV_ENUMERATE_SNAPSHOTS,
+ true /* is_fsctl */, NULL, 0 /* no input data */,
(char **)&retbuf,
&ret_data_len);
+ cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
+ rc, ret_data_len);
+ if (rc)
+ return rc;
+ if (ret_data_len && (ioc_buf != NULL) && (retbuf != NULL)) {
+ /* Fixup buffer */
+ if (copy_from_user(&snapshot_in, ioc_buf,
+ sizeof(struct smb_snapshot_array))) {
+ rc = -EFAULT;
+ kfree(retbuf);
+ return rc;
+ }
+ if (snapshot_in.snapshot_array_size < sizeof(struct smb_snapshot_array)) {
+ kfree(retbuf);
+ return -ERANGE;
+ }
+
+ if (ret_data_len > snapshot_in.snapshot_array_size)
+ ret_data_len = snapshot_in.snapshot_array_size;
+
+ if (copy_to_user(ioc_buf, retbuf, ret_data_len))
+ rc = -EFAULT;
+ }
+
+ kfree(retbuf);
+ return rc;
}
static int
@@ -1041,7 +1090,7 @@ smb2_set_lease_key(struct inode *inode, struct cifs_fid *fid)
static void
smb2_new_lease_key(struct cifs_fid *fid)
{
- get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+ generate_random_uuid(fid->lease_key);
}
#define SMB2_SYMLINK_STRUCT_SIZE \
@@ -1654,6 +1703,7 @@ struct smb_version_operations smb21_operations = {
.clone_range = smb2_clone_range,
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
+ .enum_snapshots = smb3_enum_snapshots,
};
struct smb_version_operations smb30_operations = {
@@ -1740,6 +1790,7 @@ struct smb_version_operations smb30_operations = {
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.fallocate = smb3_fallocate,
+ .enum_snapshots = smb3_enum_snapshots,
};
#ifdef CONFIG_CIFS_SMB311
@@ -1827,6 +1878,7 @@ struct smb_version_operations smb311_operations = {
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.fallocate = smb3_fallocate,
+ .enum_snapshots = smb3_enum_snapshots,
};
#endif /* CIFS_SMB311 */
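
Note: the smb2_add_credits() hunk above clamps the running credit total at 65000 so a misbehaving server cannot push the client toward the 16-bit credit limit. Below is a minimal user-space sketch of the same clamp-and-warn-once pattern; the names and the standalone structure are illustrative only, not part of the patch.

#include <stdio.h>
#include <stdbool.h>

#define CREDIT_CEILING 65000  /* stay clearly below the 16-bit maximum */

/* Illustrative only: add credits granted by the server, clamping the total. */
static unsigned int add_credits(unsigned int current_credits, unsigned int granted)
{
	static bool warned;
	unsigned int total = current_credits + granted;

	if (total > CREDIT_CEILING) {
		total = CREDIT_CEILING;
		if (!warned) {
			fprintf(stderr, "warning: server overflowed credits, clamping\n");
			warned = true;
		}
	}
	return total;
}

int main(void)
{
	unsigned int credits = 64900;

	credits = add_credits(credits, 500);   /* clamped to 65000 */
	printf("credits now %u\n", credits);
	return 0;
}
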
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 29e06db5f187..5ca5ea4668a1 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -100,7 +100,21 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
hdr->ProtocolId = SMB2_PROTO_NUMBER;
hdr->StructureSize = cpu_to_le16(64);
hdr->Command = smb2_cmd;
- hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
+ if (tcon && tcon->ses && tcon->ses->server) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ spin_lock(&server->req_lock);
+ /* Request up to 2 credits but don't go over the limit. */
+ if (server->credits >= server->max_credits)
+ hdr->CreditRequest = cpu_to_le16(0);
+ else
+ hdr->CreditRequest = cpu_to_le16(
+ min_t(int, server->max_credits -
+ server->credits, 2));
+ spin_unlock(&server->req_lock);
+ } else {
+ hdr->CreditRequest = cpu_to_le16(2);
+ }
hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
if (!tcon)
@@ -236,8 +250,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
cifs_mark_open_files_invalid(tcon);
+
rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&tcon->ses->session_mutex);
+
+ if (tcon->use_persistent)
+ cifs_reopen_persistent_handles(tcon);
+
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc)
goto out;
@@ -574,59 +593,42 @@ vneg_out:
return -EIO;
}
-int
-SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_cp)
+struct SMB2_sess_data {
+ unsigned int xid;
+ struct cifs_ses *ses;
+ struct nls_table *nls_cp;
+ void (*func)(struct SMB2_sess_data *);
+ int result;
+ u64 previous_session;
+
+ /* we will send the SMB in three pieces:
+ * a fixed length beginning part, an optional
+ * SPNEGO blob (which can be zero length), and a
+ * last part which will include the strings
+ * and rest of bcc area. This allows us to avoid
+ * a large buffer 17K allocation
+ */
+ int buf0_type;
+ struct kvec iov[2];
+};
+
+static int
+SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
+ int rc;
+ struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_req *req;
- struct smb2_sess_setup_rsp *rsp = NULL;
- struct kvec iov[2];
- int rc = 0;
- int resp_buftype = CIFS_NO_BUFFER;
- __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
struct TCP_Server_Info *server = ses->server;
- u16 blob_length = 0;
- struct key *spnego_key = NULL;
- char *security_blob = NULL;
- unsigned char *ntlmssp_blob = NULL;
- bool use_spnego = false; /* else use raw ntlmssp */
-
- cifs_dbg(FYI, "Session Setup\n");
-
- if (!server) {
- WARN(1, "%s: server is NULL!\n", __func__);
- return -EIO;
- }
-
- /*
- * If we are here due to reconnect, free per-smb session key
- * in case signing was required.
- */
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
-
- /*
- * If memory allocation is successful, caller of this function
- * frees it.
- */
- ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
- if (!ses->ntlmssp)
- return -ENOMEM;
- ses->ntlmssp->sesskey_per_smbsess = true;
-
- /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
- if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
- ses->sectype = RawNTLMSSP;
-
-ssetup_ntlmssp_authenticate:
- if (phase == NtLmChallenge)
- phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
if (rc)
return rc;
req->hdr.SessionId = 0; /* First session, not a reauthenticate */
+
+ /* if reconnect, we need to send previous sess id, otherwise it is 0 */
+ req->PreviousSessionId = sess_data->previous_session;
+
req->Flags = 0; /* MBZ */
/* to enable echos and oplocks */
req->hdr.CreditRequest = cpu_to_le16(3);
@@ -642,199 +644,368 @@ ssetup_ntlmssp_authenticate:
req->Capabilities = 0;
req->Channel = 0; /* MBZ */
- iov[0].iov_base = (char *)req;
+ sess_data->iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
- iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ sess_data->iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+ /*
+ * This variable will be used to clear the buffer
+ * allocated above in case of any error in the calling function.
+ */
+ sess_data->buf0_type = CIFS_SMALL_BUFFER;
- if (ses->sectype == Kerberos) {
-#ifdef CONFIG_CIFS_UPCALL
- struct cifs_spnego_msg *msg;
+ return 0;
+}
- spnego_key = cifs_get_spnego_key(ses);
- if (IS_ERR(spnego_key)) {
- rc = PTR_ERR(spnego_key);
- spnego_key = NULL;
- goto ssetup_exit;
- }
+static void
+SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
+{
+ free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+}
- msg = spnego_key->payload.data[0];
- /*
- * check version field to make sure that cifs.upcall is
- * sending us a response in an expected form
- */
- if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS,
- "bad cifs.upcall version. Expected %d got %d",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
- rc = -EKEYREJECTED;
- goto ssetup_exit;
- }
- ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
- GFP_KERNEL);
- if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- ses->auth_key.len = msg->sesskey_len;
- blob_length = msg->secblob_len;
- iov[1].iov_base = msg->data + msg->sesskey_len;
- iov[1].iov_len = blob_length;
-#else
- rc = -EOPNOTSUPP;
- goto ssetup_exit;
-#endif /* CONFIG_CIFS_UPCALL */
- } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
- ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
- GFP_KERNEL);
- if (ntlmssp_blob == NULL) {
- rc = -ENOMEM;
- goto ssetup_exit;
- }
- build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
- if (use_spnego) {
- /* blob_length = build_spnego_ntlmssp_blob(
- &security_blob,
- sizeof(struct _NEGOTIATE_MESSAGE),
- ntlmssp_blob); */
- /* BB eventually need to add this */
- cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
- rc = -EOPNOTSUPP;
- kfree(ntlmssp_blob);
- goto ssetup_exit;
- } else {
- blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
- /* with raw NTLMSSP we don't encapsulate in SPNEGO */
- security_blob = ntlmssp_blob;
- }
- iov[1].iov_base = security_blob;
- iov[1].iov_len = blob_length;
- } else if (phase == NtLmAuthenticate) {
- req->hdr.SessionId = ses->Suid;
- rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
- nls_cp);
- if (rc) {
- cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
- rc);
- goto ssetup_exit; /* BB double check error handling */
- }
- if (use_spnego) {
- /* blob_length = build_spnego_ntlmssp_blob(
- &security_blob,
- blob_length,
- ntlmssp_blob); */
- cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
- rc = -EOPNOTSUPP;
- kfree(ntlmssp_blob);
- goto ssetup_exit;
- } else {
- security_blob = ntlmssp_blob;
- }
- iov[1].iov_base = security_blob;
- iov[1].iov_len = blob_length;
- } else {
- cifs_dbg(VFS, "illegal ntlmssp phase\n");
- rc = -EIO;
- goto ssetup_exit;
- }
+static int
+SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
+{
+ int rc;
+ struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->SecurityBufferOffset =
- cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
- 1 /* pad */ - 4 /* rfc1001 len */);
- req->SecurityBufferLength = cpu_to_le16(blob_length);
+ cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
+ 1 /* pad */ - 4 /* rfc1001 len */);
+ req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
- inc_rfc1001_len(req, blob_length - 1 /* pad */);
+ inc_rfc1001_len(req, sess_data->iov[1].iov_len - 1 /* pad */);
/* BB add code to build os and lm fields */
- rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
- CIFS_LOG_ERROR | CIFS_NEG_OP);
+ rc = SendReceive2(sess_data->xid, sess_data->ses,
+ sess_data->iov, 2,
+ &sess_data->buf0_type,
+ CIFS_LOG_ERROR | CIFS_NEG_OP);
- kfree(security_blob);
- rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
- ses->Suid = rsp->hdr.SessionId;
- if (resp_buftype != CIFS_NO_BUFFER &&
- rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
- if (phase != NtLmNegotiate) {
- cifs_dbg(VFS, "Unexpected more processing error\n");
- goto ssetup_exit;
- }
- if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
- le16_to_cpu(rsp->SecurityBufferOffset)) {
- cifs_dbg(VFS, "Invalid security buffer offset %d\n",
- le16_to_cpu(rsp->SecurityBufferOffset));
- rc = -EIO;
- goto ssetup_exit;
+ return rc;
+}
+
+static int
+SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
+{
+ int rc = 0;
+ struct cifs_ses *ses = sess_data->ses;
+
+ mutex_lock(&ses->server->srv_mutex);
+ if (ses->server->sign && ses->server->ops->generate_signingkey) {
+ rc = ses->server->ops->generate_signingkey(ses);
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ if (rc) {
+ cifs_dbg(FYI,
+ "SMB3 session key generation failed\n");
+ mutex_unlock(&ses->server->srv_mutex);
+ goto keygen_exit;
}
+ }
+ if (!ses->server->session_estab) {
+ ses->server->sequence_number = 0x2;
+ ses->server->session_estab = true;
+ }
+ mutex_unlock(&ses->server->srv_mutex);
+
+ cifs_dbg(FYI, "SMB2/3 session established successfully\n");
+ spin_lock(&GlobalMid_Lock);
+ ses->status = CifsGood;
+ ses->need_reconnect = false;
+ spin_unlock(&GlobalMid_Lock);
- /* NTLMSSP Negotiate sent now processing challenge (response) */
- phase = NtLmChallenge; /* process ntlmssp challenge */
- rc = 0; /* MORE_PROCESSING is not an error here but expected */
- rc = decode_ntlmssp_challenge(rsp->Buffer,
- le16_to_cpu(rsp->SecurityBufferLength), ses);
+keygen_exit:
+ if (!ses->server->sign) {
+ kfree(ses->auth_key.response);
+ ses->auth_key.response = NULL;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_CIFS_UPCALL
+static void
+SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+{
+ int rc;
+ struct cifs_ses *ses = sess_data->ses;
+ struct cifs_spnego_msg *msg;
+ struct key *spnego_key = NULL;
+ struct smb2_sess_setup_rsp *rsp = NULL;
+
+ rc = SMB2_sess_alloc_buffer(sess_data);
+ if (rc)
+ goto out;
+
+ spnego_key = cifs_get_spnego_key(ses);
+ if (IS_ERR(spnego_key)) {
+ rc = PTR_ERR(spnego_key);
+ spnego_key = NULL;
+ goto out;
}
+ msg = spnego_key->payload.data[0];
/*
- * BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
- * but at least the raw NTLMSSP case works.
+ * check version field to make sure that cifs.upcall is
+ * sending us a response in an expected form
*/
+ if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
+ cifs_dbg(VFS,
+ "bad cifs.upcall version. Expected %d got %d",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ rc = -EKEYREJECTED;
+ goto out_put_spnego_key;
+ }
+
+ ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
+ GFP_KERNEL);
+ if (!ses->auth_key.response) {
+ cifs_dbg(VFS,
+ "Kerberos can't allocate (%u bytes) memory",
+ msg->sesskey_len);
+ rc = -ENOMEM;
+ goto out_put_spnego_key;
+ }
+ ses->auth_key.len = msg->sesskey_len;
+
+ sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
+ sess_data->iov[1].iov_len = msg->secblob_len;
+
+ rc = SMB2_sess_sendreceive(sess_data);
+ if (rc)
+ goto out_put_spnego_key;
+
+ rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+ ses->Suid = rsp->hdr.SessionId;
+
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+ cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
+
+ rc = SMB2_sess_establish_session(sess_data);
+out_put_spnego_key:
+ key_invalidate(spnego_key);
+ key_put(spnego_key);
+out:
+ sess_data->result = rc;
+ sess_data->func = NULL;
+ SMB2_sess_free_buffer(sess_data);
+}
+#else
+static void
+SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
+{
+ cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
+ sess_data->result = -EOPNOTSUPP;
+ sess_data->func = NULL;
+}
+#endif
+
+static void
+SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
+
+static void
+SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
+{
+ int rc;
+ struct cifs_ses *ses = sess_data->ses;
+ struct smb2_sess_setup_rsp *rsp = NULL;
+ char *ntlmssp_blob = NULL;
+ bool use_spnego = false; /* else use raw ntlmssp */
+ u16 blob_length = 0;
+
/*
- * No tcon so can't do
- * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
+ * If memory allocation is successful, caller of this function
+ * frees it.
*/
- if (rc != 0)
- goto ssetup_exit;
+ ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
+ if (!ses->ntlmssp) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+ ses->ntlmssp->sesskey_per_smbsess = true;
+
+ rc = SMB2_sess_alloc_buffer(sess_data);
+ if (rc)
+ goto out_err;
+
+ ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
+ GFP_KERNEL);
+ if (ntlmssp_blob == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
+ if (use_spnego) {
+ /* BB eventually need to add this */
+ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+ rc = -EOPNOTSUPP;
+ goto out;
+ } else {
+ blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
+ /* with raw NTLMSSP we don't encapsulate in SPNEGO */
+ }
+ sess_data->iov[1].iov_base = ntlmssp_blob;
+ sess_data->iov[1].iov_len = blob_length;
+
+ rc = SMB2_sess_sendreceive(sess_data);
+ rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+
+ /* If true, rc here is expected and not an error */
+ if (sess_data->buf0_type != CIFS_NO_BUFFER &&
+ rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
+ rc = 0;
+
+ if (rc)
+ goto out;
+
+ if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
+ le16_to_cpu(rsp->SecurityBufferOffset)) {
+ cifs_dbg(VFS, "Invalid security buffer offset %d\n",
+ le16_to_cpu(rsp->SecurityBufferOffset));
+ rc = -EIO;
+ goto out;
+ }
+ rc = decode_ntlmssp_challenge(rsp->Buffer,
+ le16_to_cpu(rsp->SecurityBufferLength), ses);
+ if (rc)
+ goto out;
+
+ cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+
+ ses->Suid = rsp->hdr.SessionId;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
-ssetup_exit:
- free_rsp_buf(resp_buftype, rsp);
-
- /* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
- if ((phase == NtLmChallenge) && (rc == 0))
- goto ssetup_ntlmssp_authenticate;
+out:
+ kfree(ntlmssp_blob);
+ SMB2_sess_free_buffer(sess_data);
if (!rc) {
- mutex_lock(&server->srv_mutex);
- if (server->sign && server->ops->generate_signingkey) {
- rc = server->ops->generate_signingkey(ses);
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
- if (rc) {
- cifs_dbg(FYI,
- "SMB3 session key generation failed\n");
- mutex_unlock(&server->srv_mutex);
- goto keygen_exit;
- }
- }
- if (!server->session_estab) {
- server->sequence_number = 0x2;
- server->session_estab = true;
- }
- mutex_unlock(&server->srv_mutex);
-
- cifs_dbg(FYI, "SMB2/3 session established successfully\n");
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
+ sess_data->result = 0;
+ sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
+ return;
}
+out_err:
+ kfree(ses->ntlmssp);
+ ses->ntlmssp = NULL;
+ sess_data->result = rc;
+ sess_data->func = NULL;
+}
-keygen_exit:
- if (!server->sign) {
- kfree(ses->auth_key.response);
- ses->auth_key.response = NULL;
+static void
+SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
+{
+ int rc;
+ struct cifs_ses *ses = sess_data->ses;
+ struct smb2_sess_setup_req *req;
+ struct smb2_sess_setup_rsp *rsp = NULL;
+ unsigned char *ntlmssp_blob = NULL;
+ bool use_spnego = false; /* else use raw ntlmssp */
+ u16 blob_length = 0;
+
+ rc = SMB2_sess_alloc_buffer(sess_data);
+ if (rc)
+ goto out;
+
+ req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
+ req->hdr.SessionId = ses->Suid;
+
+ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
+ sess_data->nls_cp);
+ if (rc) {
+ cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
+ goto out;
}
- if (spnego_key) {
- key_invalidate(spnego_key);
- key_put(spnego_key);
+
+ if (use_spnego) {
+ /* BB eventually need to add this */
+ cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
+ rc = -EOPNOTSUPP;
+ goto out;
}
+ sess_data->iov[1].iov_base = ntlmssp_blob;
+ sess_data->iov[1].iov_len = blob_length;
+
+ rc = SMB2_sess_sendreceive(sess_data);
+ if (rc)
+ goto out;
+
+ rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+
+ ses->Suid = rsp->hdr.SessionId;
+ ses->session_flags = le16_to_cpu(rsp->SessionFlags);
+ if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+ cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
+
+ rc = SMB2_sess_establish_session(sess_data);
+out:
+ kfree(ntlmssp_blob);
+ SMB2_sess_free_buffer(sess_data);
kfree(ses->ntlmssp);
+ ses->ntlmssp = NULL;
+ sess_data->result = rc;
+ sess_data->func = NULL;
+}
+static int
+SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
+{
+ if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
+ ses->sectype = RawNTLMSSP;
+
+ switch (ses->sectype) {
+ case Kerberos:
+ sess_data->func = SMB2_auth_kerberos;
+ break;
+ case RawNTLMSSP:
+ sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
+ break;
+ default:
+ cifs_dbg(VFS, "secType %d not supported!\n", ses->sectype);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int
+SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ struct TCP_Server_Info *server = ses->server;
+ struct SMB2_sess_data *sess_data;
+
+ cifs_dbg(FYI, "Session Setup\n");
+
+ if (!server) {
+ WARN(1, "%s: server is NULL!\n", __func__);
+ return -EIO;
+ }
+
+ sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
+ if (!sess_data)
+ return -ENOMEM;
+
+ rc = SMB2_select_sec(ses, sess_data);
+ if (rc)
+ goto out;
+ sess_data->xid = xid;
+ sess_data->ses = ses;
+ sess_data->buf0_type = CIFS_NO_BUFFER;
+ sess_data->nls_cp = (struct nls_table *) nls_cp;
+
+ while (sess_data->func)
+ sess_data->func(sess_data);
+
+ rc = sess_data->result;
+out:
+ kfree(sess_data);
return rc;
}
@@ -1164,7 +1335,7 @@ create_durable_v2_buf(struct cifs_fid *pfid)
buf->dcontext.Timeout = 0; /* Should this be configurable by workload */
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
- get_random_bytes(buf->dcontext.CreateGuid, 16);
+ generate_random_uuid(buf->dcontext.CreateGuid);
memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
@@ -2057,6 +2228,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (rdata->credits) {
buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
+ buf->CreditRequest = buf->CreditCharge;
spin_lock(&server->req_lock);
server->credits += rdata->credits -
le16_to_cpu(buf->CreditCharge);
@@ -2243,6 +2415,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
if (wdata->credits) {
req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
+ req->hdr.CreditRequest = req->hdr.CreditCharge;
spin_lock(&server->req_lock);
server->credits += wdata->credits -
le16_to_cpu(req->hdr.CreditCharge);
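
Note: the session-setup rewrite above splits one long SMB2_sess_setup() into per-phase handlers chained through sess_data->func, with the top-level function simply looping until a handler clears the pointer. A stripped-down user-space sketch of that driver pattern follows; every name in it is hypothetical.

#include <stdio.h>

struct sess_data;
typedef void (*sess_fn)(struct sess_data *);

struct sess_data {
	sess_fn func;     /* next phase to run; NULL when finished */
	int result;       /* final status reported to the caller */
	int attempt;      /* example per-session state */
};

static void phase_authenticate(struct sess_data *d)
{
	printf("authenticate (attempt %d)\n", d->attempt);
	d->result = 0;
	d->func = NULL;                /* done: stop the loop */
}

static void phase_negotiate(struct sess_data *d)
{
	printf("negotiate\n");
	d->attempt++;
	d->func = phase_authenticate;  /* hand off to the next phase */
}

int main(void)
{
	struct sess_data d = { .func = phase_negotiate };

	while (d.func)
		d.func(&d);
	printf("session setup result %d\n", d.result);
	return 0;
}
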
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ff88d9feb01e..fd3709e8de33 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -276,7 +276,7 @@ struct smb2_sess_setup_req {
__le32 Channel;
__le16 SecurityBufferOffset;
__le16 SecurityBufferLength;
- __le64 PreviousSessionId;
+ __u64 PreviousSessionId;
__u8 Buffer[1]; /* variable length GSS security buffer */
} __packed;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 5e23f64c0804..20af5187ba63 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -33,7 +33,8 @@
#define MAX_EA_VALUE_SIZE 65535
#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
-
+#define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
+#define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
/* BB need to add server (Samba e.g) support for security and trusted prefix */
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
@@ -144,6 +145,54 @@ out:
return rc;
}
+static int cifs_attrib_get(struct dentry *dentry,
+ struct inode *inode, void *value,
+ size_t size)
+{
+ ssize_t rc;
+ __u32 *pattribute;
+
+ rc = cifs_revalidate_dentry_attr(dentry);
+
+ if (rc)
+ return rc;
+
+ if ((value == NULL) || (size == 0))
+ return sizeof(__u32);
+ else if (size < sizeof(__u32))
+ return -ERANGE;
+
+ /* return dos attributes as pseudo xattr */
+ pattribute = (__u32 *)value;
+ *pattribute = CIFS_I(inode)->cifsAttrs;
+
+ return sizeof(__u32);
+}
+
+static int cifs_creation_time_get(struct dentry *dentry, struct inode *inode,
+ void *value, size_t size)
+{
+ ssize_t rc;
+ __u64 *pcreatetime;
+
+ rc = cifs_revalidate_dentry_attr(dentry);
+ if (rc)
+ return rc;
+
+ if ((value == NULL) || (size == 0))
+ return sizeof(__u64);
+ else if (size < sizeof(__u64))
+ return -ERANGE;
+
+ /* return creation time as pseudo xattr */
+ pcreatetime = (__u64 *)value;
+ *pcreatetime = CIFS_I(inode)->createtime;
+
+ return sizeof(__u64);
+}
+
+
static int cifs_xattr_get(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
@@ -168,10 +217,19 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
rc = -ENOMEM;
goto out;
}
- /* return dos attributes as pseudo xattr */
+
/* return alt name if available as pseudo attr */
switch (handler->flags) {
case XATTR_USER:
+ cifs_dbg(FYI, "%s:querying user xattr %s\n", __func__, name);
+ if (strcmp(name, CIFS_XATTR_ATTRIB) == 0) {
+ rc = cifs_attrib_get(dentry, inode, value, size);
+ break;
+ } else if (strcmp(name, CIFS_XATTR_CREATETIME) == 0) {
+ rc = cifs_creation_time_get(dentry, inode, value, size);
+ break;
+ }
+
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto out;
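
Note: the xattr hunks above expose the DOS attribute bits and the creation time as pseudo extended attributes. Assuming the full names given in the patch comments (user.cifs.dosattrib and user.cifs.creationtime), a user-space reader might look roughly like the sketch below; this is an untested illustration, not a supported tool.

#include <stdio.h>
#include <stdint.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	uint32_t attrib = 0;
	uint64_t createtime = 0;

	/* cifs_attrib_get() returns exactly sizeof(__u32) bytes */
	if (getxattr(path, "user.cifs.dosattrib", &attrib, sizeof(attrib)) ==
	    (ssize_t)sizeof(attrib))
		printf("DOS attributes: 0x%08x\n", attrib);
	else
		perror("user.cifs.dosattrib");

	/* cifs_creation_time_get() returns exactly sizeof(__u64) bytes */
	if (getxattr(path, "user.cifs.creationtime", &createtime, sizeof(createtime)) ==
	    (ssize_t)sizeof(createtime))
		printf("creation time: %llu\n", (unsigned long long)createtime);
	else
		perror("user.cifs.creationtime");

	return 0;
}
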
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61057b7dbddb..98f87fe8f186 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -151,7 +151,10 @@ static int do_page_crypto(struct inode *inode,
struct page *src_page, struct page *dest_page,
gfp_t gfp_flags)
{
- u8 xts_tweak[FS_XTS_TWEAK_SIZE];
+ struct {
+ __le64 index;
+ u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+ } xts_tweak;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
@@ -171,17 +174,15 @@ static int do_page_crypto(struct inode *inode,
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
page_crypt_complete, &ecr);
- BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- FS_XTS_TWEAK_SIZE - sizeof(index));
+ BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+ xts_tweak.index = cpu_to_le64(index);
+ memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
- xts_tweak);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
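
Note: the do_page_crypto() change above replaces a memcpy of a host-endian index into the XTS tweak with an explicit __le64 field plus zeroed padding, so the IV bytes are identical regardless of host byte order. A small user-space sketch of building such a tweak portably (sizes and names here are stand-ins for the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TWEAK_SIZE 16  /* illustrative stand-in for FS_XTS_TWEAK_SIZE */

/* Portable little-endian store, independent of host byte order. */
static void put_le64(uint8_t *p, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

/* Build the tweak: 64-bit block index, little endian, zero padded. */
static void build_tweak(uint8_t tweak[TWEAK_SIZE], uint64_t index)
{
	memset(tweak, 0, TWEAK_SIZE);
	put_le64(tweak, index);
}

int main(void)
{
	uint8_t tweak[TWEAK_SIZE];

	build_tweak(tweak, 0x0102030405060708ULL);
	for (int i = 0; i < TWEAK_SIZE; i++)
		printf("%02x", tweak[i]);
	printf("\n");   /* 0807060504030201 followed by zero padding */
	return 0;
}
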
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index ed115acb5dee..6865663aac69 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp,
if (ret)
return ret;
+ inode_lock(inode);
+
if (!inode_has_encryption_context(inode)) {
if (!S_ISDIR(inode->i_mode))
ret = -EINVAL;
@@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp,
ret = -EINVAL;
}
+ inode_unlock(inode);
+
mnt_drop_write_file(filp);
return ret;
}
diff --git a/fs/exec.c b/fs/exec.c
index 6fcfb3f7b137..4e497b9ee71e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
struct page *page;
int ret;
+ unsigned int gup_flags = FOLL_FORCE;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return NULL;
}
#endif
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
/*
* We are doing an exec(). 'current' is the process
* doing the exec and bprm->mm is the new process's mm.
*/
- ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
- 1, &page, NULL);
+ ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+ &page, NULL);
if (ret <= 0)
return NULL;
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 79101651fe9e..42f9a0a0c4ca 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -137,7 +137,7 @@ Espan:
bad_entry:
EXOFS_ERR(
"ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - "
- "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n",
+ "offset=%lu, inode=0x%llx, rec_len=%d, name_len=%d\n",
dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
_LLU(le64_to_cpu(p->inode_no)),
rec_len, p->name_len);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index d831e24dc885..41b8b44a391c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -622,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode,
u32 *bno, bool *new, bool *boundary,
int create)
{
- int err = -EIO;
+ int err;
int offsets[4];
Indirect chain[4];
Indirect *partial;
@@ -639,7 +639,7 @@ static int ext2_get_blocks(struct inode *inode,
depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
if (depth == 0)
- return (err);
+ return -EIO;
partial = ext2_get_branch(inode, depth, offsets, chain, &err);
/* Simplest case - block found, no allocation needed */
@@ -761,7 +761,6 @@ static int ext2_get_blocks(struct inode *inode,
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
got_it:
- *bno = le32_to_cpu(chain[depth-1].key);
if (count > blocks_to_boundary)
*boundary = true;
err = count;
@@ -772,6 +771,8 @@ cleanup:
brelse(partial->bh);
partial--;
}
+ if (err > 0)
+ *bno = le32_to_cpu(chain[depth-1].key);
return err;
}
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 02ddec6d8a7d..fdb19543af1e 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -128,12 +128,12 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
node = rb_first(&sbi->system_blks);
while (node) {
entry = rb_entry(node, struct ext4_system_zone, node);
- printk("%s%llu-%llu", first ? "" : ", ",
+ printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
entry->start_blk, entry->start_blk + entry->count - 1);
first = 0;
node = rb_next(node);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int ext4_setup_system_zone(struct super_block *sb)
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 3ef1df6ae9ec..1aba469f8220 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -27,16 +27,15 @@
#ifdef CONFIG_EXT4_DEBUG
extern ushort ext4_mballoc_debug;
-#define mb_debug(n, fmt, a...) \
- do { \
- if ((n) <= ext4_mballoc_debug) { \
- printk(KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __func__); \
- printk(fmt, ## a); \
- } \
- } while (0)
+#define mb_debug(n, fmt, ...) \
+do { \
+ if ((n) <= ext4_mballoc_debug) { \
+ printk(KERN_DEBUG "(%s, %d): %s: " fmt, \
+ __FILE__, __LINE__, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
#else
-#define mb_debug(n, fmt, a...) no_printk(fmt, ## a)
+#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f92f10d4f66a..104f8bfba718 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -577,12 +577,13 @@ static inline unsigned dx_node_limit(struct inode *dir)
static void dx_show_index(char * label, struct dx_entry *entries)
{
int i, n = dx_get_count (entries);
- printk(KERN_DEBUG "%s index ", label);
+ printk(KERN_DEBUG "%s index", label);
for (i = 0; i < n; i++) {
- printk("%x->%lu ", i ? dx_get_hash(entries + i) :
- 0, (unsigned long)dx_get_block(entries + i));
+ printk(KERN_CONT " %x->%lu",
+ i ? dx_get_hash(entries + i) : 0,
+ (unsigned long)dx_get_block(entries + i));
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
struct stats
@@ -679,7 +680,7 @@ static struct stats dx_show_leaf(struct inode *dir,
}
de = ext4_next_entry(de, size);
}
- printk("(%i)\n", names);
+ printk(KERN_CONT "(%i)\n", names);
return (struct stats) { names, space, 1 };
}
@@ -798,7 +799,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
q = entries + count - 1;
while (p <= q) {
m = p + (q - p) / 2;
- dxtrace(printk("."));
+ dxtrace(printk(KERN_CONT "."));
if (dx_get_hash(m) > hash)
q = m - 1;
else
@@ -810,7 +811,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
at = entries;
while (n--)
{
- dxtrace(printk(","));
+ dxtrace(printk(KERN_CONT ","));
if (dx_get_hash(++at) > hash)
{
at--;
@@ -821,7 +822,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
}
at = p - 1;
- dxtrace(printk(" %x->%u\n", at == entries ? 0 : dx_get_hash(at),
+ dxtrace(printk(KERN_CONT " %x->%u\n",
+ at == entries ? 0 : dx_get_hash(at),
dx_get_block(at)));
frame->entries = entries;
frame->at = at;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6db81fbcbaa6..20da99da0a34 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -597,14 +597,15 @@ void __ext4_std_error(struct super_block *sb, const char *function,
void __ext4_abort(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
save_error_info(sb, function, line);
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
- function, line);
- vprintk(fmt, args);
- printk("\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
+ sb->s_id, function, line, &vaf);
va_end(args);
if ((sb->s_flags & MS_RDONLY) == 0) {
@@ -2715,12 +2716,12 @@ static void print_daily_error_info(unsigned long arg)
es->s_first_error_func,
le32_to_cpu(es->s_first_error_line));
if (es->s_first_error_ino)
- printk(": inode %u",
+ printk(KERN_CONT ": inode %u",
le32_to_cpu(es->s_first_error_ino));
if (es->s_first_error_block)
- printk(": block %llu", (unsigned long long)
+ printk(KERN_CONT ": block %llu", (unsigned long long)
le64_to_cpu(es->s_first_error_block));
- printk("\n");
+ printk(KERN_CONT "\n");
}
if (es->s_last_error_time) {
printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
@@ -2729,12 +2730,12 @@ static void print_daily_error_info(unsigned long arg)
es->s_last_error_func,
le32_to_cpu(es->s_last_error_line));
if (es->s_last_error_ino)
- printk(": inode %u",
+ printk(KERN_CONT ": inode %u",
le32_to_cpu(es->s_last_error_ino));
if (es->s_last_error_block)
- printk(": block %llu", (unsigned long long)
+ printk(KERN_CONT ": block %llu", (unsigned long long)
le64_to_cpu(es->s_last_error_block));
- printk("\n");
+ printk(KERN_CONT "\n");
}
mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
}
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 73bcfd41f5f2..42145be5c6b4 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
+#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
ATTR_LIST(encryption),
+#endif
ATTR_LIST(metadata_csum_seed),
NULL,
};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c15d63389957..d77be9e9f535 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -61,18 +61,12 @@
#include "acl.h"
#ifdef EXT4_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
- printk(KERN_DEBUG "inode %s:%lu: ", \
- inode->i_sb->s_id, inode->i_ino); \
- printk(f); \
- printk("\n"); \
- } while (0)
-# define ea_bdebug(bh, f...) do { \
- printk(KERN_DEBUG "block %pg:%lu: ", \
- bh->b_bdev, (unsigned long) bh->b_blocknr); \
- printk(f); \
- printk("\n"); \
- } while (0)
+# define ea_idebug(inode, fmt, ...) \
+ printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \
+ inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
+# define ea_bdebug(bh, fmt, ...) \
+ printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
+ bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -241,7 +235,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
int error = -EFSCORRUPTED;
if (((void *) header >= end) ||
- (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+ (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
goto errout;
error = ext4_xattr_check_names(entry, end, entry);
errout:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 93985c64d8a8..6f14ee923acd 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -852,16 +852,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
for (segno = start_segno; segno < end_segno; segno++) {
- if (get_valid_blocks(sbi, segno, 1) == 0 ||
- unlikely(f2fs_cp_error(sbi)))
- goto next;
-
/* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_bug_on(sbi, !PageUptodate(sum_page));
f2fs_put_page(sum_page, 0);
+ if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ !PageUptodate(sum_page) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
+
sum = page_address(sum_page);
f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
diff --git a/fs/iomap.c b/fs/iomap.c
index 013d1d36fbbf..a8ee8c33ca78 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -433,8 +433,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
struct page *page = data;
int ret;
- ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
- NULL, iomap);
+ ret = __block_write_begin_int(page, pos, length, NULL, iomap);
if (ret)
return ret;
@@ -561,7 +560,7 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
}
while (len > 0) {
- ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+ ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
iomap_fiemap_actor);
/* inode with no (attribute) mapping will give ENOENT */
if (ret == -ENOENT)
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ad0c745ebad7..871c8b392099 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
pri_bh = NULL;
root_found:
+ /* We don't support read-write mounts */
+ if (!(s->s_flags & MS_RDONLY)) {
+ error = -EACCES;
+ goto out_freebh;
+ }
if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- /* We don't support read-write mounts */
- if (!(flags & MS_RDONLY))
- return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3d8246a9faa4..e1652665bd93 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+ spin_unlock(&journal->j_list_lock);
} else if (jh->b_transaction == journal->j_committing_transaction) {
/* first access by this transaction */
jh->b_modified = 0;
@@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "set next transaction");
spin_lock(&journal->j_list_lock);
jh->b_next_transaction = transaction;
+ spin_unlock(&journal->j_list_lock);
}
- spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
/*
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index dcd96aac02f5..cf4c636ff4da 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -110,8 +110,9 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
* kn_to: /n1/n2/n3 [depth=3]
* result: /../..
*
- * return value: length of the string. If greater than buflen,
- * then contents of buf are undefined. On error, -1 is returned.
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
struct kernfs_node *kn_from,
@@ -119,9 +120,8 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
{
struct kernfs_node *kn, *common;
const char parent_str[] = "/..";
- size_t depth_from, depth_to, len = 0, nlen = 0;
- char *p;
- int i;
+ size_t depth_from, depth_to, len = 0;
+ int i, j;
if (!kn_from)
kn_from = kernfs_root(kn_to)->kn;
@@ -131,7 +131,7 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
common = kernfs_common_ancestor(kn_from, kn_to);
if (WARN_ON(!common))
- return -1;
+ return -EINVAL;
depth_to = kernfs_depth(common, kn_to);
depth_from = kernfs_depth(common, kn_from);
@@ -144,22 +144,16 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
len < buflen ? buflen - len : 0);
/* Calculate how many bytes we need for the rest */
- for (kn = kn_to; kn != common; kn = kn->parent)
- nlen += strlen(kn->name) + 1;
-
- if (len + nlen >= buflen)
- return len + nlen;
-
- p = buf + len + nlen;
- *p = '\0';
- for (kn = kn_to; kn != common; kn = kn->parent) {
- size_t tmp = strlen(kn->name);
- p -= tmp;
- memcpy(p, kn->name, tmp);
- *(--p) = '/';
+ for (i = depth_to - 1; i >= 0; i--) {
+ for (kn = kn_to, j = 0; j < i; j++)
+ kn = kn->parent;
+ len += strlcpy(buf + len, "/",
+ len < buflen ? buflen - len : 0);
+ len += strlcpy(buf + len, kn->name,
+ len < buflen ? buflen - len : 0);
}
- return len + nlen;
+ return len;
}
/**
@@ -186,29 +180,6 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
}
/**
- * kernfs_path_len - determine the length of the full path of a given node
- * @kn: kernfs_node of interest
- *
- * The returned length doesn't include the space for the terminating '\0'.
- */
-size_t kernfs_path_len(struct kernfs_node *kn)
-{
- size_t len = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&kernfs_rename_lock, flags);
-
- do {
- len += strlen(kn->name) + 1;
- kn = kn->parent;
- } while (kn && kn->parent);
-
- spin_unlock_irqrestore(&kernfs_rename_lock, flags);
-
- return len;
-}
-
-/**
* kernfs_path_from_node - build path of node @to relative to @from.
* @from: parent kernfs_node relative to which we need to build the path
* @to: kernfs_node of interest
@@ -220,8 +191,9 @@ size_t kernfs_path_len(struct kernfs_node *kn)
* path (which includes '..'s) as needed to reach from @from to @to is
* returned.
*
- * If @buf isn't long enough, the return value will be greater than @buflen
- * and @buf contents are undefined.
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
@@ -237,28 +209,6 @@ int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
/**
- * kernfs_path - build full path of a given node
- * @kn: kernfs_node of interest
- * @buf: buffer to copy @kn's name into
- * @buflen: size of @buf
- *
- * Builds and returns the full path of @kn in @buf of @buflen bytes. The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
- */
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
-{
- int ret;
-
- ret = kernfs_path_from_node(kn, NULL, buf, buflen);
- if (ret < 0 || ret >= buflen)
- return NULL;
- return buf;
-}
-EXPORT_SYMBOL_GPL(kernfs_path);
-
-/**
* pr_cont_kernfs_name - pr_cont name of a kernfs_node
* @kn: kernfs_node of interest
*
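
Note: the kernfs_path_from_node_locked() rewrite above switches to strlcpy-style accumulation: each append returns the length the full string would need, the buffer always ends up NUL-terminated, and the caller detects truncation by comparing the return value with buflen. snprintf() shares that return convention, so the idiom can be sketched in user space as follows (the helper name is made up):

#include <stdio.h>

/*
 * Append src to buf: always NUL-terminate what fits and return the length
 * the full string would have had, so the caller can detect truncation.
 */
static size_t append(char *buf, size_t buflen, size_t len, const char *src)
{
	return len + snprintf(buf + (len < buflen ? len : buflen),
			      len < buflen ? buflen - len : 0, "%s", src);
}

int main(void)
{
	char buf[8];
	size_t len = 0;

	len = append(buf, sizeof(buf), len, "/sys");
	len = append(buf, sizeof(buf), len, "/kernel");
	len = append(buf, sizeof(buf), len, "/debug");

	/* full path needed 17 bytes; buf holds the truncated "/sys/ke" */
	printf("needed %zu, got \"%s\"\n", len, buf);
	return 0;
}
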
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 2bcb86e6e6ca..78219d5644e9 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -911,6 +911,7 @@ const struct file_operations kernfs_file_fops = {
.open = kernfs_fop_open,
.release = kernfs_fop_release,
.poll = kernfs_fop_poll,
+ .fsync = noop_fsync,
};
/**
diff --git a/fs/locks.c b/fs/locks.c
index ce93b416b490..22c5b4aa4961 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1609,6 +1609,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ percpu_down_read_preempt_disable(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1618,6 +1619,8 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
+ percpu_up_read_preempt_enable(&file_rwsem);
+
locks_dispose_list(&dispose);
}
return type;
@@ -2529,11 +2532,14 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
if (list_empty(&ctx->flc_lease))
return;
+ percpu_down_read_preempt_disable(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
if (filp == fl->fl_file)
lease_modify(fl, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
+ percpu_up_read_preempt_enable(&file_rwsem);
+
locks_dispose_list(&dispose);
}
diff --git a/fs/namei.c b/fs/namei.c
index a7f601cd521a..5b4eed221530 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -4668,6 +4668,31 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
}
EXPORT_SYMBOL(generic_readlink);
+/**
+ * vfs_get_link - get symlink body
+ * @dentry: dentry on which to get symbolic link
+ * @done: caller needs to free returned data with this
+ *
+ * Calls security hook and i_op->get_link() on the supplied inode.
+ *
+ * It does not touch atime. That's up to the caller if necessary.
+ *
+ * Does not work on "special" symlinks like /proc/$$/fd/N
+ */
+const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done)
+{
+ const char *res = ERR_PTR(-EINVAL);
+ struct inode *inode = d_inode(dentry);
+
+ if (d_is_symlink(dentry)) {
+ res = ERR_PTR(security_inode_readlink(dentry));
+ if (!res)
+ res = inode->i_op->get_link(dentry, inode, done);
+ }
+ return res;
+}
+EXPORT_SYMBOL(vfs_get_link);
+
/* get the link contents into pagecache */
const char *page_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *callback)
diff --git a/fs/namespace.c b/fs/namespace.c
index 58aca9c931ac..e6c234b1a645 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2824,6 +2824,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
return new_ns;
}
+__latent_entropy
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
struct user_namespace *user_ns, struct fs_struct *new_fs)
{
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 217847679f0e..2905479f214a 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work)
u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
u64 end = (hdr->args.offset + hdr->args.count +
PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
+ u64 lwb = hdr->args.offset + hdr->args.count;
ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
- (end - start) >> SECTOR_SHIFT, end);
+ (end - start) >> SECTOR_SHIFT, lwb);
}
pnfs_ld_write_done(hdr);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ad917bd72b38..7897826d7c51 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1545,7 +1545,7 @@ static int update_open_stateid(struct nfs4_state *state,
struct nfs_client *clp = server->nfs_client;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
- nfs4_stateid freeme = {0};
+ nfs4_stateid freeme = { };
int ret = 0;
fmode &= (FMODE_READ|FMODE_WRITE);
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 1e8fe844e69f..5355efba4bc8 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,7 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
}
}
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ret = 1;
out_release_op:
op_release(new_op);
@@ -94,8 +94,9 @@ out_drop:
static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int ret;
+ unsigned long time = (unsigned long) dentry->d_fsdata;
- if (time_before(jiffies, dentry->d_time))
+ if (time_before(jiffies, time))
return 1;
if (flags & LOOKUP_RCU)
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 66ea0cc37b18..02cc6139ec90 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -621,9 +621,9 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
* readahead cache (if any); this forces an expensive refresh of
* data for the next caller of mmap (or 'get_block' accesses)
*/
- if (file->f_path.dentry->d_inode &&
- file->f_path.dentry->d_inode->i_mapping &&
- mapping_nrpages(&file->f_path.dentry->d_inode->i_data)) {
+ if (file_inode(file) &&
+ file_inode(file)->i_mapping &&
+ mapping_nrpages(&file_inode(file)->i_data)) {
if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
gossip_debug(GOSSIP_INODE_DEBUG,
"calling flush_racache on %pU\n",
@@ -632,7 +632,7 @@ static int orangefs_file_release(struct inode *inode, struct file *file)
gossip_debug(GOSSIP_INODE_DEBUG,
"flush_racache finished\n");
}
- truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+ truncate_inode_pages(file_inode(file)->i_mapping,
0);
}
return 0;
@@ -648,7 +648,7 @@ static int orangefs_fsync(struct file *file,
{
int ret = -EINVAL;
struct orangefs_inode_s *orangefs_inode =
- ORANGEFS_I(file->f_path.dentry->d_inode);
+ ORANGEFS_I(file_inode(file));
struct orangefs_kernel_op_s *new_op = NULL;
/* required call */
@@ -661,7 +661,7 @@ static int orangefs_fsync(struct file *file,
ret = service_operation(new_op,
"orangefs_fsync",
- get_interruptible_flag(file->f_path.dentry->d_inode));
+ get_interruptible_flag(file_inode(file)));
gossip_debug(GOSSIP_FILE_DEBUG,
"orangefs_fsync got return value of %d\n",
@@ -669,7 +669,7 @@ static int orangefs_fsync(struct file *file,
op_release(new_op);
- orangefs_flush_inode(file->f_path.dentry->d_inode);
+ orangefs_flush_inode(file_inode(file));
return ret;
}
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index d15d3d2dba62..a290ff6ec756 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,7 +72,7 @@ static int orangefs_create(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -183,7 +183,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
if (IS_ERR(inode)) {
@@ -322,7 +322,7 @@ static int orangefs_symlink(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -386,7 +386,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
unlock_new_inode(inode);
- dentry->d_time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+ orangefs_set_timeout(dentry);
ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 0a82048f3aaf..3bf803d732c5 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -580,4 +580,11 @@ static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
#endif
}
+static inline void orangefs_set_timeout(struct dentry *dentry)
+{
+ unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
+
+ dentry->d_fsdata = (void *) time;
+}
+
#endif /* __ORANGEFSKERNEL_H */
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 3f803b3a1f82..aeb60f791418 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -57,6 +57,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
int uninitialized_var(error);
+ size_t slen;
if (!(old->d_inode->i_opflags & IOP_XATTR) ||
!(new->d_inode->i_opflags & IOP_XATTR))
@@ -79,7 +80,16 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
goto out;
}
- for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+ for (name = buf; list_size; name += slen) {
+ slen = strnlen(name, list_size) + 1;
+
+ /* underlying fs providing us with a broken xattr list? */
+ if (WARN_ON(slen > list_size)) {
+ error = -EIO;
+ break;
+ }
+ list_size -= slen;
+
if (ovl_is_private_xattr(name))
continue;
retry:
@@ -174,40 +184,6 @@ out_fput:
return error;
}
-static char *ovl_read_symlink(struct dentry *realdentry)
-{
- int res;
- char *buf;
- struct inode *inode = realdentry->d_inode;
- mm_segment_t old_fs;
-
- res = -EINVAL;
- if (!inode->i_op->readlink)
- goto err;
-
- res = -ENOMEM;
- buf = (char *) __get_free_page(GFP_KERNEL);
- if (!buf)
- goto err;
-
- old_fs = get_fs();
- set_fs(get_ds());
- /* The cast to a user pointer is valid due to the set_fs() */
- res = inode->i_op->readlink(realdentry,
- (char __user *)buf, PAGE_SIZE - 1);
- set_fs(old_fs);
- if (res < 0) {
- free_page((unsigned long) buf);
- goto err;
- }
- buf[res] = '\0';
-
- return buf;
-
-err:
- return ERR_PTR(res);
-}
-
static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
struct iattr attr = {
@@ -354,19 +330,20 @@ out_cleanup:
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
struct path *lowerpath, struct kstat *stat)
{
+ DEFINE_DELAYED_CALL(done);
struct dentry *workdir = ovl_workdir(dentry);
int err;
struct kstat pstat;
struct path parentpath;
+ struct dentry *lowerdentry = lowerpath->dentry;
struct dentry *upperdir;
struct dentry *upperdentry;
- const struct cred *old_cred;
- char *link = NULL;
+ const char *link = NULL;
if (WARN_ON(!workdir))
return -EROFS;
- ovl_do_check_copy_up(lowerpath->dentry);
+ ovl_do_check_copy_up(lowerdentry);
ovl_path_upper(parent, &parentpath);
upperdir = parentpath.dentry;
@@ -376,13 +353,11 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
return err;
if (S_ISLNK(stat->mode)) {
- link = ovl_read_symlink(lowerpath->dentry);
+ link = vfs_get_link(lowerdentry, &done);
if (IS_ERR(link))
return PTR_ERR(link);
}
- old_cred = ovl_override_creds(dentry->d_sb);
-
err = -EIO;
if (lock_rename(workdir, upperdir) != NULL) {
pr_err("overlayfs: failed to lock workdir+upperdir\n");
@@ -403,19 +378,16 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
}
out_unlock:
unlock_rename(workdir, upperdir);
- revert_creds(old_cred);
-
- if (link)
- free_page((unsigned long) link);
+ do_delayed_call(&done);
return err;
}
int ovl_copy_up(struct dentry *dentry)
{
- int err;
+ int err = 0;
+ const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
- err = 0;
while (!err) {
struct dentry *next;
struct dentry *parent;
@@ -447,6 +419,7 @@ int ovl_copy_up(struct dentry *dentry)
dput(parent);
dput(next);
}
+ revert_creds(old_cred);
return err;
}
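
Note: the ovl_copy_xattr() hunk above stops trusting strlen() on the xattr name list and instead bounds each entry with strnlen() against the remaining list size, bailing out if the list is malformed. A self-contained user-space sketch of that walk (names and sample data are illustrative):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <string.h>

/*
 * Walk a NUL-separated name list of total length list_size, refusing to run
 * past the end if the final entry is not NUL-terminated.
 */
static int walk_names(const char *buf, size_t list_size)
{
	const char *name;
	size_t slen;

	for (name = buf; list_size; name += slen) {
		slen = strnlen(name, list_size) + 1;
		if (slen > list_size) {
			fprintf(stderr, "malformed name list\n");
			return -1;
		}
		list_size -= slen;
		printf("entry: %s\n", name);
	}
	return 0;
}

int main(void)
{
	/* two properly terminated entries packed back to back */
	const char list[] = "user.one\0user.two";

	return walk_names(list, sizeof(list));
}
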
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 5f90ddf778ba..306b6c161840 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -14,6 +14,7 @@
#include <linux/cred.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/atomic.h>
#include "overlayfs.h"
void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
@@ -37,8 +38,10 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
{
struct dentry *temp;
char name[20];
+ static atomic_t temp_id = ATOMIC_INIT(0);
- snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
+ /* counter is allowed to wrap, since temp dentries are ephemeral */
+ snprintf(name, sizeof(name), "#%x", atomic_inc_return(&temp_id));
temp = lookup_one_len(name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode) {
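Note: the new temp-file naming replaces a kernel pointer (which leaks an address and may repeat across allocations) with a wrapping global counter. A minimal stand-alone sketch of the idiom:

#include <linux/atomic.h>
#include <linux/kernel.h>

/* Build a short, unique-enough name for an ephemeral object; the
 * counter is allowed to wrap because the objects are short-lived. */
static void make_temp_name(char *name, size_t len)
{
	static atomic_t temp_id = ATOMIC_INIT(0);

	snprintf(name, len, "#%x", atomic_inc_return(&temp_id));
}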
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c18d6a4ff456..c58f01babf30 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -19,6 +19,7 @@ static int ovl_copy_up_truncate(struct dentry *dentry)
struct dentry *parent;
struct kstat stat;
struct path lowerpath;
+ const struct cred *old_cred;
parent = dget_parent(dentry);
err = ovl_copy_up(parent);
@@ -26,12 +27,14 @@ static int ovl_copy_up_truncate(struct dentry *dentry)
goto out_dput_parent;
ovl_path_lower(dentry, &lowerpath);
- err = vfs_getattr(&lowerpath, &stat);
- if (err)
- goto out_dput_parent;
- stat.size = 0;
- err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = vfs_getattr(&lowerpath, &stat);
+ if (!err) {
+ stat.size = 0;
+ err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat);
+ }
+ revert_creds(old_cred);
out_dput_parent:
dput(parent);
@@ -153,45 +156,18 @@ static const char *ovl_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct dentry *realdentry;
- struct inode *realinode;
const struct cred *old_cred;
const char *p;
if (!dentry)
return ERR_PTR(-ECHILD);
- realdentry = ovl_dentry_real(dentry);
- realinode = realdentry->d_inode;
-
- if (WARN_ON(!realinode->i_op->get_link))
- return ERR_PTR(-EPERM);
-
old_cred = ovl_override_creds(dentry->d_sb);
- p = realinode->i_op->get_link(realdentry, realinode, done);
+ p = vfs_get_link(ovl_dentry_real(dentry), done);
revert_creds(old_cred);
return p;
}
-static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
-{
- struct path realpath;
- struct inode *realinode;
- const struct cred *old_cred;
- int err;
-
- ovl_path_real(dentry, &realpath);
- realinode = realpath.dentry->d_inode;
-
- if (!realinode->i_op->readlink)
- return -EINVAL;
-
- old_cred = ovl_override_creds(dentry->d_sb);
- err = realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
- revert_creds(old_cred);
- return err;
-}
-
bool ovl_is_private_xattr(const char *name)
{
return strncmp(name, OVL_XATTR_PREFIX,
@@ -375,7 +351,7 @@ static const struct inode_operations ovl_file_inode_operations = {
static const struct inode_operations ovl_symlink_inode_operations = {
.setattr = ovl_setattr,
.get_link = ovl_get_link,
- .readlink = ovl_readlink,
+ .readlink = generic_readlink,
.getattr = ovl_getattr,
.listxattr = ovl_listxattr,
.update_time = ovl_update_time,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 7e3f0127fc1a..bcf3965be819 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -273,12 +273,11 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
{
int res;
char val;
- struct inode *inode = dentry->d_inode;
- if (!S_ISDIR(inode->i_mode) || !(inode->i_opflags & IOP_XATTR))
+ if (!d_is_dir(dentry))
return false;
- res = __vfs_getxattr(dentry, inode, OVL_XATTR_OPAQUE, &val, 1);
+ res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
if (res == 1 && val == 'y')
return true;
@@ -419,16 +418,12 @@ static bool ovl_dentry_weird(struct dentry *dentry)
DCACHE_OP_COMPARE);
}
-static inline struct dentry *ovl_lookup_real(struct super_block *ovl_sb,
- struct dentry *dir,
+static inline struct dentry *ovl_lookup_real(struct dentry *dir,
const struct qstr *name)
{
- const struct cred *old_cred;
struct dentry *dentry;
- old_cred = ovl_override_creds(ovl_sb);
dentry = lookup_one_len_unlocked(name->name, dir, name->len);
- revert_creds(old_cred);
if (IS_ERR(dentry)) {
if (PTR_ERR(dentry) == -ENOENT)
@@ -469,6 +464,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct ovl_entry *oe;
+ const struct cred *old_cred;
struct ovl_entry *poe = dentry->d_parent->d_fsdata;
struct path *stack = NULL;
struct dentry *upperdir, *upperdentry = NULL;
@@ -479,9 +475,10 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int i;
int err;
+ old_cred = ovl_override_creds(dentry->d_sb);
upperdir = ovl_upperdentry_dereference(poe);
if (upperdir) {
- this = ovl_lookup_real(dentry->d_sb, upperdir, &dentry->d_name);
+ this = ovl_lookup_real(upperdir, &dentry->d_name);
err = PTR_ERR(this);
if (IS_ERR(this))
goto out;
@@ -514,8 +511,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
bool opaque = false;
struct path lowerpath = poe->lowerstack[i];
- this = ovl_lookup_real(dentry->d_sb,
- lowerpath.dentry, &dentry->d_name);
+ this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
err = PTR_ERR(this);
if (IS_ERR(this)) {
/*
@@ -588,6 +584,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
ovl_copyattr(realdentry->d_inode, inode);
}
+ revert_creds(old_cred);
oe->opaque = upperopaque;
oe->__upperdentry = upperdentry;
memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
@@ -606,6 +603,7 @@ out_put:
out_put_upper:
dput(upperdentry);
out:
+ revert_creds(old_cred);
return ERR_PTR(err);
}
@@ -834,6 +832,19 @@ retry:
if (err)
goto out_dput;
+ /*
+ * Try to remove POSIX ACL xattrs from workdir. We are good if:
+ *
+ * a) success (there was a POSIX ACL xattr and was removed)
+ * b) -ENODATA (there was no POSIX ACL xattr)
+ * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported)
+ *
+ * There are various other error values that could effectively
+ * mean that the xattr doesn't exist (e.g. -ERANGE is returned
+ * if the xattr name is too long), but the set of filesystems
+ * allowed as upper is limited to "normal" ones, where checking
+ * for the above two errors is sufficient.
+ */
err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
if (err && err != -ENODATA && err != -EOPNOTSUPP)
goto out_dput;
@@ -1292,6 +1303,12 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!oe)
goto out_put_cred;
+ sb->s_magic = OVERLAYFS_SUPER_MAGIC;
+ sb->s_op = &ovl_super_operations;
+ sb->s_xattr = ovl_xattr_handlers;
+ sb->s_fs_info = ufs;
+ sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR));
if (!root_dentry)
goto out_free_oe;
@@ -1315,12 +1332,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
ovl_inode_init(d_inode(root_dentry), realinode, !!upperpath.dentry);
ovl_copyattr(realinode, d_inode(root_dentry));
- sb->s_magic = OVERLAYFS_SUPER_MAGIC;
- sb->s_op = &ovl_super_operations;
- sb->s_xattr = ovl_xattr_handlers;
sb->s_root = root_dentry;
- sb->s_fs_info = ufs;
- sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
return 0;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 89600fd5963d..81818adb8e9e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -412,10 +412,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
vsize = task_vsize(mm);
- if (permitted) {
- eip = KSTK_EIP(task);
- esp = KSTK_ESP(task);
- }
+ /*
+ * esp and eip are intentionally zeroed out. There is no
+ * non-racy way to read them without freezing the task.
+ * Programs that need reliable values can use ptrace(2).
+ */
}
get_task_comm(tcomm, task);
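Note: as the new comment says, userspace that really needs another task's instruction and stack pointers must go through ptrace(2) now that /proc/PID/stat reports them as zero. A hedged userspace sketch for x86-64, with error handling trimmed:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <stdio.h>

static void dump_ip_sp(pid_t pid)
{
	struct user_regs_struct regs;

	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
	waitpid(pid, NULL, 0);			/* wait for the stop */
	ptrace(PTRACE_GETREGS, pid, NULL, &regs);
	printf("ip=%llx sp=%llx\n", regs.rip, regs.rsp);
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
}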
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c2964d890c9a..ca651ac00660 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -832,6 +832,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
unsigned long addr = *ppos;
ssize_t copied;
char *page;
+ unsigned int flags;
if (!mm)
return 0;
@@ -844,6 +845,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
+ /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
+ flags = FOLL_FORCE;
+ if (write)
+ flags |= FOLL_WRITE;
+
while (count > 0) {
int this_len = min_t(int, count, PAGE_SIZE);
@@ -852,7 +858,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
break;
}
- this_len = access_remote_vm(mm, addr, page, this_len, write);
+ this_len = access_remote_vm(mm, addr, page, this_len, flags);
if (!this_len) {
if (!copied)
copied = -EIO;
@@ -964,8 +970,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (env_start + src),
- page, this_len, 0);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
if (retval <= 0) {
ret = retval;
@@ -1007,6 +1012,9 @@ static ssize_t auxv_read(struct file *file, char __user *buf,
{
struct mm_struct *mm = file->private_data;
unsigned int nwords = 0;
+
+ if (!mm)
+ return 0;
do {
nwords += 2;
} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6909582ce5e5..35b92d81692f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -266,24 +266,15 @@ static int do_maps_open(struct inode *inode, struct file *file,
* /proc/PID/maps that is the stack of the main task.
*/
static int is_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, int is_pid)
+ struct vm_area_struct *vma)
{
- int stack = 0;
-
- if (is_pid) {
- stack = vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack;
- } else {
- struct inode *inode = priv->inode;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task)
- stack = vma_is_stack_for_task(vma, task);
- rcu_read_unlock();
- }
- return stack;
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+ * in languages like Go.
+ */
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
}
static void
@@ -354,7 +345,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
goto done;
}
- if (is_stack(priv, vma, is_pid))
+ if (is_stack(priv, vma))
name = "[stack]";
}
@@ -1669,7 +1660,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_file_path(m, file, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
- } else if (is_stack(proc_priv, vma, is_pid)) {
+ } else if (is_stack(proc_priv, vma)) {
seq_puts(m, " stack");
}
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index faacb0c0d857..37175621e890 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm,
}
static int is_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, int is_pid)
+ struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
- int stack = 0;
-
- if (is_pid) {
- stack = vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack;
- } else {
- struct inode *inode = priv->inode;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task)
- stack = vma_is_stack_for_task(vma, task);
- rcu_read_unlock();
- }
- return stack;
+
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+ * in languages like Go.
+ */
+ return vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack;
}
/*
@@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "");
- } else if (mm && is_stack(priv, vma, is_pid)) {
+ } else if (mm && is_stack(priv, vma)) {
seq_pad(m, ' ');
seq_printf(m, "[stack]");
}
diff --git a/fs/read_write.c b/fs/read_write.c
index 66215a7b17cf..190e0d362581 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -730,6 +730,35 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter,
/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)
+/**
+ * rw_copy_check_uvector() - Copy an array of &struct iovec from userspace
+ * into the kernel and check that it is valid.
+ *
+ * @type: One of %CHECK_IOVEC_ONLY, %READ, or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @fast_pointer.
+ * @fast_pointer: Pointer to (usually small on-stack) kernel array.
+ * @ret_pointer: (output parameter) Pointer to a variable that will point to
+ * either @fast_pointer, a newly allocated kernel array, or NULL,
+ * depending on which array was used.
+ *
+ * This function copies an array of &struct iovec of @nr_segs from
+ * userspace into the kernel and checks that each element is valid (e.g.
+ * it does not point to a kernel address or cause overflow by being too
+ * large, etc.).
+ *
+ * As an optimization, the caller may provide a pointer to a small
+ * on-stack array in @fast_pointer, typically %UIO_FASTIOV elements long
+ * (the size of this array, or 0 if unused, should be given in @fast_segs).
+ *
+ * @ret_pointer will always point to the array that was used, so the
+ * caller must take care not to call kfree() on it unconditionally, e.g.
+ * when the @fast_pointer array was used and was allocated on the stack.
+ *
+ * Return: The total number of bytes covered by the iovec array on success
+ * or a negative error code on error.
+ */
ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_pointer,
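Note: the new kerneldoc spells out the calling convention; in practice callers follow roughly the shape sketched below (a simplified composite, not a specific in-tree user):

#include <linux/kernel.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/fs.h>

static ssize_t import_user_iovec(const struct iovec __user *uvector,
				 unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t len;

	len = rw_copy_check_uvector(READ, uvector, nr_segs,
				    ARRAY_SIZE(iovstack), iovstack, &iov);
	if (len < 0)
		return len;

	/* ... hand iov / len to the actual I/O path here ... */

	if (iov != iovstack)	/* only free what the helper allocated */
		kfree(iov);
	return len;
}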
diff --git a/fs/super.c b/fs/super.c
index c2ff475c1711..c183835566c1 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1269,25 +1269,34 @@ EXPORT_SYMBOL(__sb_start_write);
static void sb_wait_write(struct super_block *sb, int level)
{
percpu_down_write(sb->s_writers.rw_sem + level-1);
- /*
- * We are going to return to userspace and forget about this lock, the
- * ownership goes to the caller of thaw_super() which does unlock.
- *
- * FIXME: we should do this before return from freeze_super() after we
- * called sync_filesystem(sb) and s_op->freeze_fs(sb), and thaw_super()
- * should re-acquire these locks before s_op->unfreeze_fs(sb). However
- * this leads to lockdep false-positives, so currently we do the early
- * release right after acquire.
- */
- percpu_rwsem_release(sb->s_writers.rw_sem + level-1, 0, _THIS_IP_);
}
-static void sb_freeze_unlock(struct super_block *sb)
+/*
+ * We are going to return to userspace and forget about these locks, the
+ * ownership goes to the caller of thaw_super() which does unlock().
+ */
+static void lockdep_sb_freeze_release(struct super_block *sb)
+{
+ int level;
+
+ for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+ percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+/*
+ * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
+ */
+static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
int level;
for (level = 0; level < SB_FREEZE_LEVELS; ++level)
percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
+}
+
+static void sb_freeze_unlock(struct super_block *sb)
+{
+ int level;
for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
percpu_up_write(sb->s_writers.rw_sem + level);
@@ -1379,10 +1388,11 @@ int freeze_super(struct super_block *sb)
}
}
/*
- * This is just for debugging purposes so that fs can warn if it
- * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
+ * For debugging purposes so that fs can warn if it sees write activity
+ * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
*/
sb->s_writers.frozen = SB_FREEZE_COMPLETE;
+ lockdep_sb_freeze_release(sb);
up_write(&sb->s_umount);
return 0;
}
@@ -1399,7 +1409,7 @@ int thaw_super(struct super_block *sb)
int error;
down_write(&sb->s_umount);
- if (sb->s_writers.frozen == SB_UNFROZEN) {
+ if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
up_write(&sb->s_umount);
return -EINVAL;
}
@@ -1409,11 +1419,14 @@ int thaw_super(struct super_block *sb)
goto out;
}
+ lockdep_sb_freeze_acquire(sb);
+
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
if (error) {
printk(KERN_ERR
"VFS:Filesystem thaw failed\n");
+ lockdep_sb_freeze_release(sb);
up_write(&sb->s_umount);
return error;
}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 94374e435025..2b67bda2021b 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -21,14 +21,14 @@ DEFINE_SPINLOCK(sysfs_symlink_target_lock);
void sysfs_warn_dup(struct kernfs_node *parent, const char *name)
{
- char *buf, *path = NULL;
+ char *buf;
buf = kzalloc(PATH_MAX, GFP_KERNEL);
if (buf)
- path = kernfs_path(parent, buf, PATH_MAX);
+ kernfs_path(parent, buf, PATH_MAX);
WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s/%s'\n",
- path, name);
+ buf, name);
kfree(buf);
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c8f60df2733e..ca16c5d7bab1 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -439,7 +439,7 @@ static unsigned int vfs_dent_type(uint8_t type)
*/
static int ubifs_readdir(struct file *file, struct dir_context *ctx)
{
- int err;
+ int err = 0;
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
@@ -541,14 +541,20 @@ out:
kfree(file->private_data);
file->private_data = NULL;
- if (err != -ENOENT) {
+ if (err != -ENOENT)
ubifs_err(c, "cannot find next direntry, error %d", err);
- return err;
- }
+ else
+ /*
+ * -ENOENT is a non-fatal error in this context; the TNC uses
+ * it to indicate that the cursor moved past the current directory
+ * and readdir() has to stop.
+ */
+ err = 0;
+
/* 2 is a special value indicating that there are no more direntries */
ctx->pos = 2;
- return 0;
+ return err;
}
/* Free saved readdir() state when the directory is closed */
@@ -1060,9 +1066,9 @@ static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
-static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
struct ubifs_info *c = old_dir->i_sb->s_fs_info;
struct inode *old_inode = d_inode(old_dentry);
@@ -1323,7 +1329,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
-static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -1336,7 +1342,7 @@ static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
- return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -1387,7 +1393,7 @@ const struct inode_operations ubifs_dir_inode_operations = {
.mkdir = ubifs_mkdir,
.rmdir = ubifs_rmdir,
.mknod = ubifs_mknod,
- .rename = ubifs_rename2,
+ .rename = ubifs_rename,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
.listxattr = ubifs_listxattr,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c2f4d41ed73..d9f9615bfd71 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -172,6 +172,7 @@ out_cancel:
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= nm->len;
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -478,6 +479,7 @@ out_cancel:
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += nm->len;
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index c27344cf38e1..c6eb21940783 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3974,9 +3974,6 @@ xfs_bmap_remap_alloc(
* allocating, so skip that check by pretending to be freeing.
*/
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
- if (error)
- goto error0;
-error0:
xfs_perag_put(args.pag);
if (error)
trace_xfs_bmap_remap_alloc_error(ap->ip, error, _RET_IP_);
@@ -3999,6 +3996,39 @@ xfs_bmap_alloc(
return xfs_bmap_btalloc(ap);
}
+/* Trim extent to fit a logical block range. */
+void
+xfs_trim_extent(
+ struct xfs_bmbt_irec *irec,
+ xfs_fileoff_t bno,
+ xfs_filblks_t len)
+{
+ xfs_fileoff_t distance;
+ xfs_fileoff_t end = bno + len;
+
+ if (irec->br_startoff + irec->br_blockcount <= bno ||
+ irec->br_startoff >= end) {
+ irec->br_blockcount = 0;
+ return;
+ }
+
+ if (irec->br_startoff < bno) {
+ distance = bno - irec->br_startoff;
+ if (isnullstartblock(irec->br_startblock))
+ irec->br_startblock = DELAYSTARTBLOCK;
+ if (irec->br_startblock != DELAYSTARTBLOCK &&
+ irec->br_startblock != HOLESTARTBLOCK)
+ irec->br_startblock += distance;
+ irec->br_startoff += distance;
+ irec->br_blockcount -= distance;
+ }
+
+ if (end < irec->br_startoff + irec->br_blockcount) {
+ distance = irec->br_startoff + irec->br_blockcount - end;
+ irec->br_blockcount -= distance;
+ }
+}
+
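Note: later hunks in this series use the new helper to clip an extent record to just the range a caller cares about; the calling pattern looks like the fragment below (illustrative values, with 'got' assumed to come from an extent lookup such as xfs_bmap_search_extents()):

	struct xfs_bmbt_irec got;	/* filled in by an extent lookup */
	xfs_fileoff_t offset_fsb = 100, end_fsb = 164;

	/* Clip 'got' to [offset_fsb, end_fsb); if the record lies wholly
	 * outside that window its br_blockcount drops to zero. */
	xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);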
/*
* Trim the returned map to the required bounds
*/
@@ -4829,6 +4859,219 @@ xfs_bmap_split_indlen(
return stolen;
}
+int
+xfs_bmap_del_extent_delay(
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ struct xfs_bmbt_irec new;
+ int64_t da_old, da_new, da_diff = 0;
+ xfs_fileoff_t del_endoff, got_endoff;
+ xfs_filblks_t got_indlen, new_indlen, stolen;
+ int error = 0, state = 0;
+ bool isrt;
+
+ XFS_STATS_INC(mp, xs_del_exlist);
+
+ isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+ del_endoff = del->br_startoff + del->br_blockcount;
+ got_endoff = got->br_startoff + got->br_blockcount;
+ da_old = startblockval(got->br_startblock);
+ da_new = 0;
+
+ ASSERT(*idx >= 0);
+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(del->br_blockcount > 0);
+ ASSERT(got->br_startoff <= del->br_startoff);
+ ASSERT(got_endoff >= del_endoff);
+
+ if (isrt) {
+ int64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
+
+ do_div(rtexts, mp->m_sb.sb_rextsize);
+ xfs_mod_frextents(mp, rtexts);
+ }
+
+ /*
+ * Update the inode delalloc counter now and wait to update the
+ * sb counters as we might have to borrow some blocks for the
+ * indirect block accounting.
+ */
+ xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
+ isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+ ip->i_delayed_blks -= del->br_blockcount;
+
+ if (whichfork == XFS_COW_FORK)
+ state |= BMAP_COWFORK;
+
+ if (got->br_startoff == del->br_startoff)
+ state |= BMAP_LEFT_CONTIG;
+ if (got_endoff == del_endoff)
+ state |= BMAP_RIGHT_CONTIG;
+
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ /*
+ * Matches the whole extent. Delete the entry.
+ */
+ xfs_iext_remove(ip, *idx, 1, state);
+ --*idx;
+ break;
+ case BMAP_LEFT_CONTIG:
+ /*
+ * Deleting the first part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_startoff = del_endoff;
+ got->br_blockcount -= del->br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+ got->br_blockcount), da_old);
+ got->br_startblock = nullstartblock((int)da_new);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case BMAP_RIGHT_CONTIG:
+ /*
+ * Deleting the last part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount = got->br_blockcount - del->br_blockcount;
+ da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
+ got->br_blockcount), da_old);
+ got->br_startblock = nullstartblock((int)da_new);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case 0:
+ /*
+ * Deleting the middle of the extent.
+ *
+ * Distribute the original indlen reservation across the two new
+ * extents. Steal blocks from the deleted extent if necessary.
+ * Stealing blocks simply fudges the fdblocks accounting below.
+ * Warn if either of the new indlen reservations is zero as this
+ * can lead to delalloc problems.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+
+ got->br_blockcount = del->br_startoff - got->br_startoff;
+ got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
+
+ new.br_blockcount = got_endoff - del_endoff;
+ new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
+
+ WARN_ON_ONCE(!got_indlen || !new_indlen);
+ stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
+ del->br_blockcount);
+
+ got->br_startblock = nullstartblock((int)got_indlen);
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
+
+ new.br_startoff = del_endoff;
+ new.br_state = got->br_state;
+ new.br_startblock = nullstartblock((int)new_indlen);
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 1, &new, state);
+
+ da_new = got_indlen + new_indlen - stolen;
+ del->br_blockcount -= stolen;
+ break;
+ }
+
+ ASSERT(da_old >= da_new);
+ da_diff = da_old - da_new;
+ if (!isrt)
+ da_diff += del->br_blockcount;
+ if (da_diff)
+ xfs_mod_fdblocks(mp, da_diff, false);
+ return error;
+}
+
+void
+xfs_bmap_del_extent_cow(
+ struct xfs_inode *ip,
+ xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec new;
+ xfs_fileoff_t del_endoff, got_endoff;
+ int state = BMAP_COWFORK;
+
+ XFS_STATS_INC(mp, xs_del_exlist);
+
+ del_endoff = del->br_startoff + del->br_blockcount;
+ got_endoff = got->br_startoff + got->br_blockcount;
+
+ ASSERT(*idx >= 0);
+ ASSERT(*idx < ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+ ASSERT(del->br_blockcount > 0);
+ ASSERT(got->br_startoff <= del->br_startoff);
+ ASSERT(got_endoff >= del_endoff);
+ ASSERT(!isnullstartblock(got->br_startblock));
+
+ if (got->br_startoff == del->br_startoff)
+ state |= BMAP_LEFT_CONTIG;
+ if (got_endoff == del_endoff)
+ state |= BMAP_RIGHT_CONTIG;
+
+ switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
+ case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
+ /*
+ * Matches the whole extent. Delete the entry.
+ */
+ xfs_iext_remove(ip, *idx, 1, state);
+ --*idx;
+ break;
+ case BMAP_LEFT_CONTIG:
+ /*
+ * Deleting the first part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_startoff = del_endoff;
+ got->br_blockcount -= del->br_blockcount;
+ got->br_startblock = del->br_startblock + del->br_blockcount;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case BMAP_RIGHT_CONTIG:
+ /*
+ * Deleting the last part of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount -= del->br_blockcount;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+ break;
+ case 0:
+ /*
+ * Deleting the middle of the extent.
+ */
+ trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+ got->br_blockcount = del->br_startoff - got->br_startoff;
+ xfs_bmbt_set_all(xfs_iext_get_ext(ifp, *idx), got);
+ trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+ new.br_startoff = del_endoff;
+ new.br_blockcount = got_endoff - del_endoff;
+ new.br_state = got->br_state;
+ new.br_startblock = del->br_startblock + del->br_blockcount;
+
+ ++*idx;
+ xfs_iext_insert(ip, *idx, 1, &new, state);
+ break;
+ }
+}
+
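Note: the switch in both helpers keys off how the deleted range lines up with the edges of the existing record. A hedged worked example, not taken from the patch:

	/* got = [10, 20), del = [10, 14): only the left edges coincide, so
	 * state = BMAP_LEFT_CONTIG and the record shrinks in place:
	 *
	 *   got->br_startoff   = 14	(del_endoff)
	 *   got->br_blockcount = 6	(20 - 14)
	 *
	 * Deleting [12, 16) instead matches neither edge ("case 0"), so the
	 * record is split into [10, 12) plus a new record covering [16, 20). */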
/*
* Called by xfs_bmapi to update file extent records and the btree
* after removing space (or undoing a delayed allocation).
@@ -5171,175 +5414,6 @@ done:
return error;
}
-/* Remove an extent from the CoW fork. Similar to xfs_bmap_del_extent. */
-int
-xfs_bunmapi_cow(
- struct xfs_inode *ip,
- struct xfs_bmbt_irec *del)
-{
- xfs_filblks_t da_new;
- xfs_filblks_t da_old;
- xfs_fsblock_t del_endblock = 0;
- xfs_fileoff_t del_endoff;
- int delay;
- struct xfs_bmbt_rec_host *ep;
- int error;
- struct xfs_bmbt_irec got;
- xfs_fileoff_t got_endoff;
- struct xfs_ifork *ifp;
- struct xfs_mount *mp;
- xfs_filblks_t nblks;
- struct xfs_bmbt_irec new;
- /* REFERENCED */
- uint qfield;
- xfs_filblks_t temp;
- xfs_filblks_t temp2;
- int state = BMAP_COWFORK;
- int eof;
- xfs_extnum_t eidx;
-
- mp = ip->i_mount;
- XFS_STATS_INC(mp, xs_del_exlist);
-
- ep = xfs_bmap_search_extents(ip, del->br_startoff, XFS_COW_FORK, &eof,
- &eidx, &got, &new);
-
- ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); ifp = ifp;
- ASSERT((eidx >= 0) && (eidx < ifp->if_bytes /
- (uint)sizeof(xfs_bmbt_rec_t)));
- ASSERT(del->br_blockcount > 0);
- ASSERT(got.br_startoff <= del->br_startoff);
- del_endoff = del->br_startoff + del->br_blockcount;
- got_endoff = got.br_startoff + got.br_blockcount;
- ASSERT(got_endoff >= del_endoff);
- delay = isnullstartblock(got.br_startblock);
- ASSERT(isnullstartblock(del->br_startblock) == delay);
- qfield = 0;
- error = 0;
- /*
- * If deleting a real allocation, must free up the disk space.
- */
- if (!delay) {
- nblks = del->br_blockcount;
- qfield = XFS_TRANS_DQ_BCOUNT;
- /*
- * Set up del_endblock and cur for later.
- */
- del_endblock = del->br_startblock + del->br_blockcount;
- da_old = da_new = 0;
- } else {
- da_old = startblockval(got.br_startblock);
- da_new = 0;
- nblks = 0;
- }
- qfield = qfield;
- nblks = nblks;
-
- /*
- * Set flag value to use in switch statement.
- * Left-contig is 2, right-contig is 1.
- */
- switch (((got.br_startoff == del->br_startoff) << 1) |
- (got_endoff == del_endoff)) {
- case 3:
- /*
- * Matches the whole extent. Delete the entry.
- */
- xfs_iext_remove(ip, eidx, 1, BMAP_COWFORK);
- --eidx;
- break;
-
- case 2:
- /*
- * Deleting the first part of the extent.
- */
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_startoff(ep, del_endoff);
- temp = got.br_blockcount - del->br_blockcount;
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- xfs_bmbt_set_startblock(ep, del_endblock);
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- break;
-
- case 1:
- /*
- * Deleting the last part of the extent.
- */
- temp = got.br_blockcount - del->br_blockcount;
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- if (delay) {
- temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- da_old);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- da_new = temp;
- break;
- }
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- break;
-
- case 0:
- /*
- * Deleting the middle of the extent.
- */
- temp = del->br_startoff - got.br_startoff;
- trace_xfs_bmap_pre_update(ip, eidx, state, _THIS_IP_);
- xfs_bmbt_set_blockcount(ep, temp);
- new.br_startoff = del_endoff;
- temp2 = got_endoff - del_endoff;
- new.br_blockcount = temp2;
- new.br_state = got.br_state;
- if (!delay) {
- new.br_startblock = del_endblock;
- } else {
- temp = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
- temp2 = xfs_bmap_worst_indlen(ip, temp2);
- new.br_startblock = nullstartblock((int)temp2);
- da_new = temp + temp2;
- while (da_new > da_old) {
- if (temp) {
- temp--;
- da_new--;
- xfs_bmbt_set_startblock(ep,
- nullstartblock((int)temp));
- }
- if (da_new == da_old)
- break;
- if (temp2) {
- temp2--;
- da_new--;
- new.br_startblock =
- nullstartblock((int)temp2);
- }
- }
- }
- trace_xfs_bmap_post_update(ip, eidx, state, _THIS_IP_);
- xfs_iext_insert(ip, eidx + 1, 1, &new, state);
- ++eidx;
- break;
- }
-
- /*
- * Account for change in delayed indirect blocks.
- * Nothing to do for disk quota accounting here.
- */
- ASSERT(da_old >= da_new);
- if (da_old > da_new)
- xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
-
- return error;
-}
-
/*
* Unmap (remove) blocks from a file.
* If nexts is nonzero then the number of extents to remove is limited to
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f97db7132564..7cae6ec27fa6 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -190,6 +190,8 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
#define XFS_BMAP_TRACE_EXLIST(ip,c,w)
#endif
+void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
+ xfs_filblks_t len);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
@@ -221,7 +223,11 @@ int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops, int *done);
-int xfs_bunmapi_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *del);
+int xfs_bmap_del_extent_delay(struct xfs_inode *ip, int whichfork,
+ xfs_extnum_t *idx, struct xfs_bmbt_irec *got,
+ struct xfs_bmbt_irec *del);
+void xfs_bmap_del_extent_cow(struct xfs_inode *ip, xfs_extnum_t *idx,
+ struct xfs_bmbt_irec *got, struct xfs_bmbt_irec *del);
int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 5c8e6f2ce44f..0e80993c8a59 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4826,7 +4826,7 @@ xfs_btree_calc_size(
return rval;
}
-int
+static int
xfs_btree_count_blocks_helper(
struct xfs_btree_cur *cur,
int level,
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index 3cc3cf767474..ac9a003dd29a 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
if (mp->m_quotainfo)
ndquots = mp->m_quotainfo->qi_dqperchunk;
else
- ndquots = xfs_calc_dquots_per_chunk(
- XFS_BB_TO_FSB(mp, bp->b_length));
+ ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
for (i = 0; i < ndquots; i++, d++) {
if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index f6547fc5e016..6b7579e7b60a 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -865,7 +865,6 @@ typedef struct xfs_timestamp {
* padding field for v3 inodes.
*/
#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
typedef struct xfs_dinode {
__be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
__be16 di_mode; /* mode and type of file */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 8de9a3a29589..134424fac434 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -57,6 +57,17 @@ xfs_inobp_check(
}
#endif
+bool
+xfs_dinode_good_version(
+ struct xfs_mount *mp,
+ __u8 version)
+{
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ return version == 3;
+
+ return version == 1 || version == 2;
+}
+
/*
* If we are doing readahead on an inode buffer, we might be in log recovery
* reading an inode allocation buffer that hasn't yet been replayed, and hence
@@ -91,7 +102,7 @@ xfs_inode_buf_verify(
dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
- XFS_DINODE_GOOD_VERSION(dip->di_version);
+ xfs_dinode_good_version(mp, dip->di_version);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
XFS_RANDOM_ITOBP_INOTOBP))) {
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 62d9d4681c8c..3cfe12a4f58a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -74,6 +74,8 @@ void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
struct xfs_dinode *to);
+bool xfs_dinode_good_version(struct xfs_mount *mp, __u8 version);
+
#if defined(DEBUG)
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#else
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index a314fc7b56fa..6e4f7f900fea 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -249,6 +249,7 @@ xfs_file_dio_aio_read(
struct xfs_inode *ip = XFS_I(inode);
loff_t isize = i_size_read(inode);
size_t count = iov_iter_count(to);
+ loff_t end = iocb->ki_pos + count - 1;
struct iov_iter data;
struct xfs_buftarg *target;
ssize_t ret = 0;
@@ -272,49 +273,21 @@ xfs_file_dio_aio_read(
file_accessed(iocb->ki_filp);
- /*
- * Locking is a bit tricky here. If we take an exclusive lock for direct
- * IO, we effectively serialise all new concurrent read IO to this file
- * and block it behind IO that is currently in progress because IO in
- * progress holds the IO lock shared. We only need to hold the lock
- * exclusive to blow away the page cache, so only take lock exclusively
- * if the page cache needs invalidation. This allows the normal direct
- * IO case of no page cache pages to proceeed concurrently without
- * serialisation.
- */
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
if (mapping->nrpages) {
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
- xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
+ if (ret)
+ goto out_unlock;
/*
- * The generic dio code only flushes the range of the particular
- * I/O. Because we take an exclusive lock here, this whole
- * sequence is considerably more expensive for us. This has a
- * noticeable performance impact for any file with cached pages,
- * even when outside of the range of the particular I/O.
- *
- * Hence, amortize the cost of the lock against a full file
- * flush and reduce the chances of repeated iolock cycles going
- * forward.
+ * Invalidate whole pages. This can return an error if we fail
+ * to invalidate a page, but this should never happen on XFS.
+ * Warn if it does fail.
*/
- if (mapping->nrpages) {
- ret = filemap_write_and_wait(mapping);
- if (ret) {
- xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
- return ret;
- }
-
- /*
- * Invalidate whole pages. This can return an error if
- * we fail to invalidate a page, but this should never
- * happen on XFS. Warn if it does fail.
- */
- ret = invalidate_inode_pages2(mapping);
- WARN_ON_ONCE(ret);
- ret = 0;
- }
- xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ ret = invalidate_inode_pages2_range(mapping,
+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
+ WARN_ON_ONCE(ret);
+ ret = 0;
}
data = *to;
@@ -324,8 +297,9 @@ xfs_file_dio_aio_read(
iocb->ki_pos += ret;
iov_iter_advance(to, ret);
}
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+out_unlock:
+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
}
@@ -570,61 +544,49 @@ xfs_file_dio_aio_write(
if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
return -EINVAL;
- /* "unaligned" here means not aligned to a filesystem block */
- if ((iocb->ki_pos & mp->m_blockmask) ||
- ((iocb->ki_pos + count) & mp->m_blockmask))
- unaligned_io = 1;
-
/*
- * We don't need to take an exclusive lock unless there page cache needs
- * to be invalidated or unaligned IO is being executed. We don't need to
- * consider the EOF extension case here because
- * xfs_file_aio_write_checks() will relock the inode as necessary for
- * EOF zeroing cases and fill out the new inode size as appropriate.
+ * Don't take the exclusive iolock here unless the I/O is unaligned to
+ * the file system block size. We don't need to consider the EOF
+ * extension case here because xfs_file_aio_write_checks() will relock
+ * the inode as necessary for EOF zeroing cases and fill out the new
+ * inode size as appropriate.
*/
- if (unaligned_io || mapping->nrpages)
+ if ((iocb->ki_pos & mp->m_blockmask) ||
+ ((iocb->ki_pos + count) & mp->m_blockmask)) {
+ unaligned_io = 1;
iolock = XFS_IOLOCK_EXCL;
- else
+ } else {
iolock = XFS_IOLOCK_SHARED;
- xfs_rw_ilock(ip, iolock);
-
- /*
- * Recheck if there are cached pages that need invalidate after we got
- * the iolock to protect against other threads adding new pages while
- * we were waiting for the iolock.
- */
- if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
- xfs_rw_iunlock(ip, iolock);
- iolock = XFS_IOLOCK_EXCL;
- xfs_rw_ilock(ip, iolock);
}
+ xfs_rw_ilock(ip, iolock);
+
ret = xfs_file_aio_write_checks(iocb, from, &iolock);
if (ret)
goto out;
count = iov_iter_count(from);
end = iocb->ki_pos + count - 1;
- /*
- * See xfs_file_dio_aio_read() for why we do a full-file flush here.
- */
if (mapping->nrpages) {
- ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
if (ret)
goto out;
+
/*
* Invalidate whole pages. This can return an error if we fail
* to invalidate a page, but this should never happen on XFS.
* Warn if it does fail.
*/
- ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+ ret = invalidate_inode_pages2_range(mapping,
+ iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
ret = 0;
}
/*
* If we are doing unaligned IO, wait for all other IO to drain,
- * otherwise demote the lock if we had to flush cached pages
+ * otherwise demote the lock if we had to take the exclusive lock
+ * for other reasons in xfs_file_aio_write_checks.
*/
if (unaligned_io)
inode_dio_wait(inode);
@@ -947,134 +909,6 @@ out_unlock:
return error;
}
-/*
- * Flush all file writes out to disk.
- */
-static int
-xfs_file_wait_for_io(
- struct inode *inode,
- loff_t offset,
- size_t len)
-{
- loff_t rounding;
- loff_t ioffset;
- loff_t iendoffset;
- loff_t bs;
- int ret;
-
- bs = inode->i_sb->s_blocksize;
- inode_dio_wait(inode);
-
- rounding = max_t(xfs_off_t, bs, PAGE_SIZE);
- ioffset = round_down(offset, rounding);
- iendoffset = round_up(offset + len, rounding) - 1;
- ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
- iendoffset);
- return ret;
-}
-
-/* Hook up to the VFS reflink function */
-STATIC int
-xfs_file_share_range(
- struct file *file_in,
- loff_t pos_in,
- struct file *file_out,
- loff_t pos_out,
- u64 len,
- bool is_dedupe)
-{
- struct inode *inode_in;
- struct inode *inode_out;
- ssize_t ret;
- loff_t bs;
- loff_t isize;
- int same_inode;
- loff_t blen;
- unsigned int flags = 0;
-
- inode_in = file_inode(file_in);
- inode_out = file_inode(file_out);
- bs = inode_out->i_sb->s_blocksize;
-
- /* Don't touch certain kinds of inodes */
- if (IS_IMMUTABLE(inode_out))
- return -EPERM;
- if (IS_SWAPFILE(inode_in) ||
- IS_SWAPFILE(inode_out))
- return -ETXTBSY;
-
- /* Reflink only works within this filesystem. */
- if (inode_in->i_sb != inode_out->i_sb)
- return -EXDEV;
- same_inode = (inode_in->i_ino == inode_out->i_ino);
-
- /* Don't reflink dirs, pipes, sockets... */
- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
- return -EISDIR;
- if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
- return -EINVAL;
- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
- return -EINVAL;
-
- /* Don't share DAX file data for now. */
- if (IS_DAX(inode_in) || IS_DAX(inode_out))
- return -EINVAL;
-
- /* Are we going all the way to the end? */
- isize = i_size_read(inode_in);
- if (isize == 0)
- return 0;
- if (len == 0)
- len = isize - pos_in;
-
- /* Ensure offsets don't wrap and the input is inside i_size */
- if (pos_in + len < pos_in || pos_out + len < pos_out ||
- pos_in + len > isize)
- return -EINVAL;
-
- /* Don't allow dedupe past EOF in the dest file */
- if (is_dedupe) {
- loff_t disize;
-
- disize = i_size_read(inode_out);
- if (pos_out >= disize || pos_out + len > disize)
- return -EINVAL;
- }
-
- /* If we're linking to EOF, continue to the block boundary. */
- if (pos_in + len == isize)
- blen = ALIGN(isize, bs) - pos_in;
- else
- blen = len;
-
- /* Only reflink if we're aligned to block boundaries */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
- !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
- return -EINVAL;
-
- /* Don't allow overlapped reflink within the same file */
- if (same_inode && pos_out + blen > pos_in && pos_out < pos_in + blen)
- return -EINVAL;
-
- /* Wait for the completion of any pending IOs on srcfile */
- ret = xfs_file_wait_for_io(inode_in, pos_in, len);
- if (ret)
- goto out;
- ret = xfs_file_wait_for_io(inode_out, pos_out, len);
- if (ret)
- goto out;
-
- if (is_dedupe)
- flags |= XFS_REFLINK_DEDUPE;
- ret = xfs_reflink_remap_range(XFS_I(inode_in), pos_in, XFS_I(inode_out),
- pos_out, len, flags);
- if (ret < 0)
- goto out;
-
-out:
- return ret;
-}
-
STATIC ssize_t
xfs_file_copy_range(
struct file *file_in,
@@ -1086,7 +920,7 @@ xfs_file_copy_range(
{
int error;
- error = xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+ error = xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, false);
if (error)
return error;
@@ -1101,7 +935,7 @@ xfs_file_clone_range(
loff_t pos_out,
u64 len)
{
- return xfs_file_share_range(file_in, pos_in, file_out, pos_out,
+ return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
len, false);
}
@@ -1124,7 +958,7 @@ xfs_file_dedupe_range(
if (len > XFS_MAX_DEDUPE_LEN)
len = XFS_MAX_DEDUPE_LEN;
- error = xfs_file_share_range(src_file, loff, dst_file, dst_loff,
+ error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
len, true);
if (error)
return error;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 14796b744e0a..f295049db681 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1656,9 +1656,9 @@ void
xfs_inode_set_cowblocks_tag(
xfs_inode_t *ip)
{
- trace_xfs_inode_set_eofblocks_tag(ip);
+ trace_xfs_inode_set_cowblocks_tag(ip);
return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
- trace_xfs_perag_set_eofblocks,
+ trace_xfs_perag_set_cowblocks,
XFS_ICI_COWBLOCKS_TAG);
}
@@ -1666,7 +1666,7 @@ void
xfs_inode_clear_cowblocks_tag(
xfs_inode_t *ip)
{
- trace_xfs_inode_clear_eofblocks_tag(ip);
+ trace_xfs_inode_clear_cowblocks_tag(ip);
return __xfs_inode_clear_eofblocks_tag(ip,
- trace_xfs_perag_clear_eofblocks, XFS_ICI_COWBLOCKS_TAG);
+ trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d907eb9f8ef3..436e109bb01e 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -566,6 +566,17 @@ xfs_file_iomap_begin_delay(
xfs_bmap_search_extents(ip, offset_fsb, XFS_DATA_FORK, &eof, &idx,
&got, &prev);
if (!eof && got.br_startoff <= offset_fsb) {
+ if (xfs_is_reflink_inode(ip)) {
+ bool shared;
+
+ end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
+ maxbytes_fsb);
+ xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
+ error = xfs_reflink_reserve_cow(ip, &got, &shared);
+ if (error)
+ goto out_unlock;
+ }
+
trace_xfs_iomap_found(ip, offset, count, 0, &got);
goto done;
}
@@ -961,19 +972,13 @@ xfs_file_iomap_begin(
struct xfs_mount *mp = ip->i_mount;
struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
- bool shared, trimmed;
int nimaps = 1, error = 0;
+ bool shared = false, trimmed = false;
unsigned lockmode;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
- error = xfs_reflink_reserve_cow_range(ip, offset, length);
- if (error < 0)
- return error;
- }
-
if ((flags & IOMAP_WRITE) && !IS_DAX(inode) &&
!xfs_get_extsz_hint(ip)) {
/* Reserve delalloc blocks for regular writeback. */
@@ -981,7 +986,16 @@ xfs_file_iomap_begin(
iomap);
}
- lockmode = xfs_ilock_data_map_shared(ip);
+ /*
+ * COW writes will allocate delalloc space, so we need to make sure
+ * to take the lock exclusively here.
+ */
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+ lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ } else {
+ lockmode = xfs_ilock_data_map_shared(ip);
+ }
ASSERT(offset <= mp->m_super->s_maxbytes);
if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
@@ -991,16 +1005,24 @@ xfs_file_iomap_begin(
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, 0);
- if (error) {
- xfs_iunlock(ip, lockmode);
- return error;
+ if (error)
+ goto out_unlock;
+
+ if (flags & IOMAP_REPORT) {
+ /* Trim the mapping to the nearest shared extent boundary. */
+ error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
+ &trimmed);
+ if (error)
+ goto out_unlock;
}
- /* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
- if (error) {
- xfs_iunlock(ip, lockmode);
- return error;
+ if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
+ error = xfs_reflink_reserve_cow(ip, &imap, &shared);
+ if (error)
+ goto out_unlock;
+
+ end_fsb = imap.br_startoff + imap.br_blockcount;
+ length = XFS_FSB_TO_B(mp, end_fsb) - offset;
}
if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
@@ -1039,6 +1061,9 @@ xfs_file_iomap_begin(
if (shared)
iomap->flags |= IOMAP_F_SHARED;
return 0;
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return error;
}
static int
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index fc7873942bea..b341f10cf481 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1009,6 +1009,7 @@ xfs_mountfs(
out_quota:
xfs_qm_unmount_quotas(mp);
out_rtunmount:
+ mp->m_super->s_flags &= ~MS_ACTIVE;
xfs_rtunmount_inodes(mp);
out_rele_rip:
IRELE(rip);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 5965e9455d91..a279b4e7f5fe 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -182,7 +182,8 @@ xfs_reflink_trim_around_shared(
if (!xfs_is_reflink_inode(ip) ||
ISUNWRITTEN(irec) ||
irec->br_startblock == HOLESTARTBLOCK ||
- irec->br_startblock == DELAYSTARTBLOCK) {
+ irec->br_startblock == DELAYSTARTBLOCK ||
+ isnullstartblock(irec->br_startblock)) {
*shared = false;
return 0;
}
@@ -227,50 +228,54 @@ xfs_reflink_trim_around_shared(
}
}
-/* Create a CoW reservation for a range of blocks within a file. */
-static int
-__xfs_reflink_reserve_cow(
+/*
+ * Trim the passed-in imap to the next shared/unshared extent boundary, and
+ * if imap->br_startoff points to a shared extent, reserve space for it in
+ * the COW fork. In this case *shared is set to true, else to false.
+ *
+ * Note that imap will always contain the block numbers for the existing blocks
+ * in the data fork, as the upper layers need them for read-modify-write
+ * operations.
+ */
+int
+xfs_reflink_reserve_cow(
struct xfs_inode *ip,
- xfs_fileoff_t *offset_fsb,
- xfs_fileoff_t end_fsb,
- bool *skipped)
+ struct xfs_bmbt_irec *imap,
+ bool *shared)
{
- struct xfs_bmbt_irec got, prev, imap;
- xfs_fileoff_t orig_end_fsb;
- int nimaps, eof = 0, error = 0;
- bool shared = false, trimmed = false;
+ struct xfs_bmbt_irec got, prev;
+ xfs_fileoff_t end_fsb, orig_end_fsb;
+ int eof = 0, error = 0;
+ bool trimmed;
xfs_extnum_t idx;
xfs_extlen_t align;
- /* Already reserved? Skip the refcount btree access. */
- xfs_bmap_search_extents(ip, *offset_fsb, XFS_COW_FORK, &eof, &idx,
+ /*
+ * Search the COW fork extent list first. This serves two purposes:
+ * first this implements the speculative preallocation using cowextsize,
+ * so that we also unshare blocks adjacent to shared blocks instead
+ * of just the shared blocks themselves. Second, the lookup in the
+ * extent list is generally faster than going out to the shared extent
+ * tree.
+ */
+ xfs_bmap_search_extents(ip, imap->br_startoff, XFS_COW_FORK, &eof, &idx,
&got, &prev);
- if (!eof && got.br_startoff <= *offset_fsb) {
- end_fsb = orig_end_fsb = got.br_startoff + got.br_blockcount;
- trace_xfs_reflink_cow_found(ip, &got);
- goto done;
- }
+ if (!eof && got.br_startoff <= imap->br_startoff) {
+ trace_xfs_reflink_cow_found(ip, imap);
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
- /* Read extent from the source file. */
- nimaps = 1;
- error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
- &imap, &nimaps, 0);
- if (error)
- goto out_unlock;
- ASSERT(nimaps == 1);
+ *shared = true;
+ return 0;
+ }
/* Trim the mapping to the nearest shared extent boundary. */
- error = xfs_reflink_trim_around_shared(ip, &imap, &shared, &trimmed);
+ error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
if (error)
- goto out_unlock;
-
- end_fsb = orig_end_fsb = imap.br_startoff + imap.br_blockcount;
+ return error;
/* Not shared? Just report the (potentially capped) extent. */
- if (!shared) {
- *skipped = true;
- goto done;
- }
+ if (!*shared)
+ return 0;
/*
* Fork all the shared blocks from our write offset until the end of
@@ -278,72 +283,38 @@ __xfs_reflink_reserve_cow(
*/
error = xfs_qm_dqattach_locked(ip, 0);
if (error)
- goto out_unlock;
+ return error;
+
+ end_fsb = orig_end_fsb = imap->br_startoff + imap->br_blockcount;
align = xfs_eof_alignment(ip, xfs_get_cowextsz_hint(ip));
if (align)
end_fsb = roundup_64(end_fsb, align);
retry:
- error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, *offset_fsb,
- end_fsb - *offset_fsb, &got,
- &prev, &idx, eof);
+ error = xfs_bmapi_reserve_delalloc(ip, XFS_COW_FORK, imap->br_startoff,
+ end_fsb - imap->br_startoff, &got, &prev, &idx, eof);
switch (error) {
case 0:
break;
case -ENOSPC:
case -EDQUOT:
/* retry without any preallocation */
- trace_xfs_reflink_cow_enospc(ip, &imap);
+ trace_xfs_reflink_cow_enospc(ip, imap);
if (end_fsb != orig_end_fsb) {
end_fsb = orig_end_fsb;
goto retry;
}
/*FALLTHRU*/
default:
- goto out_unlock;
+ return error;
}
if (end_fsb != orig_end_fsb)
xfs_inode_set_cowblocks_tag(ip);
trace_xfs_reflink_cow_alloc(ip, &got);
-done:
- *offset_fsb = end_fsb;
-out_unlock:
- return error;
-}
-
-/* Create a CoW reservation for part of a file. */
-int
-xfs_reflink_reserve_cow_range(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t count)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t offset_fsb, end_fsb;
- bool skipped = false;
- int error;
-
- trace_xfs_reflink_reserve_cow_range(ip, offset, count);
-
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- end_fsb = XFS_B_TO_FSB(mp, offset + count);
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- while (offset_fsb < end_fsb) {
- error = __xfs_reflink_reserve_cow(ip, &offset_fsb, end_fsb,
- &skipped);
- if (error) {
- trace_xfs_reflink_reserve_cow_range_error(ip, error,
- _RET_IP_);
- break;
- }
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
- return error;
+ return 0;
}
/* Allocate all CoW reservations covering a range of blocks in a file. */
@@ -358,9 +329,8 @@ __xfs_reflink_allocate_cow(
struct xfs_defer_ops dfops;
struct xfs_trans *tp;
xfs_fsblock_t first_block;
- xfs_fileoff_t next_fsb;
int nimaps = 1, error;
- bool skipped = false;
+ bool shared;
xfs_defer_init(&dfops, &first_block);
@@ -371,33 +341,38 @@ __xfs_reflink_allocate_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
- next_fsb = *offset_fsb;
- error = __xfs_reflink_reserve_cow(ip, &next_fsb, end_fsb, &skipped);
+ /* Read extent from the source file. */
+ nimaps = 1;
+ error = xfs_bmapi_read(ip, *offset_fsb, end_fsb - *offset_fsb,
+ &imap, &nimaps, 0);
+ if (error)
+ goto out_unlock;
+ ASSERT(nimaps == 1);
+
+ error = xfs_reflink_reserve_cow(ip, &imap, &shared);
if (error)
goto out_trans_cancel;
- if (skipped) {
- *offset_fsb = next_fsb;
+ if (!shared) {
+ *offset_fsb = imap.br_startoff + imap.br_blockcount;
goto out_trans_cancel;
}
xfs_trans_ijoin(tp, ip, 0);
- error = xfs_bmapi_write(tp, ip, *offset_fsb, next_fsb - *offset_fsb,
+ error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
XFS_BMAPI_COWFORK, &first_block,
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
&imap, &nimaps, &dfops);
if (error)
goto out_trans_cancel;
- /* We might not have been able to map the whole delalloc extent */
- *offset_fsb = min(*offset_fsb + imap.br_blockcount, next_fsb);
-
error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_trans_cancel;
error = xfs_trans_commit(tp);
+ *offset_fsb = imap.br_startoff + imap.br_blockcount;
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
@@ -536,58 +511,49 @@ xfs_reflink_cancel_cow_blocks(
xfs_fileoff_t offset_fsb,
xfs_fileoff_t end_fsb)
{
- struct xfs_bmbt_irec irec;
- xfs_filblks_t count_fsb;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec got, prev, del;
+ xfs_extnum_t idx;
xfs_fsblock_t firstfsb;
struct xfs_defer_ops dfops;
- int error = 0;
- int nimaps;
+ int error = 0, eof = 0;
if (!xfs_is_reflink_inode(ip))
return 0;
- /* Go find the old extent in the CoW fork. */
- while (offset_fsb < end_fsb) {
- nimaps = 1;
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
- error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
- &nimaps, XFS_BMAPI_COWFORK);
- if (error)
- break;
- ASSERT(nimaps == 1);
-
- trace_xfs_reflink_cancel_cow(ip, &irec);
+ xfs_bmap_search_extents(ip, offset_fsb, XFS_COW_FORK, &eof, &idx,
+ &got, &prev);
+ if (eof)
+ return 0;
- if (irec.br_startblock == DELAYSTARTBLOCK) {
- /* Free a delayed allocation. */
- xfs_mod_fdblocks(ip->i_mount, irec.br_blockcount,
- false);
- ip->i_delayed_blks -= irec.br_blockcount;
+ while (got.br_startoff < end_fsb) {
+ del = got;
+ xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+ trace_xfs_reflink_cancel_cow(ip, &del);
- /* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &irec);
+ if (isnullstartblock(del.br_startblock)) {
+ error = xfs_bmap_del_extent_delay(ip, XFS_COW_FORK,
+ &idx, &got, &del);
if (error)
break;
- } else if (irec.br_startblock == HOLESTARTBLOCK) {
- /* empty */
} else {
xfs_trans_ijoin(*tpp, ip, 0);
xfs_defer_init(&dfops, &firstfsb);
/* Free the CoW orphan record. */
error = xfs_refcount_free_cow_extent(ip->i_mount,
- &dfops, irec.br_startblock,
- irec.br_blockcount);
+ &dfops, del.br_startblock,
+ del.br_blockcount);
if (error)
break;
xfs_bmap_add_free(ip->i_mount, &dfops,
- irec.br_startblock, irec.br_blockcount,
+ del.br_startblock, del.br_blockcount,
NULL);
/* Update quota accounting */
xfs_trans_mod_dquot_byino(*tpp, ip, XFS_TRANS_DQ_BCOUNT,
- -(long)irec.br_blockcount);
+ -(long)del.br_blockcount);
/* Roll the transaction */
error = xfs_defer_finish(tpp, &dfops, ip);
@@ -597,15 +563,18 @@ xfs_reflink_cancel_cow_blocks(
}
/* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &irec);
- if (error)
- break;
+ xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
}
- /* Roll on... */
- offset_fsb = irec.br_startoff + irec.br_blockcount;
+ if (++idx >= ifp->if_bytes / sizeof(struct xfs_bmbt_rec))
+ break;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
}
+ /* clear tag if cow fork is emptied */
+ if (!ifp->if_bytes)
+ xfs_inode_clear_cowblocks_tag(ip);
+
return error;
}
@@ -668,25 +637,26 @@ xfs_reflink_end_cow(
xfs_off_t offset,
xfs_off_t count)
{
- struct xfs_bmbt_irec irec;
- struct xfs_bmbt_irec uirec;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ struct xfs_bmbt_irec got, prev, del;
struct xfs_trans *tp;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
- xfs_filblks_t count_fsb;
xfs_fsblock_t firstfsb;
struct xfs_defer_ops dfops;
- int error;
+ int error, eof = 0;
unsigned int resblks;
- xfs_filblks_t ilen;
xfs_filblks_t rlen;
- int nimaps;
+ xfs_extnum_t idx;
trace_xfs_reflink_end_cow(ip, offset, count);
+ /* No COW extents? That's easy! */
+ if (ifp->if_bytes == 0)
+ return 0;
+
offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
/* Start a rolling transaction to switch the mappings */
resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
@@ -698,72 +668,65 @@ xfs_reflink_end_cow(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- /* Go find the old extent in the CoW fork. */
- while (offset_fsb < end_fsb) {
- /* Read extent from the source file */
- nimaps = 1;
- count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
- error = xfs_bmapi_read(ip, offset_fsb, count_fsb, &irec,
- &nimaps, XFS_BMAPI_COWFORK);
- if (error)
- goto out_cancel;
- ASSERT(nimaps == 1);
+ xfs_bmap_search_extents(ip, end_fsb - 1, XFS_COW_FORK, &eof, &idx,
+ &got, &prev);
- ASSERT(irec.br_startblock != DELAYSTARTBLOCK);
- trace_xfs_reflink_cow_remap(ip, &irec);
+ /* If there is a hole at end_fsb - 1 go to the previous extent */
+ if (eof || got.br_startoff > end_fsb) {
+ ASSERT(idx > 0);
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, --idx), &got);
+ }
- /*
- * We can have a hole in the CoW fork if part of a directio
- * write is CoW but part of it isn't.
- */
- rlen = ilen = irec.br_blockcount;
- if (irec.br_startblock == HOLESTARTBLOCK)
+ /* Walk backwards until we're out of the I/O range... */
+ while (got.br_startoff + got.br_blockcount > offset_fsb) {
+ del = got;
+ xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
+
+ /* Extent delete may have bumped idx forward */
+ if (!del.br_blockcount) {
+ idx--;
goto next_extent;
+ }
+
+ ASSERT(!isnullstartblock(got.br_startblock));
/* Unmap the old blocks in the data fork. */
- while (rlen) {
- xfs_defer_init(&dfops, &firstfsb);
- error = __xfs_bunmapi(tp, ip, irec.br_startoff,
- &rlen, 0, 1, &firstfsb, &dfops);
- if (error)
- goto out_defer;
-
- /*
- * Trim the extent to whatever got unmapped.
- * Remember, bunmapi works backwards.
- */
- uirec.br_startblock = irec.br_startblock + rlen;
- uirec.br_startoff = irec.br_startoff + rlen;
- uirec.br_blockcount = irec.br_blockcount - rlen;
- irec.br_blockcount = rlen;
- trace_xfs_reflink_cow_remap_piece(ip, &uirec);
+ xfs_defer_init(&dfops, &firstfsb);
+ rlen = del.br_blockcount;
+ error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1,
+ &firstfsb, &dfops);
+ if (error)
+ goto out_defer;
- /* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp->t_mountp,
- &dfops, uirec.br_startblock,
- uirec.br_blockcount);
- if (error)
- goto out_defer;
+ /* Trim the extent to whatever got unmapped. */
+ if (rlen) {
+ xfs_trim_extent(&del, del.br_startoff + rlen,
+ del.br_blockcount - rlen);
+ }
+ trace_xfs_reflink_cow_remap(ip, &del);
- /* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp->t_mountp, &dfops,
- ip, &uirec);
- if (error)
- goto out_defer;
+ /* Free the CoW orphan record. */
+ error = xfs_refcount_free_cow_extent(tp->t_mountp, &dfops,
+ del.br_startblock, del.br_blockcount);
+ if (error)
+ goto out_defer;
- /* Remove the mapping from the CoW fork. */
- error = xfs_bunmapi_cow(ip, &uirec);
- if (error)
- goto out_defer;
+ /* Map the new blocks into the data fork. */
+ error = xfs_bmap_map_extent(tp->t_mountp, &dfops, ip, &del);
+ if (error)
+ goto out_defer;
- error = xfs_defer_finish(&tp, &dfops, ip);
- if (error)
- goto out_defer;
- }
+ /* Remove the mapping from the CoW fork. */
+ xfs_bmap_del_extent_cow(ip, &idx, &got, &del);
+
+ error = xfs_defer_finish(&tp, &dfops, ip);
+ if (error)
+ goto out_defer;
next_extent:
- /* Roll on... */
- offset_fsb = irec.br_startoff + ilen;
+ if (idx < 0)
+ break;
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &got);
}
error = xfs_trans_commit(tp);
@@ -774,7 +737,6 @@ next_extent:
out_defer:
xfs_defer_cancel(&dfops);
-out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
@@ -1312,19 +1274,26 @@ out_error:
*/
int
xfs_reflink_remap_range(
- struct xfs_inode *src,
- xfs_off_t srcoff,
- struct xfs_inode *dest,
- xfs_off_t destoff,
- xfs_off_t len,
- unsigned int flags)
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len,
+ bool is_dedupe)
{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
struct xfs_mount *mp = src->i_mount;
+ loff_t bs = inode_out->i_sb->s_blocksize;
+ bool same_inode = (inode_in == inode_out);
xfs_fileoff_t sfsbno, dfsbno;
xfs_filblks_t fsblen;
- int error;
xfs_extlen_t cowextsize;
- bool is_same;
+ loff_t isize;
+ ssize_t ret;
+ loff_t blen;
if (!xfs_sb_version_hasreflink(&mp->m_sb))
return -EOPNOTSUPP;
@@ -1332,17 +1301,8 @@ xfs_reflink_remap_range(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- /* Don't reflink realtime inodes */
- if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
- return -EINVAL;
-
- if (flags & ~XFS_REFLINK_ALL)
- return -EINVAL;
-
- trace_xfs_reflink_remap_range(src, srcoff, len, dest, destoff);
-
/* Lock both files against IO */
- if (src->i_ino == dest->i_ino) {
+ if (same_inode) {
xfs_ilock(src, XFS_IOLOCK_EXCL);
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
} else {
@@ -1350,39 +1310,126 @@ xfs_reflink_remap_range(
xfs_lock_two_inodes(src, dest, XFS_MMAPLOCK_EXCL);
}
+ /* Don't touch certain kinds of inodes */
+ ret = -EPERM;
+ if (IS_IMMUTABLE(inode_out))
+ goto out_unlock;
+
+ ret = -ETXTBSY;
+ if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
+ goto out_unlock;
+
+
+ /* Don't reflink dirs, pipes, sockets... */
+ ret = -EISDIR;
+ if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+ goto out_unlock;
+ ret = -EINVAL;
+ if (S_ISFIFO(inode_in->i_mode) || S_ISFIFO(inode_out->i_mode))
+ goto out_unlock;
+ if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+ goto out_unlock;
+
+ /* Don't reflink realtime inodes */
+ if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
+ goto out_unlock;
+
+ /* Don't share DAX file data for now. */
+ if (IS_DAX(inode_in) || IS_DAX(inode_out))
+ goto out_unlock;
+
+ /* Are we going all the way to the end? */
+ isize = i_size_read(inode_in);
+ if (isize == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ if (len == 0)
+ len = isize - pos_in;
+
+ /* Ensure offsets don't wrap and the input is inside i_size */
+ if (pos_in + len < pos_in || pos_out + len < pos_out ||
+ pos_in + len > isize)
+ goto out_unlock;
+
+ /* Don't allow dedupe past EOF in the dest file */
+ if (is_dedupe) {
+ loff_t disize;
+
+ disize = i_size_read(inode_out);
+ if (pos_out >= disize || pos_out + len > disize)
+ goto out_unlock;
+ }
+
+ /* If we're linking to EOF, continue to the block boundary. */
+ if (pos_in + len == isize)
+ blen = ALIGN(isize, bs) - pos_in;
+ else
+ blen = len;
+
+ /* Only reflink if we're aligned to block boundaries */
+ if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
+ !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
+ goto out_unlock;
+
+ /* Don't allow overlapped reflink within the same file */
+ if (same_inode) {
+ if (pos_out + blen > pos_in && pos_out < pos_in + blen)
+ goto out_unlock;
+ }
+
+ /* Wait for the completion of any pending IOs on both files */
+ inode_dio_wait(inode_in);
+ if (!same_inode)
+ inode_dio_wait(inode_out);
+
+ ret = filemap_write_and_wait_range(inode_in->i_mapping,
+ pos_in, pos_in + len - 1);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(inode_out->i_mapping,
+ pos_out, pos_out + len - 1);
+ if (ret)
+ goto out_unlock;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
/*
* Check that the extents are the same.
*/
- if (flags & XFS_REFLINK_DEDUPE) {
- is_same = false;
- error = xfs_compare_extents(VFS_I(src), srcoff, VFS_I(dest),
- destoff, len, &is_same);
- if (error)
- goto out_error;
+ if (is_dedupe) {
+ bool is_same = false;
+
+ ret = xfs_compare_extents(inode_in, pos_in, inode_out, pos_out,
+ len, &is_same);
+ if (ret)
+ goto out_unlock;
if (!is_same) {
- error = -EBADE;
- goto out_error;
+ ret = -EBADE;
+ goto out_unlock;
}
}
- error = xfs_reflink_set_inode_flag(src, dest);
- if (error)
- goto out_error;
+ ret = xfs_reflink_set_inode_flag(src, dest);
+ if (ret)
+ goto out_unlock;
/*
* Invalidate the page cache so that we can clear any CoW mappings
* in the destination file.
*/
- truncate_inode_pages_range(&VFS_I(dest)->i_data, destoff,
- PAGE_ALIGN(destoff + len) - 1);
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + len) - 1);
- dfsbno = XFS_B_TO_FSBT(mp, destoff);
- sfsbno = XFS_B_TO_FSBT(mp, srcoff);
+ dfsbno = XFS_B_TO_FSBT(mp, pos_out);
+ sfsbno = XFS_B_TO_FSBT(mp, pos_in);
fsblen = XFS_B_TO_FSB(mp, len);
- error = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
- destoff + len);
- if (error)
- goto out_error;
+ ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
+ pos_out + len);
+ if (ret)
+ goto out_unlock;
/*
* Carry the cowextsize hint from src to dest if we're sharing the
@@ -1390,26 +1437,24 @@ xfs_reflink_remap_range(
* has a cowextsize hint, and the destination file does not.
*/
cowextsize = 0;
- if (srcoff == 0 && len == i_size_read(VFS_I(src)) &&
+ if (pos_in == 0 && len == i_size_read(inode_in) &&
(src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
- destoff == 0 && len >= i_size_read(VFS_I(dest)) &&
+ pos_out == 0 && len >= i_size_read(inode_out) &&
!(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
cowextsize = src->i_d.di_cowextsize;
- error = xfs_reflink_update_dest(dest, destoff + len, cowextsize);
- if (error)
- goto out_error;
+ ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
-out_error:
+out_unlock:
xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
xfs_iunlock(src, XFS_IOLOCK_EXCL);
if (src->i_ino != dest->i_ino) {
xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
xfs_iunlock(dest, XFS_IOLOCK_EXCL);
}
- if (error)
- trace_xfs_reflink_remap_range_error(dest, error, _RET_IP_);
- return error;
+ if (ret)
+ trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+ return ret;
}
/*
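Note: the reworked xfs_reflink_remap_range() above now takes file pointers and byte offsets and performs the alignment/EOF checks itself, which is the shape needed to back the generic clone/dedupe file-range paths. A minimal userspace sketch of driving a reflink through the FICLONERANGE ioctl (file names and sizes are illustrative, kernel uapi headers providing FICLONERANGE are assumed, and the range must be filesystem-block aligned as the checks above enforce):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
        int src = open("src.img", O_RDONLY);
        int dst = open("dst.img", O_RDWR | O_CREAT, 0644);
        struct file_clone_range fcr = {
            .src_fd = src,
            .src_offset = 0,
            .src_length = 1 << 20,   /* 1 MiB, must be fs-block aligned */
            .dest_offset = 0,
        };

        /* Clone (reflink) the range from src.img into dst.img at offset 0. */
        if (src < 0 || dst < 0 || ioctl(dst, FICLONERANGE, &fcr) < 0)
            perror("reflink");
        return 0;
    }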
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 5dc3c8ac12aa..fad11607c9ad 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -26,8 +26,8 @@ extern int xfs_reflink_find_shared(struct xfs_mount *mp, xfs_agnumber_t agno,
extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip,
struct xfs_bmbt_irec *irec, bool *shared, bool *trimmed);
-extern int xfs_reflink_reserve_cow_range(struct xfs_inode *ip,
- xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_reserve_cow(struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap, bool *shared);
extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
xfs_off_t offset, xfs_off_t count);
extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
@@ -43,11 +43,8 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t count);
extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-#define XFS_REFLINK_DEDUPE 1 /* only reflink if contents match */
-#define XFS_REFLINK_ALL (XFS_REFLINK_DEDUPE)
-extern int xfs_reflink_remap_range(struct xfs_inode *src, xfs_off_t srcoff,
- struct xfs_inode *dest, xfs_off_t destoff, xfs_off_t len,
- unsigned int flags);
+extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
struct xfs_trans **tpp);
extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 5f8d55d29a11..276d3023d60f 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -512,13 +512,13 @@ static struct attribute *xfs_error_attrs[] = {
};
-struct kobj_type xfs_error_cfg_ktype = {
+static struct kobj_type xfs_error_cfg_ktype = {
.release = xfs_sysfs_release,
.sysfs_ops = &xfs_sysfs_ops,
.default_attrs = xfs_error_attrs,
};
-struct kobj_type xfs_error_ktype = {
+static struct kobj_type xfs_error_ktype = {
.release = xfs_sysfs_release,
.sysfs_ops = &xfs_sysfs_ops,
};
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad188d3a83f3..0907752be62d 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3346,7 +3346,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
-DEFINE_RW_EVENT(xfs_reflink_reserve_cow_range);
+DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
@@ -3356,9 +3356,7 @@ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range);
DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow);
DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap);
-DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_piece);
-DEFINE_INODE_ERROR_EVENT(xfs_reflink_reserve_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_allocate_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error);
DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error);
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 17a940a14477..8caa79c61703 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan);
static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
int subspace_id)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
#endif
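The stub now encodes the error in the returned pointer instead of returning NULL, so callers that test with IS_ERR()/PTR_ERR() see a real errno when PCC support is compiled out. A standalone userspace sketch of that convention (the real helpers live in include/linux/err.h; this is a simplified re-implementation for illustration):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *chan = ERR_PTR(-ENODEV);   /* what the !ACPI stub now returns */

        if (IS_ERR(chan))
            printf("pcc_mbox_request_channel failed: %ld\n", PTR_ERR(chan));
        return 0;
    }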
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
new file mode 100644
index 000000000000..63554e9f6e0c
--- /dev/null
+++ b/include/asm-generic/export.h
@@ -0,0 +1,94 @@
+#ifndef __ASM_GENERIC_EXPORT_H
+#define __ASM_GENERIC_EXPORT_H
+
+#ifndef KSYM_FUNC
+#define KSYM_FUNC(x) x
+#endif
+#ifdef CONFIG_64BIT
+#define __put .quad
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 8
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 8
+#endif
+#else
+#define __put .long
+#ifndef KSYM_ALIGN
+#define KSYM_ALIGN 4
+#endif
+#ifndef KCRC_ALIGN
+#define KCRC_ALIGN 4
+#endif
+#endif
+
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define KSYM(name) _##name
+#else
+#define KSYM(name) name
+#endif
+
+/*
+ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
+ * since we immediately emit into those sections anyway.
+ */
+.macro ___EXPORT_SYMBOL name,val,sec
+#ifdef CONFIG_MODULES
+ .globl KSYM(__ksymtab_\name)
+ .section ___ksymtab\sec+\name,"a"
+ .balign KSYM_ALIGN
+KSYM(__ksymtab_\name):
+ __put \val, KSYM(__kstrtab_\name)
+ .previous
+ .section __ksymtab_strings,"a"
+KSYM(__kstrtab_\name):
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+ .asciz "_\name"
+#else
+ .asciz "\name"
+#endif
+ .previous
+#ifdef CONFIG_MODVERSIONS
+ .section ___kcrctab\sec+\name,"a"
+ .balign KCRC_ALIGN
+KSYM(__kcrctab_\name):
+ __put KSYM(__crc_\name)
+ .weak KSYM(__crc_\name)
+ .previous
+#endif
+#endif
+.endm
+#undef __put
+
+#if defined(__KSYM_DEPS__)
+
+#define __EXPORT_SYMBOL(sym, val, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, val, sec) \
+ __cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, val, sec, conf) \
+ ___cond_export_sym(sym, val, sec, conf)
+#define ___cond_export_sym(sym, val, sec, enabled) \
+ __cond_export_sym_##enabled(sym, val, sec)
+#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#define __cond_export_sym_0(sym, val, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
+#endif
+
+#define EXPORT_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
+#define EXPORT_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
+#define EXPORT_DATA_SYMBOL(name) \
+ __EXPORT_SYMBOL(name, KSYM(name),)
+#define EXPORT_DATA_SYMBOL_GPL(name) \
+ __EXPORT_SYMBOL(name, KSYM(name),_gpl)
+
+#endif
diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h
deleted file mode 100644
index cf14f2ff40b6..000000000000
--- a/include/asm-generic/libata-portmap.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
-#define __ASM_GENERIC_LIBATA_PORTMAP_H
-
-#define ATA_PRIMARY_IRQ(dev) 14
-#define ATA_SECONDARY_IRQ(dev) 15
-
-#endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 4d9f233c4ba8..40e887068da2 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -65,6 +65,11 @@ extern void setup_per_cpu_areas(void);
#define PER_CPU_DEF_ATTRIBUTES
#endif
+#define raw_cpu_generic_read(pcp) \
+({ \
+ *raw_cpu_ptr(&(pcp)); \
+})
+
#define raw_cpu_generic_to_op(pcp, val, op) \
do { \
*raw_cpu_ptr(&(pcp)) op val; \
@@ -72,34 +77,39 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- raw_cpu_add(pcp, val); \
- raw_cpu_read(pcp); \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ \
+ *__p += val; \
+ *__p; \
})
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
- __ret = raw_cpu_read(pcp); \
- raw_cpu_write(pcp, nval); \
+ __ret = *__p; \
+ *__p = nval; \
__ret; \
})
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
+ typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
- __ret = raw_cpu_read(pcp); \
+ __ret = *__p; \
if (__ret == (oval)) \
- raw_cpu_write(pcp, nval); \
+ *__p = nval; \
__ret; \
})
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
+ typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
int __ret = 0; \
- if (raw_cpu_read(pcp1) == (oval1) && \
- raw_cpu_read(pcp2) == (oval2)) { \
- raw_cpu_write(pcp1, nval1); \
- raw_cpu_write(pcp2, nval2); \
+ if (*__p1 == (oval1) && *__p2 == (oval2)) { \
+ *__p1 = nval1; \
+ *__p2 = nval2; \
__ret = 1; \
} \
(__ret); \
@@ -109,7 +119,7 @@ do { \
({ \
typeof(pcp) __ret; \
preempt_disable(); \
- __ret = *this_cpu_ptr(&(pcp)); \
+ __ret = raw_cpu_generic_read(pcp); \
preempt_enable(); \
__ret; \
})
@@ -118,17 +128,17 @@ do { \
do { \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- *raw_cpu_ptr(&(pcp)) op val; \
+ raw_cpu_generic_to_op(pcp, val, op); \
raw_local_irq_restore(__flags); \
} while (0)
+
#define this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- raw_cpu_add(pcp, val); \
- __ret = raw_cpu_read(pcp); \
+ __ret = raw_cpu_generic_add_return(pcp, val); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -138,8 +148,7 @@ do { \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_read(pcp); \
- raw_cpu_write(pcp, nval); \
+ __ret = raw_cpu_generic_xchg(pcp, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -149,9 +158,7 @@ do { \
typeof(pcp) __ret; \
unsigned long __flags; \
raw_local_irq_save(__flags); \
- __ret = raw_cpu_read(pcp); \
- if (__ret == (oval)) \
- raw_cpu_write(pcp, nval); \
+ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \
raw_local_irq_restore(__flags); \
__ret; \
})
@@ -168,16 +175,16 @@ do { \
})
#ifndef raw_cpu_read_1
-#define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_1(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_2
-#define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_2(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_4
-#define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_4(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_read_8
-#define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp)))
+#define raw_cpu_read_8(pcp) raw_cpu_generic_read(pcp)
#endif
#ifndef raw_cpu_write_1
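The rework above evaluates the percpu pointer once per operation and then works through it, rather than expanding raw_cpu_read()/raw_cpu_write() (and hence raw_cpu_ptr()) multiple times. A standalone sketch of the resulting single-evaluation pattern, using an ordinary variable in place of a percpu one (this is a simplified analogue, not the kernel macro itself):

    #include <stdio.h>

    /* Mirrors raw_cpu_generic_cmpxchg(): take the address once, then do the
     * compare and the store through that single pointer. */
    #define generic_cmpxchg(ptr, oval, nval)                \
    ({                                                      \
            __typeof__(ptr) __p = (ptr);                    \
            __typeof__(*__p) __ret = *__p;                  \
            if (__ret == (oval))                            \
                    *__p = (nval);                          \
            __ret;                                          \
    })

    int main(void)
    {
        int v = 5;

        int old = generic_cmpxchg(&v, 5, 7);   /* matches: v becomes 7 */
        printf("old=%d v=%d\n", old, v);

        old = generic_cmpxchg(&v, 5, 9);       /* no match: v stays 7 */
        printf("old=%d v=%d\n", old, v);
        return 0;
    }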
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3e42bcdd014b..30747960bc54 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -196,9 +196,14 @@
*(.dtb.init.rodata) \
VMLINUX_SYMBOL(__dtb_end) = .;
-/* .data section */
+/*
+ * .data section
+ * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections that need to be pulled in with .data,
+ * but we don't want to pull in .data..stuff, which has its own requirements. Same for bss.
+ */
#define DATA_DATA \
- *(.data) \
+ *(.data .data.[0-9a-zA-Z_]*) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
@@ -320,76 +325,76 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
- *(SORT(___kcrctab_unused+*)) \
+ KEEP(*(SORT(___kcrctab_unused+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
- *(__ksymtab_strings) \
+ KEEP(*(__ksymtab_strings)) \
} \
\
/* __*init sections */ \
@@ -424,12 +429,17 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}
/* .text section. Map to function alignment to avoid address changes
- * during second ld run in second ld pass when generating System.map */
+ * during second ld run in second ld pass when generating System.map

+ * The LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections, which
+ * generates .text.identifier sections that need to be pulled in with .text, but
+ * some architectures define .text.foo sections that are not intended to be pulled in here.
+ * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
+ * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
*(.text.hot .text .text.fixup .text.unlikely) \
@@ -533,6 +543,7 @@
/* init and exit section handling */
#define INIT_DATA \
+ KEEP(*(SORT(___kentry+*))) \
*(.init.data) \
MEM_DISCARD(init.data) \
KERNEL_CTORS() \
@@ -599,7 +610,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.[0-9a-zA-Z_]*) \
*(COMMON) \
}
@@ -682,12 +693,12 @@
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -701,21 +712,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 94afcb2c384c..689a8b9b9c8f 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,6 +326,7 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -946,9 +947,17 @@ struct acpi_reference_args {
#ifdef CONFIG_ACPI
int acpi_dev_get_property(struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
-int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
- const char *name, size_t index,
- struct acpi_reference_args *args);
+int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index, size_t num_args,
+ struct acpi_reference_args *args);
+
+static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index,
+ struct acpi_reference_args *args)
+{
+ return __acpi_node_get_property_reference(fwnode, name, index,
+ MAX_ACPI_REFERENCE_ARGS, args);
+}
int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
void **valptr);
@@ -1024,6 +1033,14 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
return -ENXIO;
}
+static inline int
+__acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+ const char *name, size_t index, size_t num_args,
+ struct acpi_reference_args *args)
+{
+ return -ENXIO;
+}
+
static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode,
const char *name, size_t index,
struct acpi_reference_args *args)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index adbc812c009b..fdb180367ba1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -105,6 +105,7 @@ enum {
ATA_ID_CFA_KEY_MGMT = 162,
ATA_ID_CFA_MODES = 163,
ATA_ID_DATA_SET_MGMT = 169,
+ ATA_ID_SCT_CMD_XPORT = 206,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),
@@ -789,6 +790,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
}
/**
+ *
+ * Word: 206 - SCT Command Transport
+ * 15:12 - Vendor Specific
+ * 11:6 - Reserved
+ * 5 - SCT Command Transport Data Tables supported
+ * 4 - SCT Command Transport Features Control supported
+ * 3 - SCT Command Transport Error Recovery Control supported
+ * 2 - SCT Command Transport Write Same supported
+ * 1 - SCT Command Transport Long Sector Access supported
+ * 0 - SCT Command Transport supported
+ */
+static inline bool ata_id_sct_data_tables(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
+}
+
+static inline bool ata_id_sct_features_ctrl(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
+}
+
+static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
+}
+
+static inline bool ata_id_sct_write_same(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
+}
+
+static inline bool ata_id_sct_long_sector_access(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
+}
+
+static inline bool ata_id_sct_supported(const u16 *id)
+{
+ return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
+}
+
+/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data
*
@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
#endif
}
-/*
- * Write LBA Range Entries to the buffer that will cover the extent from
- * sector to sector + count. This is used for TRIM and for ADD LBA(S)
- * TO NV CACHE PINNED SET.
- */
-static inline unsigned ata_set_lba_range_entries(void *_buffer,
- unsigned num, u64 sector, unsigned long count)
-{
- __le64 *buffer = _buffer;
- unsigned i = 0, used_bytes;
-
- while (i < num) {
- u64 entry = sector |
- ((u64)(count > 0xffff ? 0xffff : count) << 48);
- buffer[i++] = __cpu_to_le64(entry);
- if (count <= 0xffff)
- break;
- count -= 0xffff;
- sector += 0xffff;
- }
-
- used_bytes = ALIGN(i * 8, 512);
- memset(buffer + i, 0, used_bytes - i * 8);
- return used_bytes;
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
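The new ata_id_sct_*() helpers above simply test individual bits of IDENTIFY DEVICE word 206, following the layout given in the comment. A standalone sketch of the same decoding against a hypothetical identify word (the bit positions are taken from the comment above; the value is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical word 206: SCT supported (bit 0), Write Same (bit 2),
         * Features Control (bit 4). */
        uint16_t id_sct = (1 << 0) | (1 << 2) | (1 << 4);

        printf("SCT supported:          %d\n", !!(id_sct & (1 << 0)));
        printf("SCT Write Same:         %d\n", !!(id_sct & (1 << 2)));
        printf("SCT Error Recovery Ctl: %d\n", !!(id_sct & (1 << 3)));
        printf("SCT Features Control:   %d\n", !!(id_sct & (1 << 4)));
        printf("SCT Data Tables:        %d\n", !!(id_sct & (1 << 5)));
        return 0;
    }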
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index cbdbf34de5b6..3bf5d33800ab 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -343,16 +343,7 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
*/
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
- char *p;
-
- p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
- if (!p) {
- strncpy(buf, "<unavailable>", buflen);
- return -ENAMETOOLONG;
- }
-
- memmove(buf, p, buf + buflen - p);
- return 0;
+ return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
/**
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 440a72164a11..c83c23f0577b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -97,7 +97,7 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk);
@@ -555,8 +555,7 @@ static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
return kernfs_name(cgrp->kn, buf, buflen);
}
-static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
- size_t buflen)
+static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
return kernfs_path(cgrp->kn, buf, buflen);
}
@@ -658,8 +657,8 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
struct user_namespace *user_ns,
struct cgroup_namespace *old_ns);
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns);
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns);
#else /* !CONFIG_CGROUPS */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index af596381fa0f..a428aec36ace 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -785,7 +785,7 @@ extern struct of_device_id __clk_of_table;
* routines, one at of_clk_init(), and one at platform device probe
*/
#define CLK_OF_DECLARE_DRIVER(name, compat, fn) \
- static void name##_of_clk_init_driver(struct device_node *np) \
+ static void __init name##_of_clk_init_driver(struct device_node *np) \
{ \
of_node_clear_flag(np, OF_POPULATED); \
fn(np); \
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 573c5a18908f..432f5c97e18f 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
#endif /* GCC_VERSION >= 40300 */
#if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 668569844d37..cf0fa5d86059 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -182,6 +182,29 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define unreachable() do { } while (1)
#endif
+/*
+ * KENTRY - kernel entry point
+ * This can be used to annotate symbols (functions or data) that are used
+ * without their linker symbol being referenced explicitly. For example,
+ * interrupt vector handlers, or functions in the kernel image that are found
+ * programmatically.
+ *
+ * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
+ * are handled in their own way (with KEEP() in linker scripts).
+ *
+ * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
+ * linker script. For example an architecture could KEEP() its entire
+ * boot/exception vector code rather than annotate each function and data.
+ */
+#ifndef KENTRY
+# define KENTRY(sym) \
+ extern typeof(sym) sym; \
+ static const unsigned long __kentry_##sym \
+ __used \
+ __attribute__((section("___kentry" "+" #sym ), used)) \
+ = (unsigned long)&sym;
+#endif
+
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
@@ -406,6 +429,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
# define __attribute_const__ /* unimplemented */
#endif
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
/*
* Tell gcc if a function is cold. The compiler will assume any path
* directly leading to the call is unlikely.
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 631ba33bbe9f..32dc0cbd51ca 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq >= target_freq)
- return i;
+ return pos - table;
- best = i;
+ best = pos;
}
- return best;
+ return best - table;
}
/* Find lowest freq at or above target in a table in descending order */
@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq > target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found above target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq < target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found below target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq <= target_freq)
- return i;
+ return pos - table;
- best = i;
+ best = pos;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq < target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found below target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
/* Choose the closest freq */
- if (target_freq - table[best].frequency > freq - target_freq)
- return i;
+ if (target_freq - best->frequency > freq - target_freq)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
+ struct cpufreq_frequency_table *pos, *best = table - 1;
unsigned int freq;
- int i, best = -1;
- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
- freq = table[i].frequency;
+ cpufreq_for_each_valid_entry(pos, table) {
+ freq = pos->frequency;
if (freq == target_freq)
- return i;
+ return pos - table;
if (freq > target_freq) {
- best = i;
+ best = pos;
continue;
}
/* No freq found above target_freq */
- if (best == -1)
- return i;
+ if (best == table - 1)
+ return pos - table;
/* Choose the closest freq */
- if (table[best].frequency - target_freq > target_freq - freq)
- return i;
+ if (best->frequency - target_freq > target_freq - freq)
+ return pos - table;
- return best;
+ return best - table;
}
- return best;
+ return best - table;
}
/* Works only on sorted freq-tables */
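The conversions above replace integer indexing with a pointer walk over the sentinel-terminated table, recovering the index as pos - table, with best starting at table - 1 as the pointer equivalent of the old -1 index. A standalone sketch of the same pattern for the ascending "at or above" lookup (types and sentinel are simplified stand-ins for the cpufreq table):

    #include <stdio.h>

    #define FREQ_TABLE_END (~0u)

    struct freq_entry { unsigned int frequency; };

    /* Ascending table: return the index of the lowest frequency >= target,
     * or the index of the highest entry if none qualifies (-1 if empty).
     * best = table - 1 mirrors the kernel's "no entry yet" idiom. */
    static int find_index_at_or_above(const struct freq_entry *table,
                                      unsigned int target)
    {
        const struct freq_entry *pos, *best = table - 1;

        for (pos = table; pos->frequency != FREQ_TABLE_END; pos++) {
            if (pos->frequency >= target)
                return pos - table;
            best = pos;
        }
        return best - table;
    }

    int main(void)
    {
        struct freq_entry t[] = {
            { 400000 }, { 800000 }, { 1200000 }, { FREQ_TABLE_END },
        };

        printf("%d\n", find_index_at_or_above(t, 900000));  /* prints 2 */
        printf("%d\n", find_index_at_or_above(t, 2000000)); /* prints 2 (highest) */
        return 0;
    }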
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9b207a8c5af3..afe641c02dca 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -81,6 +81,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,
diff --git a/include/linux/export.h b/include/linux/export.h
index d7df4922da1d..2a0f61fbc731 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,5 +1,6 @@
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
+
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
@@ -42,27 +43,26 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
-#define __CRC_SYMBOL(sym, sec) \
- extern __visible void *__crc_##sym __attribute__((weak)); \
- static const unsigned long __kcrctab_##sym \
- __used \
- __attribute__((section("___kcrctab" sec "+" #sym), unused)) \
+#define __CRC_SYMBOL(sym, sec) \
+ extern __visible void *__crc_##sym __attribute__((weak)); \
+ static const unsigned long __kcrctab_##sym \
+ __used \
+ __attribute__((section("___kcrctab" sec "+" #sym), used)) \
= (unsigned long) &__crc_##sym;
#else
#define __CRC_SYMBOL(sym, sec)
#endif
/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL(sym, sec) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec) \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
- = VMLINUX_SYMBOL_STR(sym); \
- extern const struct kernel_symbol __ksymtab_##sym; \
- __visible const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), unused)) \
+#define ___EXPORT_SYMBOL(sym, sec) \
+ extern typeof(sym) sym; \
+ __CRC_SYMBOL(sym, sec) \
+ static const char __kstrtab_##sym[] \
+ __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ = VMLINUX_SYMBOL_STR(sym); \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __used \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
= { (unsigned long)&sym, __kstrtab_##sym }
#if defined(__KSYM_DEPS__)
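The attribute on the emitted ksymtab/kcrctab entries changes from "unused" to "used": "unused" only suppresses warnings, while "used" forces the compiler to emit the object even though nothing in the translation unit references it, which is what the export tables rely on. A standalone illustration (the symbol names here are made up):

    #include <stdio.h>

    /* Only silences -Wunused warnings; the compiler may still discard it. */
    static const unsigned long maybe_dropped __attribute__((unused)) = 1;

    /* Forces the definition to be emitted even with no references to it. */
    static const unsigned long always_kept __attribute__((used)) = 2;

    int main(void)
    {
        puts("inspect the object file: 'always_kept' is emitted, 'maybe_dropped' may not be");
        return 0;
    }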
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index aca2a6a1d035..6e84b2cae6ad 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -105,7 +105,7 @@ struct files_struct *get_files_struct(struct task_struct *);
void put_files_struct(struct files_struct *fs);
void reset_files_struct(struct files_struct *);
int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
void do_close_on_exec(struct files_struct *);
int iterate_fd(struct files_struct *, unsigned,
int (*)(const void *, struct file *, unsigned),
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bc65d5918140..16d2b6e874d6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2934,6 +2934,7 @@ extern int vfs_stat(const char __user *, struct kstat *);
extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+extern const char *vfs_get_link(struct dentry *, struct delayed_call *);
extern int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 1dbf52f9c24b..e0341af6950e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -437,7 +437,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
/* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
extern void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 6824556d37ed..cd184bdca58f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1169,13 +1169,6 @@ int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
-static inline const char *vmbus_dev_name(const struct hv_device *device_obj)
-{
- const struct kobject *kobj = &device_obj->device.kobj;
-
- return kobj->name;
-}
-
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
diff --git a/include/linux/init.h b/include/linux/init.h
index 5a3321a7909b..e30104ceb86d 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace
+#define __init __section(.init.text) __cold notrace __latent_entropy
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -75,7 +75,8 @@
#define __exit __section(.exit.text) __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
-#define __meminit __section(.meminit.text) __cold notrace
+#define __meminit __section(.meminit.text) __cold notrace \
+ __latent_entropy
#define __meminitdata __section(.meminit.data)
#define __meminitconst __section(.meminit.rodata)
#define __memexit __section(.memexit.text) __exitused __cold notrace
@@ -139,24 +140,8 @@ extern bool initcall_debug;
#ifndef __ASSEMBLY__
-#ifdef CONFIG_LTO
-/* Work around a LTO gcc problem: when there is no reference to a variable
- * in a module it will be moved to the end of the program. This causes
- * reordering of initcalls which the kernel does not like.
- * Add a dummy reference function to avoid this. The function is
- * deleted by the linker.
- */
-#define LTO_REFERENCE_INITCALL(x) \
- ; /* yes this is needed */ \
- static __used __exit void *reference_##x(void) \
- { \
- return &x; \
- }
-#else
-#define LTO_REFERENCE_INITCALL(x)
-#endif
-
-/* initcalls are now grouped by functionality into separate
+/*
+ * initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined
* by link order.
* For backwards compatibility, initcall() puts the call in
@@ -164,12 +149,16 @@ extern bool initcall_debug;
*
* The `id' arg to __define_initcall() is needed so that multiple initcalls
* can point at the same handler without causing duplicate-symbol build errors.
+ *
+ * Initcalls are run by placing pointers in initcall sections that the
+ * kernel iterates at runtime. The linker can do dead code / data elimination
+ * and remove that completely, so the initcall sections have to be marked
+ * as KEEP() in the linker script.
*/
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn; \
- LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+ __attribute__((__section__(".initcall" #id ".init"))) = fn;
/*
* Early initcalls run before initializing SMP.
@@ -205,15 +194,15 @@ extern bool initcall_debug;
#define __initcall(fn) device_initcall(fn)
-#define __exitcall(fn) \
+#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
+#define console_initcall(fn) \
+ static initcall_t __initcall_##fn \
__used __section(.con_initcall.init) = fn
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
+#define security_initcall(fn) \
+ static initcall_t __initcall_##fn \
__used __section(.security_initcall.init) = fn
struct obs_kernel_param {
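The comment added above describes initcall pointers living in dedicated sections that the kernel walks at boot, which is why the linker script must KEEP() those sections once dead code elimination is enabled. A standalone userspace sketch of that section-plus-iteration mechanism (section and macro names are made up; on ELF the linker automatically provides the __start_/__stop_ symbols for a section whose name is a valid C identifier):

    #include <stdio.h>

    typedef int (*initcall_t)(void);

    /* Emit a pointer to fn into a dedicated section; 'used' stops the compiler
     * from discarding it, and the kernel additionally needs KEEP() so the
     * linker doesn't drop the whole section under --gc-sections. */
    #define my_initcall(fn) \
        static initcall_t __initcall_##fn \
        __attribute__((used, section("my_initcalls"))) = fn

    static int hello_init(void) { return puts("hello_init"); }
    static int world_init(void) { return puts("world_init"); }

    my_initcall(hello_init);
    my_initcall(world_init);

    extern initcall_t __start_my_initcalls[], __stop_my_initcalls[];

    int main(void)
    {
        for (initcall_t *fn = __start_my_initcalls; fn < __stop_my_initcalls; fn++)
            (*fn)();
        return 0;
    }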
diff --git a/include/linux/io.h b/include/linux/io.h
index e2c8419278c1..82ef36eac8a1 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -141,4 +141,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index e63e288dee83..7892f55a1866 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -19,11 +19,15 @@ struct vm_fault;
#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
/*
- * Flags for iomap mappings:
+ * Flags for all iomap mappings:
*/
-#define IOMAP_F_MERGED 0x01 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x02 /* block shared with another file */
-#define IOMAP_F_NEW 0x04 /* blocks have been newly allocated */
+#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
+
+/*
+ * Flags that only need to be reported for IOMAP_REPORT requests:
+ */
+#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
+#define IOMAP_F_SHARED 0x20 /* block shared with another file */
/*
* Magic value for blkno:
@@ -42,8 +46,9 @@ struct iomap {
/*
* Flags for iomap_begin / iomap_end. No flag implies a read.
*/
-#define IOMAP_WRITE (1 << 0)
-#define IOMAP_ZERO (1 << 1)
+#define IOMAP_WRITE (1 << 0) /* writing, must allocate blocks */
+#define IOMAP_ZERO (1 << 1) /* zeroing operation, may skip holes */
+#define IOMAP_REPORT (1 << 2) /* report extent status, e.g. FIEMAP */
struct iomap_ops {
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 7e9a789be5e0..ca1ad9ebbc92 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -123,12 +123,12 @@ struct inet6_skb_parm {
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return flags & IP6SKB_L3SLAVE;
}
#else
-static inline bool skb_l3mdev_slave(__u16 flags)
+static inline bool ipv6_l3mdev_skb(__u16 flags)
{
return false;
}
@@ -139,11 +139,22 @@ static inline bool skb_l3mdev_slave(__u16 flags)
static inline int inet6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(IP6CB(skb)->flags);
+ bool l3_slave = ipv6_l3mdev_skb(IP6CB(skb)->flags);
return l3_slave ? skb->skb_iif : IP6CB(skb)->iif;
}
+/* cannot be used in the TCP layer after tcp_v6_fill_cb */
+static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8361c8d3edd1..b7e34313cdfe 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -290,7 +290,7 @@
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303306eb..820c0ad54a01 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 15ec117ec537..8f2e059e4d45 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -31,7 +31,6 @@
* When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
* the last step cherry picks the 2nd arg, we get a zero.
*/
-#define config_enabled(cfg) ___is_defined(cfg)
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
@@ -41,13 +40,13 @@
* otherwise. For boolean options, this is equivalent to
* IS_ENABLED(CONFIG_FOO).
*/
-#define IS_BUILTIN(option) config_enabled(option)
+#define IS_BUILTIN(option) __is_defined(option)
/*
* IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
* otherwise.
*/
-#define IS_MODULE(option) config_enabled(option##_MODULE)
+#define IS_MODULE(option) __is_defined(option##_MODULE)
/*
* IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 96356ef012de..7056238fd9f5 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -269,10 +269,8 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
}
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
-size_t kernfs_path_len(struct kernfs_node *kn);
int kernfs_path_from_node(struct kernfs_node *root_kn, struct kernfs_node *kn,
char *buf, size_t buflen);
-char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen);
void pr_cont_kernfs_name(struct kernfs_node *kn);
void pr_cont_kernfs_path(struct kernfs_node *kn);
struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
@@ -341,12 +339,10 @@ static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{ return -ENOSYS; }
-static inline size_t kernfs_path_len(struct kernfs_node *kn)
-{ return 0; }
-
-static inline char *kernfs_path(struct kernfs_node *kn, char *buf,
- size_t buflen)
-{ return NULL; }
+static inline int kernfs_path_from_node(struct kernfs_node *root_kn,
+ struct kernfs_node *kn,
+ char *buf, size_t buflen)
+{ return -ENOSYS; }
static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
@@ -436,6 +432,22 @@ static inline void kernfs_init(void) { }
#endif /* CONFIG_KERNFS */
+/**
+ * kernfs_path - build full path of a given node
+ * @kn: kernfs_node of interest
+ * @buf: buffer to copy @kn's name into
+ * @buflen: size of @buf
+ *
+ * Builds the full path of @kn into @buf of @buflen bytes by calling
+ * kernfs_path_from_node(). Returns the length of the full path; if that
+ * length is equal to or greater than @buflen, @buf contains the truncated
+ * path with a trailing '\0'. On error, a negative errno is returned.
+ */
+static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
+{
+ return kernfs_path_from_node(kn, NULL, buf, buflen);
+}
+
static inline struct kernfs_node *
kernfs_find_and_get(struct kernfs_node *kn, const char *name)
{
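
A rough usage sketch of the new int-returning kernfs_path() wrapper (hypothetical caller, not part of this diff; the return-value semantics follow kernfs_path_from_node() as described above):

	char buf[PATH_MAX];
	int len = kernfs_path(kn, buf, sizeof(buf));	/* 'kn' held by the caller */

	if (len < 0)
		pr_err("kernfs_path failed: %d\n", len);
	else if (len < (int)sizeof(buf))
		pr_info("node path: %s\n", buf);	/* fits, nul-terminated */
	else
		pr_info("path truncated, need %d bytes\n", len + 1);
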
diff --git a/include/linux/libata.h b/include/linux/libata.h
index e37d4f99f510..616eef4d81ea 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -46,7 +46,8 @@
#ifdef CONFIG_ATA_NONSTANDARD
#include <asm/libata-portmap.h>
#else
-#include <asm-generic/libata-portmap.h>
+#define ATA_PRIMARY_IRQ(dev) 14
+#define ATA_SECONDARY_IRQ(dev) 15
#endif
/*
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index f6a164297358..3be7abd6e722 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1399,7 +1399,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey);
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
-int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_test_interrupt(struct mlx4_dev *dev, int vector);
+int mlx4_test_async(struct mlx4_dev *dev);
int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
const u32 offset[], u32 value[],
size_t array_len, u8 port);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 85c4786427e4..ecc451d89ccd 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -418,8 +418,12 @@ struct mlx5_core_health {
u32 prev;
int miss_counter;
bool sick;
+ /* wq spinlock to synchronize draining */
+ spinlock_t wq_lock;
struct workqueue_struct *wq;
+ unsigned long flags;
struct work_struct work;
+ struct delayed_work recover_work;
};
struct mlx5_cq_table {
@@ -626,10 +630,6 @@ struct mlx5_db {
};
enum {
- MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
-};
-
-enum {
MLX5_COMP_EQ_SIZE = 1024,
};
@@ -638,13 +638,6 @@ enum {
MLX5_PTYS_EN = 1 << 2,
};
-struct mlx5_db_pgdir {
- struct list_head list;
- DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
- __be32 *db_page;
- dma_addr_t db_dma;
-};
-
typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
struct mlx5_cmd_work_ent {
@@ -789,6 +782,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..a92c8d73aeaf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,29 +1266,25 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+ unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write);
+ void *buf, int len, unsigned int gup_flags);
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages,
- unsigned int foll_flags, struct page **pages,
- struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages, int *locked);
+ unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags);
+ struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
+ struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -1306,7 +1302,7 @@ struct frame_vector {
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- bool write, bool force, struct frame_vector *vec);
+ unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1387,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2228,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
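
A hedged sketch of converting a caller from the old write/force arguments to the new gup_flags form (FOLL_WRITE and FOLL_FORCE are the existing GUP flags; 'start', 'nr_pages' and 'pages' are hypothetical caller variables):

	/* Before: get_user_pages(start, nr_pages, 1, 0, pages, NULL)
	 * with write = 1 and force = 0.
	 */
	unsigned int gup_flags = FOLL_WRITE;	/* add FOLL_FORCE if force was set */
	long pinned = get_user_pages(start, nr_pages, gup_flags, pages, NULL);

	if (pinned < 0)
		return pinned;
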
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7f2ae99e5daf..0f088f3a2fed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -440,33 +440,7 @@ struct zone {
seqlock_t span_seqlock;
#endif
- /*
- * wait_table -- the array holding the hash table
- * wait_table_hash_nr_entries -- the size of the hash table array
- * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
- *
- * The purpose of all these is to keep track of the people
- * waiting for a page to become available and make them
- * runnable again when possible. The trouble is that this
- * consumes a lot of space, especially when so few things
- * wait on pages at a given time. So instead of using
- * per-page waitqueues, we use a waitqueue hash table.
- *
- * The bucket discipline is to sleep on the same queue when
- * colliding and wake all in that wait queue when removing.
- * When something wakes, it must check to be sure its page is
- * truly available, a la thundering herd. The cost of a
- * collision is great, but given the expected load of the
- * table, they should be so rare as to be outweighed by the
- * benefits from the saved space.
- *
- * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
- * primary users of these fields, and in mm/page_alloc.c
- * free_area_init_core() performs the initialization of them.
- */
- wait_queue_head_t *wait_table;
- unsigned long wait_table_hash_nr_entries;
- unsigned long wait_table_bits;
+ int initialized;
/* Write-intensive fields used from the page allocator */
ZONE_PADDING(_pad1_)
@@ -546,7 +520,7 @@ static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
static inline bool zone_is_initialized(struct zone *zone)
{
- return !!zone->wait_table;
+ return zone->initialized;
}
static inline bool zone_is_empty(struct zone *zone)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 458c87631e7f..20ce8df115ac 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2167,7 +2167,10 @@ struct napi_gro_cb {
/* Used to determine if flush_id can be ignored */
u8 is_atomic:1;
- /* 5 bit hole */
+ /* Number of gro_receive callbacks this packet already went through */
+ u8 recursion_counter:4;
+
+ /* 1 bit hole */
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2178,6 +2181,40 @@ struct napi_gro_cb {
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+ return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(head, skb);
+}
+
+typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+ struct sk_buff *);
+static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ if (unlikely(gro_recursion_inc_test(skb))) {
+ NAPI_GRO_CB(skb)->flush |= 1;
+ return NULL;
+ }
+
+ return cb(sk, head, skb);
+}
+
struct packet_type {
__be16 type; /* This is really htons(ether_type). */
struct net_device *dev; /* NULL is wildcarded here */
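
As a sketch of how an encapsulation driver is expected to use the new helper (foo_gro_receive and inner_ops are hypothetical names), the inner protocol's gro_receive is invoked through call_gro_receive() so the per-packet recursion counter is enforced:

static struct sk_buff **foo_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	/* ... parse and pull the foo encapsulation header ... */

	/* Bounded recursion: once GRO_RECURSION_LIMIT nested gro_receive
	 * callbacks have been traversed, the packet is flushed instead.
	 */
	return call_gro_receive(inner_ops->callbacks.gro_receive, head, skb);
}
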
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7676557ce357..fc3c24206593 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,7 +16,6 @@
#define _LINUX_NVME_H
#include <linux/types.h>
-#include <linux/uuid.h>
/* NQN names in commands fields specified one size */
#define NVMF_NQN_FIELD_LEN 256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
char fr[8];
__u8 rab;
__u8 ieee[3];
- __u8 mic;
+ __u8 cmic;
__u8 mdts;
__le16 cntlid;
__le32 ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
- __u8 rsvd270[50];
+ __le16 mtfa;
+ __le32 hmpre;
+ __le32 hmmin;
+ __u8 tnvmcap[16];
+ __u8 unvmcap[16];
+ __le32 rpmbs;
+ __u8 rsvd316[4];
__le16 kas;
__u8 rsvd322[190];
__u8 sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
- __le64 nvmcap[2];
+ __u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
__u8 eui64[8];
@@ -277,6 +282,16 @@ struct nvme_id_ns {
};
enum {
+ NVME_ID_CNS_NS = 0x00,
+ NVME_ID_CNS_CTRL = 0x01,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_CTRL_NS_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
+};
+
+enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
NVME_NS_FLBAS_META_EXT = 0x10,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
nvme_admin_set_features = 0x09,
nvme_admin_get_features = 0x0a,
nvme_admin_async_event = 0x0c,
+ nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -583,6 +600,7 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
__u8 adrfam;
- __u8 nqntype;
+ __u8 subtype;
__u8 treq;
__le16 portid;
__le16 cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
- uuid_be hostid;
+ __u8 hostid[16];
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
- NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
NVME_SC_INVALID_QUEUE = 0x10c,
NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
- NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+ NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
+ NVME_SC_FW_NEEDS_RESET = 0x111,
+ NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
+ NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_OVERLAPPING_RANGE = 0x114,
+ NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_ID_UNAVAILABLE = 0x116,
+ NVME_SC_NS_ALREADY_ATTACHED = 0x118,
+ NVME_SC_NS_IS_PRIVATE = 0x119,
+ NVME_SC_NS_NOT_ATTACHED = 0x11a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+ NVME_SC_CTRL_LIST_INVALID = 0x11c,
/*
* I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
+ NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
};
@@ -960,6 +990,7 @@ struct nvme_completion {
__le16 status; /* did the command fail, and if so, why? */
};
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+ (((major) << 16) | ((minor) << 8) | (tertiary))
#endif /* _LINUX_NVME_H */
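
For illustration (arithmetic only, not part of the diff), the three-argument version macro packs major.minor.tertiary into the Version register layout:

	/* NVMe 1.2.1: (1 << 16) | (2 << 8) | 1 == 0x00010201 */
	u32 vs = NVME_VS(1, 2, 1);
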
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 060d0ede88df..4741ecdb9817 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1257,6 +1257,7 @@ extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
+extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index e4c08c1ff0c5..a1bacf1150b2 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -25,7 +25,6 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
- WARN_ONCE(1, "free of protection key when disabled");
return -EINVAL;
}
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f9ae903bbb84..8978a60371f4 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -146,6 +146,7 @@ enum qed_led_mode {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0xFF
+#define QED_DEFAULT_RX_USECS 12
/* forward */
struct qed_dev;
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
bool qede_roce_supported(struct qede_dev *dev);
-#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+#if IS_ENABLED(CONFIG_QED_RDMA)
int qede_roce_dev_add(struct qede_dev *dev);
void qede_roce_dev_event_open(struct qede_dev *dev);
void qede_roce_dev_event_close(struct qede_dev *dev);
diff --git a/include/linux/random.h b/include/linux/random.h
index f7bb7a355cf7..7bd2403e4fef 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -18,9 +18,20 @@ struct random_ready_callback {
};
extern void add_device_randomness(const void *, unsigned int);
+
+#if defined(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) && !defined(__CHECKER__)
+static inline void add_latent_entropy(void)
+{
+ add_device_randomness((const void *)&latent_entropy,
+ sizeof(latent_entropy));
+}
+#else
+static inline void add_latent_entropy(void) {}
+#endif
+
extern void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+ unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 663fda2887f7..cc6e23eaac91 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -936,6 +936,7 @@ struct sk_buff_fclones {
/**
* skb_fclone_busy - check if fclone is busy
+ * @sk: socket
* @skb: buffer
*
* Returns true if skb is a fast clone, and its clone is not freed.
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0d7abb8b7315..91a740f6b884 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
unsigned long prot, int pkey);
asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
asmlinkage long sys_pkey_free(int pkey);
-//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
-//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
-// unsigned long flags);
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 45f004e9cc59..2873baf5372a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -14,17 +14,6 @@ struct timespec;
struct compat_timespec;
#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
- unsigned long flags; /* low level flags */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .flags = 0, \
-}
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
#define current_thread_info() ((struct thread_info *)current)
#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f2d072787947..8f998afc1384 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -174,6 +174,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+void __ipv6_sock_mc_close(struct sock *sk);
void ipv6_sock_mc_close(struct sock *sk);
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
const struct in6_addr *src_addr);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index c575583b50fb..2019310cf135 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -820,9 +820,9 @@ enum station_parameters_apply_mask {
* (or NULL for no change)
* @supported_rates_len: number of supported rates
* @sta_flags_mask: station flags that changed
- * (bitmask of BIT(NL80211_STA_FLAG_...))
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
* @sta_flags_set: station flags values
- * (bitmask of BIT(NL80211_STA_FLAG_...))
+ * (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
* @peer_aid: mesh peer AID or zero for no change
@@ -3160,47 +3160,54 @@ struct ieee80211_iface_limit {
*
* 1. Allow #STA <= 1, #AP <= 1, matching BI, channels = 1, 2 total:
*
- * struct ieee80211_iface_limit limits1[] = {
- * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- * { .max = 1, .types = BIT(NL80211_IFTYPE_AP}, },
- * };
- * struct ieee80211_iface_combination combination1 = {
- * .limits = limits1,
- * .n_limits = ARRAY_SIZE(limits1),
- * .max_interfaces = 2,
- * .beacon_int_infra_match = true,
- * };
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits1[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ *	   { .max = 1, .types = BIT(NL80211_IFTYPE_AP), },
+ * };
+ * struct ieee80211_iface_combination combination1 = {
+ * .limits = limits1,
+ * .n_limits = ARRAY_SIZE(limits1),
+ * .max_interfaces = 2,
+ * .beacon_int_infra_match = true,
+ * };
*
*
* 2. Allow #{AP, P2P-GO} <= 8, channels = 1, 8 total:
*
- * struct ieee80211_iface_limit limits2[] = {
- * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
- * BIT(NL80211_IFTYPE_P2P_GO), },
- * };
- * struct ieee80211_iface_combination combination2 = {
- * .limits = limits2,
- * .n_limits = ARRAY_SIZE(limits2),
- * .max_interfaces = 8,
- * .num_different_channels = 1,
- * };
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits2[] = {
+ * { .max = 8, .types = BIT(NL80211_IFTYPE_AP) |
+ * BIT(NL80211_IFTYPE_P2P_GO), },
+ * };
+ * struct ieee80211_iface_combination combination2 = {
+ * .limits = limits2,
+ * .n_limits = ARRAY_SIZE(limits2),
+ * .max_interfaces = 8,
+ * .num_different_channels = 1,
+ * };
*
*
* 3. Allow #STA <= 1, #{P2P-client,P2P-GO} <= 3 on two channels, 4 total.
*
- * This allows for an infrastructure connection and three P2P connections.
+ * This allows for an infrastructure connection and three P2P connections.
+ *
+ * .. code-block:: c
+ *
+ * struct ieee80211_iface_limit limits3[] = {
+ * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
+ * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ * BIT(NL80211_IFTYPE_P2P_CLIENT), },
+ * };
+ * struct ieee80211_iface_combination combination3 = {
+ * .limits = limits3,
+ * .n_limits = ARRAY_SIZE(limits3),
+ * .max_interfaces = 4,
+ * .num_different_channels = 2,
+ * };
*
- * struct ieee80211_iface_limit limits3[] = {
- * { .max = 1, .types = BIT(NL80211_IFTYPE_STATION), },
- * { .max = 3, .types = BIT(NL80211_IFTYPE_P2P_GO) |
- * BIT(NL80211_IFTYPE_P2P_CLIENT), },
- * };
- * struct ieee80211_iface_combination combination3 = {
- * .limits = limits3,
- * .n_limits = ARRAY_SIZE(limits3),
- * .max_interfaces = 4,
- * .num_different_channels = 2,
- * };
*/
struct ieee80211_iface_combination {
const struct ieee80211_iface_limit *limits;
@@ -4120,14 +4127,29 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr);
*/
/**
+ * ieee80211_data_to_8023_exthdr - convert an 802.11 data frame to 802.3
+ * @skb: the 802.11 data frame
+ * @ehdr: pointer to a &struct ethhdr that will get the header, instead
+ * of it being pushed into the SKB
+ * @addr: the device MAC address
+ * @iftype: the virtual interface type
+ * Return: 0 on success. Non-zero on error.
+ */
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype);
+
+/**
* ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3
* @skb: the 802.11 data frame
* @addr: the device MAC address
* @iftype: the virtual interface type
* Return: 0 on success. Non-zero on error.
*/
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype);
+static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+ enum nl80211_iftype iftype)
+{
+ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype);
+}
/**
* ieee80211_data_from_8023 - convert an 802.3 frame to 802.11
@@ -4145,22 +4167,23 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
/**
* ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
*
- * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of
- * 802.3 frames. The @list will be empty if the decode fails. The
- * @skb is consumed after the function returns.
+ * Decode an IEEE 802.11 A-MSDU and convert it to a list of 802.3 frames.
+ * The @list will be empty if the decode fails. The @skb must be fully
+ * header-less before being passed in here; it is freed in this function.
*
- * @skb: The input IEEE 802.11n A-MSDU frame.
+ * @skb: The input A-MSDU frame without any headers.
* @list: The output list of 802.3 frames. It must be allocated and
* initialized by the caller.
* @addr: The device MAC address.
* @iftype: The device interface type.
* @extra_headroom: The hardware extra headroom for SKBs in the @list.
- * @has_80211_header: Set it true if SKB is with IEEE 802.11 header.
+ * @check_da: DA to check in the inner ethernet header, or NULL
+ * @check_sa: SA to check in the inner ethernet header, or NULL
*/
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header);
+ const u8 *check_da, const u8 *check_sa);
/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 515352c6280a..b0576cb2ab25 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -190,8 +190,8 @@ struct inet6_dev {
__u32 if_flags;
int dead;
+ u32 desync_factor;
u8 rndid[8];
- struct timer_list regen_timer;
struct list_head tempaddr_list;
struct in6_addr token;
diff --git a/include/net/ip.h b/include/net/ip.h
index bc43c0fcae12..5413883ac47f 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -38,7 +38,7 @@ struct sock;
struct inet_skb_parm {
int iif;
struct ip_options opt; /* Compiled IP options */
- unsigned char flags;
+ u16 flags;
#define IPSKB_FORWARDED BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
@@ -48,10 +48,16 @@ struct inet_skb_parm {
#define IPSKB_DOREDIRECT BIT(5)
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_FRAG_SEGS BIT(7)
+#define IPSKB_L3SLAVE BIT(8)
u16 frag_max_size;
};
+static inline bool ipv4_l3mdev_skb(u16 flags)
+{
+ return !!(flags & IPSKB_L3SLAVE);
+}
+
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
return ip_hdr(skb)->ihl * 4;
@@ -572,7 +578,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
*/
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -594,7 +600,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
- ip_cmsg_recv_offset(msg, skb, 0);
+ ip_cmsg_recv_offset(msg, skb, 0, 0);
}
bool icmp_global_allow(void);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..a74e2aa40ef4 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -230,6 +230,8 @@ struct fib6_table {
rwlock_t tb6_lock;
struct fib6_node tb6_root;
struct inet_peer_base tb6_peers;
+ unsigned int flags;
+#define RT6_TABLE_HAS_DFLT_ROUTER BIT(0)
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index e0cd318d5103..f83e78d071a3 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -32,6 +32,7 @@ struct route_info {
#define RT6_LOOKUP_F_SRCPREF_TMP 0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC 0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA 0x00000020
+#define RT6_LOOKUP_F_IGNORE_LINKSTATE 0x00000040
/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
* Unlike IPv4, hdr->seg_len doesn't include the IPv6 header
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b9b24abd9103..5345d358a510 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -811,14 +811,18 @@ enum mac80211_rate_control_flags {
* in the control information, and it will be filled by the rate
* control algorithm according to what should be sent. For example,
* if this array contains, in the format { <idx>, <count> } the
- * information
+ * information::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 4 }, { -1, 0 }, { -1, 0 }
+ *
* then this means that the frame should be transmitted
* up to twice at rate 3, up to twice at rate 2, and up to four
* times at rate 1 if it doesn't get acknowledged. Say it gets
* acknowledged by the peer after the fifth attempt, the status
- * information should then contain
+ * information should then contain::
+ *
* { 3, 2 }, { 2, 2 }, { 1, 1 }, { -1, 0 } ...
+ *
* since it was transmitted twice at rate 3, twice at rate 2
* and once at rate 1 after which we received an acknowledgement.
*/
@@ -1168,8 +1172,8 @@ enum mac80211_rx_vht_flags {
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
* @vht_nss: number of streams (VHT only)
- * @flag: %RX_FLAG_*
- * @vht_flag: %RX_VHT_FLAG_*
+ * @flag: %RX_FLAG_\*
+ * @vht_flag: %RX_VHT_FLAG_\*
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1432,7 +1436,7 @@ enum ieee80211_vif_flags {
* @probe_req_reg: probe requests should be reported to mac80211 for this
* interface.
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *).
+ * sizeof(void \*).
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
*/
struct ieee80211_vif {
@@ -1743,7 +1747,7 @@ struct ieee80211_sta_rates {
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
- * sizeof(void *), size is determined in hw information.
+ * sizeof(void \*), size is determined in hw information.
* @uapsd_queues: bitmap of queues configured for uapsd. Only valid
* if wme is supported. The bits order is like in
* IEEE80211_WMM_IE_STA_QOSINFO_AC_*.
@@ -2152,12 +2156,12 @@ enum ieee80211_hw_flags {
*
* @radiotap_mcs_details: lists which MCS information can the HW
* reports, by default it is set to _MCS, _GI and _BW but doesn't
- * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only
+ * include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_\* values, only
* adding _BW is supported today.
*
* @radiotap_vht_details: lists which VHT MCS information the HW reports,
* the default is _GI | _BANDWIDTH.
- * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
+ * Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
@@ -2492,6 +2496,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* in the software stack cares about, we will, in the future, have mac80211
* tell the driver which information elements are interesting in the sense
* that we want to see changes in them. This will include
+ *
* - a list of information element IDs
* - a list of OUIs for the vendor information element
*
diff --git a/include/net/sock.h b/include/net/sock.h
index 276489553338..f13ac87a8015 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -252,6 +252,7 @@ struct sock_common {
* @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
* @sk_sndbuf: size of send buffer in bytes
+ * @sk_padding: unused element for alignment
* @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
@@ -302,7 +303,8 @@ struct sock_common {
* @sk_backlog_rcv: callback to process the backlog
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
- */
+ * @sk_rcu: used during RCU grace period
+ */
struct sock {
/*
* Now struct inet_timewait_sock also uses sock_common, so please just
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f83b7f220a65..5b82d4d94834 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -794,12 +794,23 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
- bool l3_slave = skb_l3mdev_slave(TCP_SKB_CB(skb)->header.h6.flags);
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif
+/* TCP_SKB_CB reference means this can not be used from early demux */
+static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
+ ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ return true;
+#endif
+ return false;
+}
+
/* Due to TSO, an SKB can be composed of multiple actual
* packets. To keep these tracked properly, we use this.
*/
diff --git a/include/net/udp.h b/include/net/udp.h
index 18f1e6b91927..6134f37ba3ab 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -262,6 +262,7 @@ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
int udp_rcv(struct sk_buff *skb);
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int udp_init_sock(struct sock *sk);
+int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0255613a54a4..308adc4154f4 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -225,9 +225,9 @@ struct vxlan_config {
struct vxlan_dev {
struct hlist_node hlist; /* vni hash table */
struct list_head next; /* vxlan's per namespace list */
- struct vxlan_sock *vn4_sock; /* listening socket for IPv4 */
+ struct vxlan_sock __rcu *vn4_sock; /* listening socket for IPv4 */
#if IS_ENABLED(CONFIG_IPV6)
- struct vxlan_sock *vn6_sock; /* listening socket for IPv6 */
+ struct vxlan_sock __rcu *vn6_sock; /* listening socket for IPv6 */
#endif
struct net_device *dev;
struct net *net; /* netns for packet i/o */
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
new file mode 100644
index 000000000000..eaaf56df4086
--- /dev/null
+++ b/include/soc/fsl/bman.h
@@ -0,0 +1,129 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_BMAN_H
+#define __FSL_BMAN_H
+
+/* wrapper for 48-bit buffers */
+struct bm_buffer {
+ union {
+ struct {
+ __be16 bpid; /* hi 8-bits reserved */
+ __be16 hi; /* High 16-bits of 48-bit address */
+ __be32 lo; /* Low 32-bits of 48-bit address */
+ };
+ __be64 data;
+ };
+} __aligned(8);
+/*
+ * Restore the 48 bit address previously stored in BMan
+ * hardware pools as a dma_addr_t
+ */
+static inline dma_addr_t bm_buf_addr(const struct bm_buffer *buf)
+{
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline u64 bm_buffer_get64(const struct bm_buffer *buf)
+{
+ return be64_to_cpu(buf->data) & 0xffffffffffffLLU;
+}
+
+static inline void bm_buffer_set64(struct bm_buffer *buf, u64 addr)
+{
+ buf->hi = cpu_to_be16(upper_32_bits(addr));
+ buf->lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline u8 bm_buffer_get_bpid(const struct bm_buffer *buf)
+{
+ return be16_to_cpu(buf->bpid) & 0xff;
+}
+
+static inline void bm_buffer_set_bpid(struct bm_buffer *buf, int bpid)
+{
+ buf->bpid = cpu_to_be16(bpid & 0xff);
+}
+
+/* Managed portal, high-level i/face */
+
+/* Portal and Buffer Pools */
+struct bman_portal;
+struct bman_pool;
+
+#define BM_POOL_MAX 64 /* max # of buffer pools */
+
+/**
+ * bman_new_pool - Allocates a Buffer Pool object
+ *
+ * Creates a pool object, and returns a reference to it or NULL on error.
+ */
+struct bman_pool *bman_new_pool(void);
+
+/**
+ * bman_free_pool - Deallocates a Buffer Pool object
+ * @pool: the pool object to release
+ */
+void bman_free_pool(struct bman_pool *pool);
+
+/**
+ * bman_get_bpid - Returns a pool object's BPID.
+ * @pool: the pool object
+ *
+ * The returned value is the index of the encapsulated buffer pool,
+ * in the range of [0, @BM_POOL_MAX-1].
+ */
+int bman_get_bpid(const struct bman_pool *pool);
+
+/**
+ * bman_release - Release buffer(s) to the buffer pool
+ * @pool: the buffer pool object to release to
+ * @bufs: an array of buffers to release
+ * @num: the number of buffers in @bufs (1-8)
+ *
+ * Adds the given buffers to RCR entries. If the RCR ring is unresponsive,
+ * the function will return -ETIMEDOUT. Otherwise, it returns zero.
+ */
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
+
+/**
+ * bman_acquire - Acquire buffer(s) from a buffer pool
+ * @pool: the buffer pool object to acquire from
+ * @bufs: array for storing the acquired buffers
+ * @num: the number of buffers desired (@bufs is at least this big)
+ *
+ * Issues an "Acquire" command via the portal's management command interface.
+ * The return value will be the number of buffers obtained from the pool, or a
+ * negative error code if a h/w error or pool starvation was encountered. In
+ * the latter case, the content of @bufs is undefined.
+ */
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+
+#endif /* __FSL_BMAN_H */
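
A minimal usage sketch of the buffer pool API declared above (error handling trimmed; 'addr' and use_buffer() are hypothetical and only stand in for the caller's DMA address and consumer):

	struct bman_pool *pool = bman_new_pool();
	struct bm_buffer buf;

	if (!pool)
		return -ENODEV;

	bm_buffer_set64(&buf, addr);		/* 48-bit buffer address */
	if (bman_release(pool, &buf, 1))	/* push one buffer into the pool */
		goto out;

	if (bman_acquire(pool, &buf, 1) == 1)	/* pull one buffer back out */
		use_buffer(bm_buf_addr(&buf));
out:
	bman_free_pool(pool);
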
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
new file mode 100644
index 000000000000..37f3eb001a16
--- /dev/null
+++ b/include/soc/fsl/qman.h
@@ -0,0 +1,1074 @@
+/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_QMAN_H
+#define __FSL_QMAN_H
+
+#include <linux/bitops.h>
+
+/* Hardware constants */
+#define QM_CHANNEL_SWPORTAL0 0
+#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_POOL1_REV3 0x401
+extern u16 qm_channel_pool1;
+
+/* Portal processing (interrupt) sources */
+#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
+#define QM_PIRQ_EQCI 0x00080000 /* Enqueue Command Committed */
+#define QM_PIRQ_EQRI 0x00040000 /* EQCR Ring (below threshold) */
+#define QM_PIRQ_DQRI 0x00020000 /* DQRR Ring (non-empty) */
+#define QM_PIRQ_MRI 0x00010000 /* MR Ring (non-empty) */
+/*
+ * This mask contains all the interrupt sources that need handling except DQRI,
+ * ie. that if present should trigger slow-path processing.
+ */
+#define QM_PIRQ_SLOW (QM_PIRQ_CSCI | QM_PIRQ_EQCI | QM_PIRQ_EQRI | \
+ QM_PIRQ_MRI)
+
+/* For qman_static_dequeue_*** APIs */
+#define QM_SDQCR_CHANNELS_POOL_MASK 0x00007fff
+/* for n in [1,15] */
+#define QM_SDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+/* for conversion from n of qm_channel */
+static inline u32 QM_SDQCR_CHANNELS_POOL_CONV(u16 channel)
+{
+ return QM_SDQCR_CHANNELS_POOL(channel + 1 - qm_channel_pool1);
+}
+
+/* --- QMan data structures (and associated constants) --- */
+
+/* "Frame Descriptor (FD)" */
+struct qm_fd {
+ union {
+ struct {
+ u8 cfg8b_w1;
+ u8 bpid; /* Buffer Pool ID */
+ u8 cfg8b_w3;
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
+ } __packed;
+ __be64 data;
+ };
+ __be32 cfg; /* format, offset, length / congestion */
+ union {
+ __be32 cmd;
+ __be32 status;
+ };
+} __aligned(8);
+
+#define QM_FD_FORMAT_SG BIT(31)
+#define QM_FD_FORMAT_LONG BIT(30)
+#define QM_FD_FORMAT_COMPOUND BIT(29)
+#define QM_FD_FORMAT_MASK GENMASK(31, 29)
+#define QM_FD_OFF_SHIFT 20
+#define QM_FD_OFF_MASK GENMASK(28, 20)
+#define QM_FD_LEN_MASK GENMASK(19, 0)
+#define QM_FD_LEN_BIG_MASK GENMASK(28, 0)
+
+enum qm_fd_format {
+ /*
+ * 'contig' implies a contiguous buffer, whereas 'sg' implies a
+ * scatter-gather table. 'big' implies a 29-bit length with no offset
+ * field, otherwise length is 20-bit and offset is 9-bit. 'compound'
+ * implies a s/g-like table, where each entry itself represents a frame
+ * (contiguous or scatter-gather) and the 29-bit "length" is
+ * interpreted purely for congestion calculations, ie. a "congestion
+ * weight".
+ */
+ qm_fd_contig = 0,
+ qm_fd_contig_big = QM_FD_FORMAT_LONG,
+ qm_fd_sg = QM_FD_FORMAT_SG,
+ qm_fd_sg_big = QM_FD_FORMAT_SG | QM_FD_FORMAT_LONG,
+ qm_fd_compound = QM_FD_FORMAT_COMPOUND
+};
+
+static inline dma_addr_t qm_fd_addr(const struct qm_fd *fd)
+{
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_fd_addr_get64(const struct qm_fd *fd)
+{
+ return be64_to_cpu(fd->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_fd_addr_set64(struct qm_fd *fd, u64 addr)
+{
+ fd->addr_hi = upper_32_bits(addr);
+ fd->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/*
+ * The 'format' field indicates the interpretation of the remaining
+ * 29 bits of the 32-bit word.
+ * If 'format' is _contig or _sg, 20b length and 9b offset.
+ * If 'format' is _contig_big or _sg_big, 29b length.
+ * If 'format' is _compound, 29b "congestion weight".
+ */
+static inline enum qm_fd_format qm_fd_get_format(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_FORMAT_MASK;
+}
+
+static inline int qm_fd_get_offset(const struct qm_fd *fd)
+{
+ return (be32_to_cpu(fd->cfg) & QM_FD_OFF_MASK) >> QM_FD_OFF_SHIFT;
+}
+
+static inline int qm_fd_get_length(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_MASK;
+}
+
+static inline int qm_fd_get_len_big(const struct qm_fd *fd)
+{
+ return be32_to_cpu(fd->cfg) & QM_FD_LEN_BIG_MASK;
+}
+
+static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
+ int off, int len)
+{
+ fd->cfg = cpu_to_be32(fmt | (len & QM_FD_LEN_BIG_MASK) |
+ ((off << QM_FD_OFF_SHIFT) & QM_FD_OFF_MASK));
+}
+
+#define qm_fd_set_contig(fd, off, len) \
+ qm_fd_set_param(fd, qm_fd_contig, off, len)
+#define qm_fd_set_sg(fd, off, len) qm_fd_set_param(fd, qm_fd_sg, off, len)
+#define qm_fd_set_contig_big(fd, len) \
+ qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
+#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+
+static inline void qm_fd_clear_fd(struct qm_fd *fd)
+{
+ fd->data = 0;
+ fd->cfg = 0;
+ fd->cmd = 0;
+}
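/*
 * Illustrative sketch (not part of this header): filling a frame descriptor
 * for a single contiguous buffer with the accessors above; 'dma', 'offset'
 * and 'len' are hypothetical caller values.
 *
 *	struct qm_fd fd;
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_addr_set64(&fd, dma);		// 40-bit frame address
 *	qm_fd_set_contig(&fd, offset, len);	// format + 9-bit offset + 20-bit length
 */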
+
+/* Scatter/Gather table entry */
+struct qm_sg_entry {
+ union {
+ struct {
+ u8 __reserved1[3];
+ u8 addr_hi; /* high 8-bits of 40-bit address */
+ __be32 addr_lo; /* low 32-bits of 40-bit address */
+ };
+ __be64 data;
+ };
+ __be32 cfg; /* E bit, F bit, length */
+ u8 __reserved2;
+ u8 bpid;
+ __be16 offset; /* 13-bit, _res[13-15]*/
+} __packed;
+
+#define QM_SG_LEN_MASK GENMASK(29, 0)
+#define QM_SG_OFF_MASK GENMASK(12, 0)
+#define QM_SG_FIN BIT(30)
+#define QM_SG_EXT BIT(31)
+
+static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline u64 qm_sg_entry_get64(const struct qm_sg_entry *sg)
+{
+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
+}
+
+static inline void qm_sg_entry_set64(struct qm_sg_entry *sg, u64 addr)
+{
+ sg->addr_hi = upper_32_bits(addr);
+ sg->addr_lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+static inline bool qm_sg_entry_is_final(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_FIN;
+}
+
+static inline bool qm_sg_entry_is_ext(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_EXT;
+}
+
+static inline int qm_sg_entry_get_len(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->cfg) & QM_SG_LEN_MASK;
+}
+
+static inline void qm_sg_entry_set_len(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(len & QM_SG_LEN_MASK);
+}
+
+static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len)
+{
+ sg->cfg = cpu_to_be32(QM_SG_FIN | (len & QM_SG_LEN_MASK));
+}
+
+static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg)
+{
+ return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK;
+}
+
+/* "Frame Dequeue Response" */
+struct qm_dqrr_entry {
+ u8 verb;
+ u8 stat;
+ u16 seqnum; /* 15-bit */
+ u8 tok;
+ u8 __reserved2[3];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ struct qm_fd fd;
+ u8 __reserved4[32];
+} __packed;
+#define QM_DQRR_VERB_VBIT 0x80
+#define QM_DQRR_VERB_MASK 0x7f /* where the verb contains; */
+#define QM_DQRR_VERB_FRAME_DEQUEUE 0x60 /* "this format" */
+#define QM_DQRR_STAT_FQ_EMPTY 0x80 /* FQ empty */
+#define QM_DQRR_STAT_FQ_HELDACTIVE 0x40 /* FQ held active */
+#define QM_DQRR_STAT_FQ_FORCEELIGIBLE 0x20 /* FQ was force-eligible'd */
+#define QM_DQRR_STAT_FD_VALID 0x10 /* has a non-NULL FD */
+#define QM_DQRR_STAT_UNSCHEDULED 0x02 /* Unscheduled dequeue */
+#define QM_DQRR_STAT_DQCR_EXPIRED 0x01 /* VDQCR or PDQCR expired*/
+
+/* "ERN Message Response" */
+/* "FQ State Change Notification" */
+union qm_mr_entry {
+ struct {
+ u8 verb;
+ u8 __reserved[63];
+ };
+ struct {
+ u8 verb;
+ u8 dca;
+ u16 seqnum;
+ u8 rc; /* Rej Code: 8-bit */
+ u8 orp_hi; /* ORP: 24-bit */
+ u16 orp_lo;
+ u32 fqid; /* 24-bit */
+ u32 tag;
+ struct qm_fd fd;
+ u8 __reserved1[32];
+ } __packed ern;
+ struct {
+ u8 verb;
+ u8 fqs; /* Frame Queue Status */
+ u8 __reserved1[6];
+ u32 fqid; /* 24-bit */
+ u32 contextB;
+ u8 __reserved2[48];
+ } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+};
+#define QM_MR_VERB_VBIT 0x80
+/*
+ * ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
+ * which would be invalid as a s/w enqueue verb. A s/w ERN can be distinguished
+ * from the other MR types by noting if the 0x20 bit is unset.
+ */
+#define QM_MR_VERB_TYPE_MASK 0x27
+#define QM_MR_VERB_DC_ERN 0x20
+#define QM_MR_VERB_FQRN 0x21
+#define QM_MR_VERB_FQRNI 0x22
+#define QM_MR_VERB_FQRL 0x23
+#define QM_MR_VERB_FQPN 0x24
+#define QM_MR_RC_MASK 0xf0 /* contains one of; */
+#define QM_MR_RC_CGR_TAILDROP 0x00
+#define QM_MR_RC_WRED 0x10
+#define QM_MR_RC_ERROR 0x20
+#define QM_MR_RC_ORPWINDOW_EARLY 0x30
+#define QM_MR_RC_ORPWINDOW_LATE 0x40
+#define QM_MR_RC_FQ_TAILDROP 0x50
+#define QM_MR_RC_ORPWINDOW_RETIRED 0x60
+#define QM_MR_RC_ORP_ZERO 0x70
+#define QM_MR_FQS_ORLPRESENT 0x02 /* ORL fragments to come */
+#define QM_MR_FQS_NOTEMPTY 0x01 /* FQ has enqueued frames */
+
+/*
+ * An identical structure of FQD fields is present in the "Init FQ" command and
+ * the "Query FQ" result, it's suctioned out into the "struct qm_fqd" type.
+ * Within that, the 'stashing' and 'taildrop' pieces are also factored out, the
+ * latter has two inlines to assist with converting to/from the mant+exp
+ * representation.
+ */
+struct qm_fqd_stashing {
+ /* See QM_STASHING_EXCL_<...> */
+ u8 exclusive;
+ /* Numbers of cachelines */
+ u8 cl; /* _res[6-7], as[4-5], ds[2-3], cs[0-1] */
+};
+
+struct qm_fqd_oac {
+ /* "Overhead Accounting Control", see QM_OAC_<...> */
+ u8 oac; /* oac[6-7], _res[0-5] */
+ /* Two's-complement value (-128 to +127) */
+ s8 oal; /* "Overhead Accounting Length" */
+};
+
+struct qm_fqd {
+ /* _res[6-7], orprws[3-5], oa[2], olws[0-1] */
+ u8 orpc;
+ u8 cgid;
+ __be16 fq_ctrl; /* See QM_FQCTRL_<...> */
+ __be16 dest_wq; /* channel[3-15], wq[0-2] */
+ __be16 ics_cred; /* 15-bit */
+ /*
+ * For "Initialize Frame Queue" commands, the write-enable mask
+ * determines whether 'td' or 'oac_init' is observed. For query
+ * commands, this field is always 'td', and 'oac_query' (below) reflects
+ * the Overhead ACcounting values.
+ */
+ union {
+ __be16 td; /* "Taildrop": _res[13-15], mant[5-12], exp[0-4] */
+ struct qm_fqd_oac oac_init;
+ };
+ __be32 context_b;
+ union {
+ /* Treat it as 64-bit opaque */
+ __be64 opaque;
+ struct {
+ __be32 hi;
+ __be32 lo;
+ };
+ /* Treat it as s/w portal stashing config */
+ /* see "FQD Context_A field used for [...]" */
+ struct {
+ struct qm_fqd_stashing stashing;
+ /*
+ * 48-bit address of FQ context to
+ * stash, must be cacheline-aligned
+ */
+ __be16 context_hi;
+ __be32 context_lo;
+ } __packed;
+ } context_a;
+ struct qm_fqd_oac oac_query;
+} __packed;
+
+#define QM_FQD_CHAN_OFF 3
+#define QM_FQD_WQ_MASK GENMASK(2, 0)
+#define QM_FQD_TD_EXP_MASK GENMASK(4, 0)
+#define QM_FQD_TD_MANT_OFF 5
+#define QM_FQD_TD_MANT_MASK GENMASK(12, 5)
+#define QM_FQD_TD_MAX 0xe0000000
+#define QM_FQD_TD_MANT_MAX 0xff
+#define QM_FQD_OAC_OFF 6
+#define QM_FQD_AS_OFF 4
+#define QM_FQD_DS_OFF 2
+#define QM_FQD_XS_MASK 0x3
+
+/* 64-bit converters for context_hi/lo */
+static inline u64 qm_fqd_stashing_get64(const struct qm_fqd *fqd)
+{
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline dma_addr_t qm_fqd_stashing_addr(const struct qm_fqd *fqd)
+{
+ return be64_to_cpu(fqd->context_a.opaque) & 0xffffffffffffULL;
+}
+
+static inline u64 qm_fqd_context_a_get64(const struct qm_fqd *fqd)
+{
+ return qm_fqd_stashing_get64(fqd);
+}
+
+static inline void qm_fqd_stashing_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.context_hi = upper_32_bits(addr);
+ fqd->context_a.context_lo = lower_32_bits(addr);
+}
+
+static inline void qm_fqd_context_a_set64(struct qm_fqd *fqd, u64 addr)
+{
+ fqd->context_a.hi = cpu_to_be16(upper_32_bits(addr));
+ fqd->context_a.lo = cpu_to_be32(lower_32_bits(addr));
+}
+
+/* convert a threshold value into mant+exp representation */
+static inline int qm_fqd_set_taildrop(struct qm_fqd *fqd, u32 val,
+ int roundup)
+{
+ u32 e = 0;
+ int td, oddbit = 0;
+
+ if (val > QM_FQD_TD_MAX)
+ return -ERANGE;
+
+ while (val > QM_FQD_TD_MANT_MAX) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+
+ td = (val << QM_FQD_TD_MANT_OFF) & QM_FQD_TD_MANT_MASK;
+ td |= (e & QM_FQD_TD_EXP_MASK);
+ fqd->td = cpu_to_be16(td);
+ return 0;
+}
+/* and the other direction */
+static inline int qm_fqd_get_taildrop(const struct qm_fqd *fqd)
+{
+ int td = be16_to_cpu(fqd->td);
+
+ return ((td & QM_FQD_TD_MANT_MASK) >> QM_FQD_TD_MANT_OFF)
+ << (td & QM_FQD_TD_EXP_MASK);
+}
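/*
 * Worked example (illustrative, not part of this header): a requested
 * threshold of 0x4000 (16384) exceeds QM_FQD_TD_MANT_MAX, so it is halved
 * seven times, giving mant = 128 and exp = 7; qm_fqd_get_taildrop() then
 * reconstructs 128 << 7 == 16384, i.e. the value round-trips exactly.
 * Thresholds that are not of the form mant * 2^exp are rounded down, or
 * up when 'roundup' is set.
 */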
+
+static inline void qm_fqd_set_stashing(struct qm_fqd *fqd, u8 as, u8 ds, u8 cs)
+{
+ struct qm_fqd_stashing *st = &fqd->context_a.stashing;
+
+ st->cl = ((as & QM_FQD_XS_MASK) << QM_FQD_AS_OFF) |
+ ((ds & QM_FQD_XS_MASK) << QM_FQD_DS_OFF) |
+ (cs & QM_FQD_XS_MASK);
+}
+
+static inline u8 qm_fqd_get_stashing(const struct qm_fqd *fqd)
+{
+ return fqd->context_a.stashing.cl;
+}
+
+static inline void qm_fqd_set_oac(struct qm_fqd *fqd, u8 val)
+{
+ fqd->oac_init.oac = val << QM_FQD_OAC_OFF;
+}
+
+static inline void qm_fqd_set_oal(struct qm_fqd *fqd, s8 val)
+{
+ fqd->oac_init.oal = val;
+}
+
+static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
+{
+ fqd->dest_wq = cpu_to_be16((ch << QM_FQD_CHAN_OFF) |
+ (wq & QM_FQD_WQ_MASK));
+}
+
+static inline int qm_fqd_get_chan(const struct qm_fqd *fqd)
+{
+ return be16_to_cpu(fqd->dest_wq) >> QM_FQD_CHAN_OFF;
+}
+
+static inline int qm_fqd_get_wq(const struct qm_fqd *fqd)
+{
+ return be16_to_cpu(fqd->dest_wq) & QM_FQD_WQ_MASK;
+}
+
+/* See "Frame Queue Descriptor (FQD)" */
+/* Frame Queue Descriptor (FQD) field 'fq_ctrl' uses these constants */
+#define QM_FQCTRL_MASK 0x07ff /* 'fq_ctrl' flags; */
+#define QM_FQCTRL_CGE 0x0400 /* Congestion Group Enable */
+#define QM_FQCTRL_TDE 0x0200 /* Tail-Drop Enable */
+#define QM_FQCTRL_CTXASTASHING 0x0080 /* Context-A stashing */
+#define QM_FQCTRL_CPCSTASH 0x0040 /* CPC Stash Enable */
+#define QM_FQCTRL_FORCESFDR 0x0008 /* High-priority SFDRs */
+#define QM_FQCTRL_AVOIDBLOCK 0x0004 /* Don't block active */
+#define QM_FQCTRL_HOLDACTIVE 0x0002 /* Hold active in portal */
+#define QM_FQCTRL_PREFERINCACHE 0x0001 /* Aggressively cache FQD */
+#define QM_FQCTRL_LOCKINCACHE QM_FQCTRL_PREFERINCACHE /* older naming */
+
+/* See "FQD Context_A field used for [...] */
+/* Frame Queue Descriptor (FQD) field 'CONTEXT_A' uses these constants */
+#define QM_STASHING_EXCL_ANNOTATION 0x04
+#define QM_STASHING_EXCL_DATA 0x02
+#define QM_STASHING_EXCL_CTX 0x01
+
+/* See "Intra Class Scheduling" */
+/* FQD field 'OAC' (Overhead ACcounting) uses these constants */
+#define QM_OAC_ICS 0x2 /* Accounting for Intra-Class Scheduling */
+#define QM_OAC_CG 0x1 /* Accounting for Congestion Groups */
+
+/*
+ * This struct represents the 32-bit "WR_PARM_[GYR]" parameters in CGR fields
+ * and associated commands/responses. The WRED parameters are calculated from
+ * these fields as follows;
+ * MaxTH = MA * (2 ^ Mn)
+ * Slope = SA / (2 ^ Sn)
+ * MaxP = 4 * (Pn + 1)
+ */
+struct qm_cgr_wr_parm {
+ /* MA[24-31], Mn[19-23], SA[12-18], Sn[6-11], Pn[0-5] */
+ u32 word;
+};
+/*
+ * This struct represents the 13-bit "CS_THRES" CGR field. In the corresponding
+ * management commands, this is padded to a 16-bit structure field, so that's
+ * how we represent it here. The congestion state threshold is calculated from
+ * these fields as follows;
+ * CS threshold = TA * (2 ^ Tn)
+ */
+struct qm_cgr_cs_thres {
+ /* _res[13-15], TA[5-12], Tn[0-4] */
+ u16 word;
+};
+/*
+ * This identical structure of CGR fields is present in the "Init/Modify CGR"
+ * commands and the "Query CGR" result. It's factored out here into its own
+ * struct.
+ */
+struct __qm_mc_cgr {
+ struct qm_cgr_wr_parm wr_parm_g;
+ struct qm_cgr_wr_parm wr_parm_y;
+ struct qm_cgr_wr_parm wr_parm_r;
+ u8 wr_en_g; /* boolean, use QM_CGR_EN */
+ u8 wr_en_y; /* boolean, use QM_CGR_EN */
+ u8 wr_en_r; /* boolean, use QM_CGR_EN */
+ u8 cscn_en; /* boolean, use QM_CGR_EN */
+ union {
+ struct {
+ u16 cscn_targ_upd_ctrl; /* use QM_CGR_TARG_UDP_CTRL_* */
+ u16 cscn_targ_dcp_low; /* CSCN_TARG_DCP low-16bits */
+ };
+ u32 cscn_targ; /* use QM_CGR_TARG_* */
+ };
+ u8 cstd_en; /* boolean, use QM_CGR_EN */
+ u8 cs; /* boolean, only used in query response */
+ struct qm_cgr_cs_thres cs_thres; /* use qm_cgr_cs_thres_set64() */
+ u8 mode; /* QMAN_CGR_MODE_FRAME not supported in rev1.0 */
+} __packed;
+#define QM_CGR_EN 0x01 /* For wr_en_*, cscn_en, cstd_en */
+#define QM_CGR_TARG_UDP_CTRL_WRITE_BIT 0x8000 /* value written to portal bit*/
+#define QM_CGR_TARG_UDP_CTRL_DCP 0x4000 /* 0: SWP, 1: DCP */
+#define QM_CGR_TARG_PORTAL(n) (0x80000000 >> (n)) /* s/w portal, 0-9 */
+#define QM_CGR_TARG_FMAN0 0x00200000 /* direct-connect portal: fman0 */
+#define QM_CGR_TARG_FMAN1 0x00100000 /* : fman1 */
+/* Convert CGR thresholds to/from "cs_thres" format */
+static inline u64 qm_cgr_cs_thres_get64(const struct qm_cgr_cs_thres *th)
+{
+ return ((th->word >> 5) & 0xff) << (th->word & 0x1f);
+}
+
+static inline int qm_cgr_cs_thres_set64(struct qm_cgr_cs_thres *th, u64 val,
+ int roundup)
+{
+ u32 e = 0;
+ int oddbit = 0;
+
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ th->word = ((val & 0xff) << 5) | (e & 0x1f);
+ return 0;
+}
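+/*
+ * A worked example (illustrative): qm_cgr_cs_thres_set64(&th, 65536, 1)
+ * halves the value nine times until it fits the 8-bit TA field, giving
+ * TA = 128 and Tn = 9, and qm_cgr_cs_thres_get64() then recovers
+ * 128 << 9 = 65536 exactly. Values that are not of the form TA * (2 ^ Tn)
+ * are rounded down, or up when 'roundup' is non-zero.
+ */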
+
+/* "Initialize FQ" */
+struct qm_mcc_initfq {
+ u8 __reserved1[2];
+ u16 we_mask; /* Write Enable Mask */
+ u32 fqid; /* 24-bit */
+ u16 count; /* Initialises 'count+1' FQDs */
+ struct qm_fqd fqd; /* the FQD fields go here */
+ u8 __reserved2[30];
+} __packed;
+/* "Initialize/Modify CGR" */
+struct qm_mcc_initcgr {
+ u8 __reserve1[2];
+ u16 we_mask; /* Write Enable Mask */
+ struct __qm_mc_cgr cgr; /* CGR fields */
+ u8 __reserved2[2];
+ u8 cgid;
+ u8 __reserved3[32];
+} __packed;
+
+/* INITFQ-specific flags */
+#define QM_INITFQ_WE_MASK 0x01ff /* 'Write Enable' flags; */
+#define QM_INITFQ_WE_OAC 0x0100
+#define QM_INITFQ_WE_ORPC 0x0080
+#define QM_INITFQ_WE_CGID 0x0040
+#define QM_INITFQ_WE_FQCTRL 0x0020
+#define QM_INITFQ_WE_DESTWQ 0x0010
+#define QM_INITFQ_WE_ICSCRED 0x0008
+#define QM_INITFQ_WE_TDTHRESH 0x0004
+#define QM_INITFQ_WE_CONTEXTB 0x0002
+#define QM_INITFQ_WE_CONTEXTA 0x0001
+/* INITCGR/MODIFYCGR-specific flags */
+#define QM_CGR_WE_MASK 0x07ff /* 'Write Enable Mask'; */
+#define QM_CGR_WE_WR_PARM_G 0x0400
+#define QM_CGR_WE_WR_PARM_Y 0x0200
+#define QM_CGR_WE_WR_PARM_R 0x0100
+#define QM_CGR_WE_WR_EN_G 0x0080
+#define QM_CGR_WE_WR_EN_Y 0x0040
+#define QM_CGR_WE_WR_EN_R 0x0020
+#define QM_CGR_WE_CSCN_EN 0x0010
+#define QM_CGR_WE_CSCN_TARG 0x0008
+#define QM_CGR_WE_CSTD_EN 0x0004
+#define QM_CGR_WE_CS_THRES 0x0002
+#define QM_CGR_WE_MODE 0x0001
+
+#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+
+ /* Portal and Frame Queues */
+/* Represents a managed portal */
+struct qman_portal;
+
+/*
+ * This object type represents QMan frame queue descriptors (FQD), it is
+ * cacheline-aligned, and initialised by qman_create_fq(). The structure is
+ * defined further down.
+ */
+struct qman_fq;
+
+/*
+ * This object type represents a QMan congestion group, it is defined further
+ * down.
+ */
+struct qman_cgr;
+
+/*
+ * This enum, and the callback type that returns it, are used when handling
+ * dequeued frames via DQRR. Note that for "null" callbacks registered with the
+ * portal object (for handling dequeues that do not demux because contextB is
+ * NULL), the return value *MUST* be qman_cb_dqrr_consume.
+ */
+enum qman_cb_dqrr_result {
+ /* DQRR entry can be consumed */
+ qman_cb_dqrr_consume,
+ /* Like _consume, but requests parking - FQ must be held-active */
+ qman_cb_dqrr_park,
+ /* Does not consume, for DCA mode only. */
+ qman_cb_dqrr_defer,
+ /*
+ * Stop processing without consuming this ring entry. Exits the current
+ * qman_p_poll_dqrr() or interrupt-handling, as appropriate. If within
+ * an interrupt handler, the callback would typically call
+ * qman_irqsource_remove(QM_PIRQ_DQRI) before returning this value,
+ * otherwise the interrupt will reassert immediately.
+ */
+ qman_cb_dqrr_stop,
+ /* Like qman_cb_dqrr_stop, but consumes the current entry. */
+ qman_cb_dqrr_consume_stop
+};
+typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr);
+
+/*
+ * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
+ * are always consumed after the callback returns.
+ */
+typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
+ const union qm_mr_entry *msg);
+
+/*
+ * s/w-visible states. Ie. tentatively scheduled + truly scheduled + active +
+ * held-active + held-suspended are just "sched". Things like "retired" will not
+ * be assumed until it is complete (ie. QMAN_FQ_STATE_CHANGING is set until
+ * then, to indicate it's completing and to gate attempts to retry the retire
+ * command). Note, park commands do not set QMAN_FQ_STATE_CHANGING because it's
+ * technically impossible in the case of enqueue DCAs (which refer to DQRR ring
+ * index rather than the FQ that ring entry corresponds to), so repeated park
+ * commands are allowed (if you're silly enough to try) but won't change FQ
+ * state, and the resulting park notifications move FQs from "sched" to
+ * "parked".
+ */
+enum qman_fq_state {
+ qman_fq_state_oos,
+ qman_fq_state_parked,
+ qman_fq_state_sched,
+ qman_fq_state_retired
+};
+
+#define QMAN_FQ_STATE_CHANGING 0x80000000 /* 'state' is changing */
+#define QMAN_FQ_STATE_NE 0x40000000 /* retired FQ isn't empty */
+#define QMAN_FQ_STATE_ORL 0x20000000 /* retired FQ has ORL */
+#define QMAN_FQ_STATE_BLOCKOOS 0xe0000000 /* if any are set, no OOS */
+#define QMAN_FQ_STATE_CGR_EN 0x10000000 /* CGR enabled */
+#define QMAN_FQ_STATE_VDQCR 0x08000000 /* being volatile dequeued */
+
+/*
+ * Frame queue objects (struct qman_fq) are stored within memory passed to
+ * qman_create_fq(), as this allows stashing of caller-provided demux callback
+ * pointers at no extra cost to stashing of (driver-internal) FQ state. If the
+ * caller wishes to add per-FQ state and have it benefit from dequeue-stashing,
+ * they should;
+ *
+ * (a) extend the qman_fq structure with their state; eg.
+ *
+ * // myfq is allocated and driver_fq callbacks filled in;
+ * struct my_fq {
+ * struct qman_fq base;
+ * int an_extra_field;
+ * [ ... add other fields to be associated with each FQ ...]
+ * } *myfq = some_my_fq_allocator();
+ * struct qman_fq *fq = qman_create_fq(fqid, flags, &myfq->base);
+ *
+ * // in a dequeue callback, access extra fields from 'fq' via a cast;
+ * struct my_fq *myfq = (struct my_fq *)fq;
+ * do_something_with(myfq->an_extra_field);
+ * [...]
+ *
+ * (b) when and if configuring the FQ for context stashing, specify however
+ * many cachelines are required to stash 'struct my_fq', to accelerate not
+ * only the QMan driver but the callback as well.
+ */
+
+struct qman_fq_cb {
+ qman_cb_dqrr dqrr; /* for dequeued frames */
+ qman_cb_mr ern; /* for s/w ERNs */
+ qman_cb_mr fqs; /* frame-queue state changes*/
+};
+
+struct qman_fq {
+ /* Caller of qman_create_fq() provides these demux callbacks */
+ struct qman_fq_cb cb;
+ /*
+ * These are internal to the driver, don't touch. In particular, they
+ * may change, be removed, or extended (so you shouldn't rely on
+ * sizeof(qman_fq) being a constant).
+ */
+ u32 fqid, idx;
+ unsigned long flags;
+ enum qman_fq_state state;
+ int cgr_groupid;
+};
+
+/*
+ * This callback type is used when handling congestion group entry/exit.
+ * 'congested' is non-zero on congestion-entry, and zero on congestion-exit.
+ */
+typedef void (*qman_cb_cgr)(struct qman_portal *qm,
+ struct qman_cgr *cgr, int congested);
+
+struct qman_cgr {
+ /* Set these prior to qman_create_cgr() */
+ u32 cgrid; /* 0..255, but u32 to allow specials like -1, 256, etc.*/
+ qman_cb_cgr cb;
+ /* These are private to the driver */
+ u16 chan; /* portal channel this object is created on */
+ struct list_head node;
+};
+
+/* Flags to qman_create_fq() */
+#define QMAN_FQ_FLAG_NO_ENQUEUE 0x00000001 /* can't enqueue */
+#define QMAN_FQ_FLAG_NO_MODIFY 0x00000002 /* can only enqueue */
+#define QMAN_FQ_FLAG_TO_DCPORTAL 0x00000004 /* consumed by CAAM/PME/Fman */
+#define QMAN_FQ_FLAG_DYNAMIC_FQID 0x00000020 /* (de)allocate fqid */
+
+/* Flags to qman_init_fq() */
+#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
+#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+
+ /* Portal Management */
+/**
+ * qman_p_irqsource_add - add processing sources to be interrupt-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Adds processing sources that should be interrupt-driven (rather than
+ * processed via qman_poll_***() functions).
+ */
+void qman_p_irqsource_add(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_p_irqsource_remove - remove processing sources from being int-driven
+ * @bits: bitmask of QM_PIRQ_**I processing sources
+ *
+ * Removes processing sources from being interrupt-driven, so that they will
+ * instead be processed via qman_poll_***() functions.
+ */
+void qman_p_irqsource_remove(struct qman_portal *p, u32 bits);
+
+/**
+ * qman_affine_cpus - return a mask of cpus that have affine portals
+ */
+const cpumask_t *qman_affine_cpus(void);
+
+/**
+ * qman_affine_channel - return the channel ID of a portal
+ * @cpu: the cpu whose affine portal is the subject of the query
+ *
+ * If @cpu is -1, the affine portal for the current CPU will be used. It is a
+ * bug to call this function for any value of @cpu (other than -1) that is not a
+ * member of the mask returned from qman_affine_cpus().
+ */
+u16 qman_affine_channel(int cpu);
+
+/**
+ * qman_get_affine_portal - return the portal pointer affine to cpu
+ * @cpu: the cpu whose affine portal is the subject of the query
+ */
+struct qman_portal *qman_get_affine_portal(int cpu);
+
+/**
+ * qman_p_poll_dqrr - process DQRR (fast-path) entries
+ * @limit: the maximum number of DQRR entries to process
+ *
+ * Use of this function requires that DQRR processing not be interrupt-driven.
+ * The return value represents the number of DQRR entries processed.
+ */
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit);
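+/*
+ * A minimal polling sketch (illustrative; assumes the DQRI source has been
+ * removed from the portal's interrupt sources, e.g. via
+ * qman_p_irqsource_remove(p, QM_PIRQ_DQRI)):
+ *
+ *   struct qman_portal *p = qman_get_affine_portal(smp_processor_id());
+ *   int cleaned = qman_p_poll_dqrr(p, 16);
+ *
+ * 'cleaned' is the number of DQRR entries processed in this pass; a NAPI
+ * poll routine would typically pass its remaining budget as @limit instead.
+ */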
+
+/**
+ * qman_p_static_dequeue_add - Add pool channels to the portal SDQCR
+ * @pools: bit-mask of pool channels, using QM_SDQCR_CHANNELS_POOL(n)
+ *
+ * Adds a set of pool channels to the portal's static dequeue command register
+ * (SDQCR). The requested pools are limited to those the portal has dequeue
+ * access to.
+ */
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools);
+
+ /* FQ management */
+/**
+ * qman_create_fq - Allocates a FQ
+ * @fqid: the index of the FQD to encapsulate, must be "Out of Service"
+ * @flags: bit-mask of QMAN_FQ_FLAG_*** options
+ * @fq: memory for storing the 'fq', with callbacks filled in
+ *
+ * Creates a frame queue object for the given @fqid, unless the
+ * QMAN_FQ_FLAG_DYNAMIC_FQID flag is set in @flags, in which case a FQID is
+ * dynamically allocated (or the function fails if none are available). Once
+ * created, the caller should not touch the memory at 'fq' except as extended to
+ * adjacent memory for user-defined fields (see the definition of "struct
+ * qman_fq" for more info). NO_MODIFY is only intended for enqueuing to
+ * pre-existing frame-queues that aren't to be otherwise interfered with, it
+ * prevents all other modifications to the frame queue. The TO_DCPORTAL flag
+ * causes the driver to honour any contextB modifications requested in the
+ * qman_init_fq() API, as this indicates the frame queue will be consumed by a
+ * direct-connect portal (PME, CAAM, or Fman). When frame queues are consumed by
+ * software portals, the contextB field is controlled by the driver and can't be
+ * modified by the caller.
+ */
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
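+/*
+ * A minimal creation sketch (illustrative; error handling trimmed, and the
+ * my_*_cb handlers are hypothetical caller-supplied functions matching the
+ * qman_cb_dqrr/qman_cb_mr typedefs above):
+ *
+ *   struct qman_fq *fq = kzalloc(sizeof(*fq), GFP_KERNEL);
+ *   int err;
+ *
+ *   fq->cb.dqrr = my_dqrr_cb;
+ *   fq->cb.ern = my_ern_cb;
+ *   fq->cb.fqs = my_fqs_cb;
+ *   err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
+ *
+ * The fqid argument is ignored here because QMAN_FQ_FLAG_DYNAMIC_FQID asks
+ * the driver to allocate one; qman_fq_fqid(fq) reports it afterwards.
+ */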
+
+/**
+ * qman_destroy_fq - Deallocates a FQ
+ * @fq: the frame queue object to release
+ *
+ * The memory for this frame queue object ('fq' provided in qman_create_fq()) is
+ * not deallocated but the caller regains ownership, to do with as desired. The
+ * FQ must be in the 'out-of-service' or in the 'parked' state.
+ */
+void qman_destroy_fq(struct qman_fq *fq);
+
+/**
+ * qman_fq_fqid - Queries the frame queue ID of a FQ object
+ * @fq: the frame queue object to query
+ */
+u32 qman_fq_fqid(struct qman_fq *fq);
+
+/**
+ * qman_init_fq - Initialises FQ fields, leaves the FQ "parked" or "scheduled"
+ * @fq: the frame queue object to modify, must be 'parked' or new.
+ * @flags: bit-mask of QMAN_INITFQ_FLAG_*** options
+ * @opts: the FQ-modification settings, as defined in the low-level API
+ *
+ * The @opts parameter comes from the low-level portal API. Select
+ * QMAN_INITFQ_FLAG_SCHED in @flags to cause the frame queue to be scheduled
+ * rather than parked. NB, @opts can be NULL.
+ *
+ * Note that some fields and options within @opts may be ignored or overwritten
+ * by the driver;
+ * 1. the 'count' and 'fqid' fields are always ignored (this operation only
+ * affects one frame queue: @fq).
+ * 2. the QM_INITFQ_WE_CONTEXTB option of the 'we_mask' field and the associated
+ * 'fqd' structure's 'context_b' field are sometimes overwritten;
+ * - if @fq was not created with QMAN_FQ_FLAG_TO_DCPORTAL, then context_b is
+ * initialised to a value used by the driver for demux.
+ * - if context_b is initialised for demux, so is context_a in case stashing
+ * is requested (see item 4).
+ * (So caller control of context_b is only possible for TO_DCPORTAL frame queue
+ * objects.)
+ * 3. if @flags contains QMAN_INITFQ_FLAG_LOCAL, the 'fqd' structure's
+ * 'dest::channel' field will be overwritten to match the portal used to issue
+ * the command. If the WE_DESTWQ write-enable bit had already been set by the
+ * caller, the channel workqueue will be left as-is, otherwise the write-enable
+ * bit is set and the workqueue is set to a default of 4. If the "LOCAL" flag
+ * isn't set, the destination channel/workqueue fields and the write-enable bit
+ * are left as-is.
+ * 4. if the driver overwrites context_a/b for demux, then if
+ * QM_INITFQ_WE_CONTEXTA is set, the driver will only overwrite
+ * context_a.address fields and will leave the stashing fields provided by the
+ * user alone, otherwise it will zero out the context_a.stashing fields.
+ */
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
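+/*
+ * A minimal initialisation sketch (illustrative; 'channel' stands for a
+ * destination channel obtained elsewhere, e.g. from qman_affine_channel(-1),
+ * and error handling is trimmed):
+ *
+ *   struct qm_mcc_initfq opts;
+ *
+ *   memset(&opts, 0, sizeof(opts));
+ *   opts.we_mask = QM_INITFQ_WE_DESTWQ;
+ *   qm_fqd_set_destwq(&opts.fqd, channel, 3);
+ *   err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ */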
+
+/**
+ * qman_schedule_fq - Schedules a FQ
+ * @fq: the frame queue object to schedule, must be 'parked'
+ *
+ * Schedules the frame queue, which must be Parked; this takes it to the
+ * Tentatively-Scheduled or Truly-Scheduled state depending on its fill-level.
+ */
+int qman_schedule_fq(struct qman_fq *fq);
+
+/**
+ * qman_retire_fq - Retires a FQ
+ * @fq: the frame queue object to retire
+ * @flags: FQ flags (QMAN_FQ_STATE*) if retirement completes immediately
+ *
+ * Retires the frame queue. This returns zero if it succeeds immediately, +1 if
+ * the retirement was started asynchronously, otherwise it returns negative for
+ * failure. When this function returns zero, @flags is set to indicate whether
+ * the retired FQ is empty and/or whether it has any ORL fragments (to show up
+ * as ERNs). Otherwise the corresponding flags will be known when a subsequent
+ * FQRN message shows up on the portal's message ring.
+ *
+ * NB, if the retirement is asynchronous (the FQ was in the Truly Scheduled or
+ * Active state), the completion will be via the message ring as a FQRN - but
+ * the corresponding callback may occur before this function returns!! Ie. the
+ * caller should be prepared to accept the callback as the function is called,
+ * not only once it has returned.
+ */
+int qman_retire_fq(struct qman_fq *fq, u32 *flags);
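+/*
+ * A sketch of handling the three outcomes described above (illustrative;
+ * wait_for_fqrn() and drain_fq() are hypothetical caller helpers):
+ *
+ *   u32 flags;
+ *   int err = qman_retire_fq(fq, &flags);
+ *
+ *   if (err < 0)
+ *     return err;        // retirement could not be initiated
+ *   if (err == 1)
+ *     wait_for_fqrn();   // completion arrives later as an FQRN on the MR
+ *   else if (flags & QMAN_FQ_STATE_NE)
+ *     drain_fq();        // retired immediately, but frames remain
+ */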
+
+/**
+ * qman_oos_fq - Puts a FQ "out of service"
+ * @fq: the frame queue object to be put out-of-service, must be 'retired'
+ *
+ * The frame queue must be retired and empty, and if any order restoration list
+ * was released as ERNs at the time of retirement, they must all be consumed.
+ */
+int qman_oos_fq(struct qman_fq *fq);
+
+/**
+ * qman_enqueue - Enqueue a frame to a frame queue
+ * @fq: the frame queue object to enqueue to
+ * @fd: a descriptor of the frame to be enqueued
+ *
+ * Fills an entry in the EQCR of the issuing portal to enqueue the frame
+ * described by @fd. The descriptor details are copied from @fd to the EQCR
+ * entry; the 'pid' field is ignored. The return value is non-zero on error,
+ * such as the ring being full.
+ */
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd);
+
+/**
+ * qman_alloc_fqid_range - Allocate a contiguous range of FQIDs
+ * @result: is set by the API to the base FQID of the allocated range
+ * @count: the number of FQIDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_fqid_range(u32 *result, u32 count);
+#define qman_alloc_fqid(result) qman_alloc_fqid_range(result, 1)
+
+/**
+ * qman_release_fqid - Release the specified frame queue ID
+ * @fqid: the FQID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * FQID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_fqid(u32 fqid);
+
+ /* Pool-channel management */
+/**
+ * qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
+ * @result: is set by the API to the base pool-channel ID of the allocated range
+ * @count: the number of pool-channel IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_pool_range(u32 *result, u32 count);
+#define qman_alloc_pool(result) qman_alloc_pool_range(result, 1)
+
+/**
+ * qman_release_pool - Release the specified pool-channel ID
+ * @id: the pool-chan ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * pool-channel ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_pool(u32 id);
+
+ /* CGR management */
+/**
+ * qman_create_cgr - Register a congestion group object
+ * @cgr: the 'cgr' object, with fields filled in
+ * @flags: QMAN_CGR_FLAG_* values
+ * @opts: optional state of CGR settings
+ *
+ * Registers this object to receive congestion entry/exit callbacks on the
+ * portal affine to the cpu on which this API is executed. If opts is
+ * NULL then only the callback (cgr->cb) function is registered. If @flags
+ * contains QMAN_CGR_FLAG_USE_INIT, then an init hw command (which will reset
+ * any unspecified parameters) will be used rather than a modify hw command
+ * (which only modifies the specified parameters).
+ */
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts);
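+/*
+ * A minimal registration sketch (illustrative; error handling trimmed and
+ * my_cgr_cb is a hypothetical qman_cb_cgr handler). It enables congestion
+ * state change notifications and sets a congestion-state threshold of 65536:
+ *
+ *   struct qm_mcc_initcgr opts;
+ *   struct qman_cgr cgr = { .cb = my_cgr_cb };
+ *
+ *   err = qman_alloc_cgrid(&cgr.cgrid);
+ *   memset(&opts, 0, sizeof(opts));
+ *   opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES;
+ *   opts.cgr.cscn_en = QM_CGR_EN;
+ *   qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, 64 * 1024, 1);
+ *   err = qman_create_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+ */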
+
+/**
+ * qman_delete_cgr - Deregisters a congestion group object
+ * @cgr: the 'cgr' object to deregister
+ *
+ * "Unplugs" this CGR object from the portal affine to the cpu on which this API
+ * is executed. This must be executed on the same affine portal on which it was
+ * created.
+ */
+int qman_delete_cgr(struct qman_cgr *cgr);
+
+/**
+ * qman_delete_cgr_safe - Deregisters a congestion group object from any CPU
+ * @cgr: the 'cgr' object to deregister
+ *
+ * This will select the proper CPU and run qman_delete_cgr() there.
+ */
+void qman_delete_cgr_safe(struct qman_cgr *cgr);
+
+/**
+ * qman_query_cgr_congested - Queries CGR's congestion status
+ * @cgr: the 'cgr' object to query
+ * @result: returns 'cgr's congestion status, 1 (true) if congested
+ */
+int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result);
+
+/**
+ * qman_alloc_cgrid_range - Allocate a contiguous range of CGR IDs
+ * @result: is set by the API to the base CGR ID of the allocated range
+ * @count: the number of CGR IDs required
+ *
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_alloc_cgrid_range(u32 *result, u32 count);
+#define qman_alloc_cgrid(result) qman_alloc_cgrid_range(result, 1)
+
+/**
+ * qman_release_cgrid - Release the specified CGR ID
+ * @id: the CGR ID to be released back to the resource pool
+ *
+ * This function can also be used to seed the allocator with
+ * CGR ID ranges that it can subsequently allocate from.
+ * Returns 0 on success, or a negative error code.
+ */
+int qman_release_cgrid(u32 id);
+
+#endif /* __FSL_QMAN_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb8e3b6febdf..c2119008990a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
+ TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
#undef R
};
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
new file mode 100644
index 000000000000..ab68640a18d0
--- /dev/null
+++ b/include/trace/events/cgroup.h
@@ -0,0 +1,163 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+
+#if !defined(_TRACE_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CGROUP_H
+
+#include <linux/cgroup.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(cgroup_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( u16, ss_mask )
+ __string( name, root->name )
+ ),
+
+ TP_fast_assign(
+ __entry->root = root->hierarchy_id;
+ __entry->ss_mask = root->subsys_mask;
+ __assign_str(name, root->name);
+ ),
+
+ TP_printk("root=%d ss_mask=%#x name=%s",
+ __entry->root, __entry->ss_mask, __get_str(name))
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_setup_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_destroy_root,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DEFINE_EVENT(cgroup_root, cgroup_remount,
+
+ TP_PROTO(struct cgroup_root *root),
+
+ TP_ARGS(root)
+);
+
+DECLARE_EVENT_CLASS(cgroup,
+
+ TP_PROTO(struct cgroup *cgrp),
+
+ TP_ARGS(cgrp),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( int, id )
+ __field( int, level )
+ __dynamic_array(char, path,
+ cgrp->kn ? cgroup_path(cgrp, NULL, 0) + 1
+ : strlen("(null)"))
+ ),
+
+ TP_fast_assign(
+ __entry->root = cgrp->root->hierarchy_id;
+ __entry->id = cgrp->id;
+ __entry->level = cgrp->level;
+ if (cgrp->kn)
+ cgroup_path(cgrp, __get_dynamic_array(path),
+ __get_dynamic_array_len(path));
+ else
+ __assign_str(path, "(null)");
+ ),
+
+ TP_printk("root=%d id=%d level=%d path=%s",
+ __entry->root, __entry->id, __entry->level, __get_str(path))
+);
+
+DEFINE_EVENT(cgroup, cgroup_mkdir,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rmdir,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_release,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DEFINE_EVENT(cgroup, cgroup_rename,
+
+ TP_PROTO(struct cgroup *cgroup),
+
+ TP_ARGS(cgroup)
+);
+
+DECLARE_EVENT_CLASS(cgroup_migrate,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup),
+
+ TP_STRUCT__entry(
+ __field( int, dst_root )
+ __field( int, dst_id )
+ __field( int, dst_level )
+ __dynamic_array(char, dst_path,
+ dst_cgrp->kn ? cgroup_path(dst_cgrp, NULL, 0) + 1
+ : strlen("(null)"))
+ __field( int, pid )
+ __string( comm, task->comm )
+ ),
+
+ TP_fast_assign(
+ __entry->dst_root = dst_cgrp->root->hierarchy_id;
+ __entry->dst_id = dst_cgrp->id;
+ __entry->dst_level = dst_cgrp->level;
+ if (dst_cgrp->kn)
+ cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
+ __get_dynamic_array_len(dst_path));
+ else
+ __assign_str(dst_path, "(null)");
+ __entry->pid = task->pid;
+ __assign_str(comm, task->comm);
+ ),
+
+ TP_printk("dst_root=%d dst_id=%d dst_level=%d dst_path=%s pid=%d comm=%s",
+ __entry->dst_root, __entry->dst_id, __entry->dst_level,
+ __get_str(dst_path), __entry->pid, __get_str(comm))
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
+
+ TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+
+ TP_ARGS(dst_cgrp, task, threadgroup)
+);
+
+#endif /* _TRACE_CGROUP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index dbfee7e86ba6..9b1462e38b82 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,10 +730,6 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
#define __NR_pkey_free 290
__SYSCALL(__NR_pkey_free, sys_pkey_free)
-#define __NR_pkey_get 291
-//__SYSCALL(__NR_pkey_get, sys_pkey_get)
-#define __NR_pkey_set 292
-//__SYSCALL(__NR_pkey_set, sys_pkey_set)
#undef __NR_syscalls
#define __NR_syscalls 291
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6965d0909554..cd2be1c8e9fb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -75,6 +75,7 @@ header-y += bpf_perf_event.h
header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
+header-y += bt-bmc.h
header-y += btrfs.h
header-y += can.h
header-y += capability.h
diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h
new file mode 100644
index 000000000000..d9ec766a63d0
--- /dev/null
+++ b/include/uapi/linux/bt-bmc.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_BT_BMC_H
+#define _UAPI_LINUX_BT_BMC_H
+
+#include <linux/ioctl.h>
+
+#define __BT_BMC_IOCTL_MAGIC 0xb1
+#define BT_BMC_IOCTL_SMS_ATN _IO(__BT_BMC_IOCTL_MAGIC, 0x00)
+
+#endif /* _UAPI_LINUX_BT_BMC_H */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index ac5eacd3055b..db4c253f8011 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -239,7 +239,17 @@ struct btrfs_ioctl_fs_info_args {
* Used by:
* struct btrfs_ioctl_feature_flags
*/
-#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0)
+/*
+ * Older kernels (< 4.9) on big-endian systems produced broken free space tree
+ * bitmaps, and btrfs-progs also used to corrupt the free space tree (versions
+ * < 4.7.3). If this bit is clear, then the free space tree cannot be trusted.
+ * btrfs-progs can also intentionally clear this bit to ask the kernel to
+ * rebuild the free space tree, however this might not work on older kernels
+ * that do not know about this bit. If not sure, clear the cache manually on
+ * first mount when booting older kernel versions.
+ */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE_VALID (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0)
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 099a4200732c..8e547231c1b7 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -119,8 +119,7 @@ struct ethtool_cmd {
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
__u32 speed)
{
-
- ep->speed = (__u16)speed;
+ ep->speed = (__u16)(speed & 0xFFFF);
ep->speed_hi = (__u16)(speed >> 16);
}
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index d812172d1d7b..e5a2e68b2236 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -612,6 +612,8 @@
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* Atomic 64-bit compare */
#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
@@ -619,6 +621,7 @@
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 262f0379d83a..5a78be518101 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -350,7 +350,7 @@ struct rtnexthop {
#define RTNH_F_OFFLOAD 8 /* offloaded route */
#define RTNH_F_LINKDOWN 16 /* carrier-down on nexthop */
-#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
/* Macros to handle hexthops */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
new file mode 100644
index 000000000000..75c270d839c8
--- /dev/null
+++ b/include/uapi/rdma/qedr-abi.h
@@ -0,0 +1,106 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __QEDR_USER_H__
+#define __QEDR_USER_H__
+
+#include <linux/types.h>
+
+#define QEDR_ABI_VERSION (8)
+
+/* user kernel communication data structures. */
+
+struct qedr_alloc_ucontext_resp {
+ __u64 db_pa;
+ __u32 db_size;
+
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_srq_wr;
+ __u32 sges_per_send_wr;
+ __u32 sges_per_recv_wr;
+ __u32 sges_per_srq_wr;
+ __u32 max_cqes;
+};
+
+struct qedr_alloc_pd_ureq {
+ __u64 rsvd1;
+};
+
+struct qedr_alloc_pd_uresp {
+ __u32 pd_id;
+};
+
+struct qedr_create_cq_ureq {
+ __u64 addr;
+ __u64 len;
+};
+
+struct qedr_create_cq_uresp {
+ __u32 db_offset;
+ __u16 icid;
+};
+
+struct qedr_create_qp_ureq {
+ __u32 qp_handle_hi;
+ __u32 qp_handle_lo;
+
+ /* SQ */
+ /* user space virtual address of SQ buffer */
+ __u64 sq_addr;
+
+ /* length of SQ buffer */
+ __u64 sq_len;
+
+ /* RQ */
+ /* user space virtual address of RQ buffer */
+ __u64 rq_addr;
+
+ /* length of RQ buffer */
+ __u64 rq_len;
+};
+
+struct qedr_create_qp_uresp {
+ __u32 qp_id;
+ __u32 atomic_supported;
+
+ /* SQ */
+ __u32 sq_db_offset;
+ __u16 sq_icid;
+
+ /* RQ */
+ __u32 rq_db_offset;
+ __u16 rq_icid;
+
+ __u32 rq_db2_offset;
+};
+
+#endif /* __QEDR_USER_H__ */
diff --git a/init/Makefile b/init/Makefile
index 7bc47ee31c36..c4fb45525d08 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -2,6 +2,8 @@
# Makefile for the linux kernel.
#
+ccflags-y := -fno-function-sections -fno-data-sections
+
obj-y := main.o version.o mounts.o
ifneq ($(CONFIG_BLK_DEV_INITRD),y)
obj-y += noinitramfs.o
diff --git a/init/main.c b/init/main.c
index a8a58e2794a5..2858be732f6d 100644
--- a/init/main.c
+++ b/init/main.c
@@ -789,6 +789,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
}
WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
+ add_latent_entropy();
return ret;
}
diff --git a/ipc/msgutil.c b/ipc/msgutil.c
index a521999de4f1..bf74eaa5c39f 100644
--- a/ipc/msgutil.c
+++ b/ipc/msgutil.c
@@ -53,7 +53,7 @@ static struct msg_msg *alloc_msg(size_t len)
size_t alen;
alen = min(len, DATALEN_MSG);
- msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL);
+ msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL_ACCOUNT);
if (msg == NULL)
return NULL;
@@ -65,7 +65,7 @@ static struct msg_msg *alloc_msg(size_t len)
while (len > 0) {
struct msg_msgseg *seg;
alen = min(len, DATALEN_SEG);
- seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL);
+ seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL_ACCOUNT);
if (seg == NULL)
goto out_err;
*pseg = seg;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 44066158f0d1..85bc9beb046d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -64,6 +64,9 @@
#include <linux/file.h>
#include <net/sock.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cgroup.h>
+
/*
* pidlists linger the following amount before being destroyed. The goal
* is avoiding frequent destruction in the middle of consecutive read calls
@@ -1176,6 +1179,8 @@ static void cgroup_destroy_root(struct cgroup_root *root)
struct cgroup *cgrp = &root->cgrp;
struct cgrp_cset_link *link, *tmp_link;
+ trace_cgroup_destroy_root(root);
+
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
BUG_ON(atomic_read(&root->nr_cgrps));
@@ -1874,6 +1879,9 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
strcpy(root->release_agent_path, opts.release_agent);
spin_unlock(&release_agent_path_lock);
}
+
+ trace_cgroup_remount(root);
+
out_unlock:
kfree(opts.release_agent);
kfree(opts.name);
@@ -2031,6 +2039,8 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
if (ret)
goto destroy_root;
+ trace_cgroup_setup_root(root);
+
/*
* There must be no failure case after here, since rebinding takes
* care of subsystems' refcounts, which are explicitly dropped in
@@ -2315,22 +2325,18 @@ static struct file_system_type cgroup2_fs_type = {
.fs_flags = FS_USERNS_MOUNT,
};
-static char *cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns)
+static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns)
{
struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
- int ret;
- ret = kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
- if (ret < 0 || ret >= buflen)
- return NULL;
- return buf;
+ return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
}
-char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
- struct cgroup_namespace *ns)
+int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
+ struct cgroup_namespace *ns)
{
- char *ret;
+ int ret;
mutex_lock(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
@@ -2357,12 +2363,12 @@ EXPORT_SYMBOL_GPL(cgroup_path_ns);
*
* Return value is the same as kernfs_path().
*/
-char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
struct cgroup_root *root;
struct cgroup *cgrp;
int hierarchy_id = 1;
- char *path = NULL;
+ int ret;
mutex_lock(&cgroup_mutex);
spin_lock_irq(&css_set_lock);
@@ -2371,16 +2377,15 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
if (root) {
cgrp = task_cgroup_from_root(task, root);
- path = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
+ ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
} else {
/* if no hierarchy exists, everyone is in "/" */
- if (strlcpy(buf, "/", buflen) < buflen)
- path = buf;
+ ret = strlcpy(buf, "/", buflen);
}
spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
- return path;
+ return ret;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
@@ -2830,6 +2835,10 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root);
cgroup_migrate_finish(&preloaded_csets);
+
+ if (!ret)
+ trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
+
return ret;
}
@@ -3611,6 +3620,8 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
mutex_lock(&cgroup_mutex);
ret = kernfs_rename(kn, new_parent, new_name_str);
+ if (!ret)
+ trace_cgroup_rename(cgrp);
mutex_unlock(&cgroup_mutex);
@@ -4381,6 +4392,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
if (task) {
ret = cgroup_migrate(task, false, to->root);
+ if (!ret)
+ trace_cgroup_transfer_tasks(to, task, false);
put_task_struct(task);
}
} while (task && !ret);
@@ -5046,6 +5059,8 @@ static void css_release_work_fn(struct work_struct *work)
ss->css_released(css);
} else {
/* cgroup release path */
+ trace_cgroup_release(cgrp);
+
cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
cgrp->id = -1;
@@ -5332,6 +5347,8 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
if (ret)
goto out_destroy;
+ trace_cgroup_mkdir(cgrp);
+
/* let's create and online css's */
kernfs_activate(kn);
@@ -5507,6 +5524,9 @@ static int cgroup_rmdir(struct kernfs_node *kn)
ret = cgroup_destroy_locked(cgrp);
+ if (!ret)
+ trace_cgroup_rmdir(cgrp);
+
cgroup_kn_unlock(kn);
return ret;
}
@@ -5743,7 +5763,7 @@ core_initcall(cgroup_wq_init);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk)
{
- char *buf, *path;
+ char *buf;
int retval;
struct cgroup_root *root;
@@ -5786,18 +5806,18 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
* " (deleted)" is appended to the cgroup path.
*/
if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
- path = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
+ retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
current->nsproxy->cgroup_ns);
- if (!path) {
+ if (retval >= PATH_MAX)
retval = -ENAMETOOLONG;
+ if (retval < 0)
goto out_unlock;
- }
+
+ seq_puts(m, buf);
} else {
- path = "/";
+ seq_puts(m, "/");
}
- seq_puts(m, path);
-
if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
seq_puts(m, " (deleted)\n");
else
@@ -6062,8 +6082,9 @@ static void cgroup_release_agent(struct work_struct *work)
{
struct cgroup *cgrp =
container_of(work, struct cgroup, release_agent_work);
- char *pathbuf = NULL, *agentbuf = NULL, *path;
+ char *pathbuf = NULL, *agentbuf = NULL;
char *argv[3], *envp[3];
+ int ret;
mutex_lock(&cgroup_mutex);
@@ -6073,13 +6094,13 @@ static void cgroup_release_agent(struct work_struct *work)
goto out;
spin_lock_irq(&css_set_lock);
- path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
+ ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
spin_unlock_irq(&css_set_lock);
- if (!path)
+ if (ret < 0 || ret >= PATH_MAX)
goto out;
argv[0] = agentbuf;
- argv[1] = path;
+ argv[1] = pathbuf;
argv[2] = NULL;
/* minimal command environment */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5df20d6d1520..29de1a9352c0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -228,7 +228,7 @@ static struct {
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = {.name = "cpu_hotplug.lock" },
+ .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2b4c20ab5bbe..29f815d2ef7e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2715,7 +2715,7 @@ void __cpuset_memory_pressure_bump(void)
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *tsk)
{
- char *buf, *p;
+ char *buf;
struct cgroup_subsys_state *css;
int retval;
@@ -2724,14 +2724,15 @@ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
if (!buf)
goto out;
- retval = -ENAMETOOLONG;
css = task_get_css(tsk, cpuset_cgrp_id);
- p = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
- current->nsproxy->cgroup_ns);
+ retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
+ current->nsproxy->cgroup_ns);
css_put(css);
- if (!p)
+ if (retval >= PATH_MAX)
+ retval = -ENAMETOOLONG;
+ if (retval < 0)
goto out_free;
- seq_puts(m, p);
+ seq_puts(m, buf);
seq_putc(m, '\n');
retval = 0;
out_free:
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c6e47e97b33f..0e292132efac 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1960,6 +1960,12 @@ void perf_event_disable(struct perf_event *event)
}
EXPORT_SYMBOL_GPL(perf_event_disable);
+void perf_event_disable_inatomic(struct perf_event *event)
+{
+ event->pending_disable = 1;
+ irq_work_queue(&event->pending);
+}
+
static void perf_set_shadow_time(struct perf_event *event,
struct perf_event_context *ctx,
u64 tstamp)
@@ -7075,8 +7081,8 @@ static int __perf_event_overflow(struct perf_event *event,
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
- event->pending_disable = 1;
- irq_work_queue(&event->pending);
+
+ perf_event_disable_inatomic(event);
}
READ_ONCE(event->overflow_handler)(event, data, regs);
@@ -8855,7 +8861,10 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
+ int remove_device;
+
mutex_lock(&pmus_lock);
+ remove_device = pmu_bus_running;
list_del_rcu(&pmu->entry);
mutex_unlock(&pmus_lock);
@@ -8869,10 +8878,12 @@ void perf_pmu_unregister(struct pmu *pmu)
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
- if (pmu->nr_addr_filters)
- device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
- device_del(pmu->dev);
- put_device(pmu->dev);
+ if (remove_device) {
+ if (pmu->nr_addr_filters)
+ device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
+ device_del(pmu->dev);
+ put_device(pmu->dev);
+ }
free_pmu_context(pmu);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb05e5d..f9ec9add2164 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+ &vma);
if (ret <= 0)
return ret;
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
* but we treat this as a 'remote' access since it is
* essentially a kernel access to the memory.
*/
- result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+ NULL);
if (result < 0)
return result;
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d42242485cb..623259fc794d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -547,7 +547,8 @@ free_tsk:
}
#ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+ struct mm_struct *oldmm)
{
struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
struct rb_node **rb_link, *rb_parent;
@@ -1441,7 +1442,8 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(
+ unsigned long clone_flags,
unsigned long stack_start,
unsigned long stack_size,
int __user *child_tidptr,
@@ -1926,6 +1928,7 @@ long _do_fork(unsigned long clone_flags,
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
+ add_latent_entropy();
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0c5f1a5db654..9c4d30483264 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq)
irq_put_desc_unlock(desc, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
/*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 8d44b3fea9d0..30e6d05aa5a9 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -53,8 +53,15 @@ void notrace __sanitizer_cov_trace_pc(void)
/*
* We are interested in code coverage as a function of a syscall inputs,
* so we ignore code executed in interrupts.
+ * The checks for whether we are in an interrupt are open-coded, because
+ * 1. We can't use in_interrupt() here, since it also returns true
+ * when we are inside a local_bh_disable() section.
+ * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
+ * since that leads to slower generated code (three separate tests,
+ * one for each of the flags).
*/
- if (!t || in_interrupt())
+ if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
+ | NMI_MASK)))
return;
mode = READ_ONCE(t->kcov_mode);
if (mode == KCOV_MODE_TRACE) {
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1e7f5da648d9..6ccb08f57fcb 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -498,9 +498,9 @@ static int enter_state(suspend_state_t state)
#ifndef CONFIG_SUSPEND_SKIP_SYNC
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
- printk(KERN_INFO "PM: Syncing filesystems ... ");
+ pr_info("PM: Syncing filesystems ... ");
sys_sync();
- printk("done.\n");
+ pr_cont("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
#endif
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index d5e397315473..de08fc90baaf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
cont_flush();
}
+ /* Skip empty continuation lines that couldn't be added - they just flush */
+ if (!text_len && (lflags & LOG_CONT))
+ return 0;
+
/* If it doesn't end in a newline, try to buffer the current line */
if (!(lflags & LOG_NEWLINE)) {
if (cont_add(facility, level, lflags, text, text_len))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2a99027312a6..e6474f7272ec 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
int this_len, retval;
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
- retval = access_process_vm(tsk, src, buf, this_len, 0);
+ retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
if (!retval) {
if (copied)
break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
if (copy_from_user(buf, src, this_len))
return -EFAULT;
- retval = access_process_vm(tsk, dst, buf, this_len, 1);
+ retval = access_process_vm(tsk, dst, buf, this_len,
+ FOLL_FORCE | FOLL_WRITE);
if (!retval) {
if (copied)
break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long tmp;
int copied;
- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
return -EIO;
return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
{
int copied;
- copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+ copied = access_process_vm(tsk, addr, &data, sizeof(data),
+ FOLL_FORCE | FOLL_WRITE);
return (copied == sizeof(data)) ? 0 : -EIO;
}
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+ ret = access_process_vm(child, addr, &word, sizeof(word),
+ FOLL_FORCE);
if (ret != sizeof(word))
ret = -EIO;
else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+ ret = access_process_vm(child, addr, &data, sizeof(data),
+ FOLL_FORCE | FOLL_WRITE);
ret = (ret != sizeof(data) ? -EIO : 0);
break;
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 944b1b491ed8..1898559e6b60 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -170,7 +170,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
false));
}
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
__rcu_process_callbacks(&rcu_sched_ctrlblk);
__rcu_process_callbacks(&rcu_bh_ctrlblk);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7e2e03879c2e..69a5611a7e7c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3013,7 +3013,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
/*
* Do RCU core processing for the current CPU.
*/
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
struct rcu_state *rsp;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 94732d1ab00a..42d4027f9e26 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7515,11 +7515,27 @@ static struct kmem_cache *task_group_cache __read_mostly;
DECLARE_PER_CPU(cpumask_var_t, load_balance_mask);
DECLARE_PER_CPU(cpumask_var_t, select_idle_mask);
+#define WAIT_TABLE_BITS 8
+#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
+static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
+{
+ const int shift = BITS_PER_LONG == 32 ? 5 : 6;
+ unsigned long val = (unsigned long)word << shift | bit;
+
+ return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
+}
+EXPORT_SYMBOL(bit_waitqueue);
+
void __init sched_init(void)
{
int i, j;
unsigned long alloc_size = 0, ptr;
+ for (i = 0; i < WAIT_TABLE_SIZE; i++)
+ init_waitqueue_head(bit_wait_table + i);
+
#ifdef CONFIG_FAIR_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 13935886a471..fa178b62ea79 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -415,7 +415,8 @@ static char *task_group_path(struct task_group *tg)
if (autogroup_path(tg, group_path, PATH_MAX))
return group_path;
- return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+ cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+ return group_path;
}
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 502e95a6e927..c242944f5cbd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
* will definitely be update (after enqueue).
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+ * Tasks are initialized with full load to be seen as heavy tasks until
+ * they get a chance to stabilize to their real load level.
+ * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
*/
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
- struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
- u64 avg_idle = this_rq()->avg_idle;
- u64 avg_cost = this_sd->avg_scan_cost;
+ struct sched_domain *this_sd;
+ u64 avg_cost, avg_idle = this_rq()->avg_idle;
u64 time, cost;
s64 delta;
int cpu, wrap;
+ this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+ if (!this_sd)
+ return -1;
+
+ avg_cost = this_sd->avg_scan_cost;
+
/*
* Due to large variance we need a large fuzz factor; hackbench in
* particularly is sensitive here.
@@ -8522,7 +8534,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
* run_rebalance_domains is triggered when needed from the scheduler tick.
* Also triggered for nohz idle balancing (with nohz_balancing_kick set).
*/
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
{
struct rq *this_rq = this_rq();
enum cpu_idle_type idle = this_rq->idle_balance ?
@@ -8827,7 +8839,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
struct sched_entity *se;
struct cfs_rq *cfs_rq;
- struct rq *rq;
int i;
tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8842,8 +8853,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
init_cfs_bandwidth(tg_cfs_bandwidth(tg));
for_each_possible_cpu(i) {
- rq = cpu_rq(i);
-
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
GFP_KERNEL, cpu_to_node(i));
if (!cfs_rq)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4f7053579fe3..9453efe9b25a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -480,16 +480,6 @@ void wake_up_bit(void *word, int bit)
}
EXPORT_SYMBOL(wake_up_bit);
-wait_queue_head_t *bit_waitqueue(void *word, int bit)
-{
- const int shift = BITS_PER_LONG == 32 ? 5 : 6;
- const struct zone *zone = page_zone(virt_to_page(word));
- unsigned long val = (unsigned long)word << shift | bit;
-
- return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
-}
-EXPORT_SYMBOL(bit_waitqueue);
-
/*
* Manipulate the atomic_t address to produce a better bit waitqueue table hash
* index (we're keying off bit -1, but that would produce a horrible hash
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 66762645f9e8..744fa611cae0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
const char * const softirq_to_name[NR_SOFTIRQS] = {
- "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
+ "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
@@ -496,7 +496,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
{
struct tasklet_struct *list;
@@ -532,7 +532,7 @@ static void tasklet_action(struct softirq_action *a)
}
}
-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
struct tasklet_struct *list;
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c3aad685bbc0..12dd190634ab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -542,7 +542,6 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
static int alarm_timer_create(struct k_itimer *new_timer)
{
enum alarmtimer_type type;
- struct alarm_base *base;
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;
@@ -551,7 +550,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
return -EPERM;
type = clock2alarm(new_timer->it_clock);
- base = &alarm_bases[type];
alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
return 0;
}
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 32bf6f75a8fe..c611c47de884 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
#ifdef CONFIG_NO_HZ_COMMON
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
#ifdef CONFIG_SMP
if ((tflags & TIMER_PINNED) || !base->migration_enabled)
@@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base)
{
+ unsigned long jnow = READ_ONCE(jiffies);
+
/*
* We only forward the base when it's idle and we have a delta between
* base clock and jiffies.
*/
- if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+ if (!base->is_idle || (long) (jnow - base->clk) < 2)
return;
/*
* If the next expiry value is > jiffies, then we fast forward to
* jiffies otherwise we forward to the next expiry value.
*/
- if (time_after(base->next_expiry, jiffies))
- base->clk = jiffies;
+ if (time_after(base->next_expiry, jnow))
+ base->clk = jnow;
else
base->clk = base->next_expiry;
}
#else
static inline struct timer_base *
-__get_target_base(struct timer_base *base, unsigned tflags)
+get_target_base(struct timer_base *base, unsigned tflags)
{
return get_timer_this_cpu_base(tflags);
}
@@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
static inline void forward_timer_base(struct timer_base *base) { }
#endif
-static inline struct timer_base *
-get_target_base(struct timer_base *base, unsigned tflags)
-{
- struct timer_base *target = __get_target_base(base, tflags);
-
- forward_timer_base(target);
- return target;
-}
/*
* We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
@@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
{
for (;;) {
struct timer_base *base;
- u32 tf = timer->flags;
+ u32 tf;
+
+ /*
+ * We need to use READ_ONCE() here, otherwise the compiler
+ * might re-read @tf between the check for TIMER_MIGRATING
+ * and spin_lock().
+ */
+ tf = READ_ONCE(timer->flags);
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
@@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
unsigned long clk = 0, flags;
int ret = 0;
+ BUG_ON(!timer->function);
+
/*
* This is a common optimization triggered by the networking code - if
* the timer is re-modified to have the same timeout or ends up in the
@@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
if (timer_pending(timer)) {
if (timer->expires == expires)
return 1;
+
/*
- * Take the current timer_jiffies of base, but without holding
- * the lock!
+ * We lock timer base and calculate the bucket index right
+ * here. If the timer ends up in the same bucket, then we
+ * just update the expiry time and avoid the whole
+ * dequeue/enqueue dance.
*/
- base = get_timer_base(timer->flags);
- clk = base->clk;
+ base = lock_timer_base(timer, &flags);
+ clk = base->clk;
idx = calc_wheel_index(expires, clk);
/*
@@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
*/
if (idx == timer_get_idx(timer)) {
timer->expires = expires;
- return 1;
+ ret = 1;
+ goto out_unlock;
}
+ } else {
+ base = lock_timer_base(timer, &flags);
}
timer_stats_timer_set_start_info(timer);
- BUG_ON(!timer->function);
-
- base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, false);
if (!ret && pending_only)
@@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
}
}
+ /* Try to forward a stale timer base clock */
+ forward_timer_base(base);
+
timer->expires = expires;
/*
* If 'idx' was calculated above and the base time did not advance
- * between calculating 'idx' and taking the lock, only enqueue_timer()
- * and trigger_dyntick_cpu() is required. Otherwise we need to
- * (re)calculate the wheel index via internal_add_timer().
+ * between calculating 'idx' and possibly switching the base, only
+ * enqueue_timer() and trigger_dyntick_cpu() are required. Otherwise
+ * we need to (re)calculate the wheel index via
+ * internal_add_timer().
*/
if (idx != UINT_MAX && clk == base->clk) {
enqueue_timer(base, timer, idx);
@@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
/*
- * We have a fresh next event. Check whether we can forward the base:
+ * We have a fresh next event. Check whether we can forward the
+ * base. We can only do that when @basej is past base->clk
+ * otherwise we might rewind base->clk.
*/
- if (time_after(nextevt, jiffies))
- base->clk = jiffies;
- else if (time_after(nextevt, base->clk))
- base->clk = nextevt;
+ if (time_after(basej, base->clk)) {
+ if (time_after(nextevt, basej))
+ base->clk = basej;
+ else if (time_after(nextevt, base->clk))
+ base->clk = nextevt;
+ }
if (time_before_eq(nextevt, basej)) {
expires = basem;
@@ -1633,7 +1647,7 @@ static inline void __run_timers(struct timer_base *base)
/*
* This function runs timers and the timer-tq in bottom half context.
*/
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
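The __mod_timer() rework above takes the base lock before computing the wheel index, so a pending timer whose new expiry lands in the same bucket only needs its expires field updated under the lock. A minimal user-space model of that fast path, with a made-up calc_bucket() standing in for the kernel's wheel-index math:

/* Toy model of the "same bucket" fast path added to __mod_timer();
 * BUCKET_SHIFT and struct toy_timer are invented for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define BUCKET_SHIFT 6   /* pretend each bucket covers 64 ticks */

struct toy_timer {
        unsigned long expires;
        unsigned int bucket;
        bool pending;
};

static unsigned int calc_bucket(unsigned long expires)
{
        return (unsigned int)(expires >> BUCKET_SHIFT);
}

/* Returns true when the cheap in-place update was enough. */
static bool mod_toy_timer(struct toy_timer *t, unsigned long expires)
{
        if (t->pending && calc_bucket(expires) == t->bucket) {
                t->expires = expires;   /* no dequeue/enqueue needed */
                return true;
        }
        /* slow path: requeue into the new bucket */
        t->bucket = calc_bucket(expires);
        t->expires = expires;
        t->pending = true;
        return false;
}

int main(void)
{
        struct toy_timer t = { .expires = 1000, .bucket = calc_bucket(1000), .pending = true };

        printf("fast path: %d\n", mod_toy_timer(&t, 1010)); /* same 64-tick bucket */
        printf("fast path: %d\n", mod_toy_timer(&t, 5000)); /* different bucket */
        return 0;
}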
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 39d07e754822..b01e547d4d04 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -198,6 +198,7 @@ config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
default 0 if KASAN
+ default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1024 if !64BIT
default 2048 if 64BIT
help
@@ -1857,15 +1858,6 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
-config BUILD_DOCSRC
- bool "Build targets in Documentation/ tree"
- depends on HEADERS_CHECK
- help
- This option attempts to build objects from the source files in the
- kernel Documentation/ tree.
-
- Say N if you are unsure.
-
config DMA_API_DEBUG
bool "Enable debugging of DMA-API usage"
depends on HAVE_DMA_API_DEBUG
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 0a1139644d32..144fe6b1a03e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -292,7 +292,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
struct gen_pool_chunk *chunk;
unsigned long addr = 0;
int order = pool->min_alloc_order;
- int nbits, start_bit = 0, end_bit, remain;
+ int nbits, start_bit, end_bit, remain;
#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
BUG_ON(in_nmi());
@@ -307,6 +307,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
if (size > atomic_read(&chunk->avail))
continue;
+ start_bit = 0;
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = algo(chunk->bits, end_bit, start_bit,
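The genalloc change resets start_bit for every chunk, so a search offset left over from the previous chunk's retry loop can no longer make later chunks look full. A self-contained toy version of the per-chunk reset, using an 8-bit bitmap per chunk purely for illustration:

/* Sketch of the pattern restored by the genalloc fix: the bitmap
 * search offset starts at 0 for each chunk. The fixed bitmaps and
 * find_clear_bit() helper are made up for the example. */
#include <stdio.h>

#define NBITS 8

/* Return first clear bit at or after 'start', or NBITS if none. */
static int find_clear_bit(unsigned char map, int start)
{
        for (int b = start; b < NBITS; b++)
                if (!(map & (1u << b)))
                        return b;
        return NBITS;
}

int main(void)
{
        unsigned char chunks[2] = { 0xff, 0x01 }; /* chunk 0 full, chunk 1 has only bit 0 set */

        for (int i = 0; i < 2; i++) {
                int start = 0;                    /* the fix: reset per chunk */
                int bit = find_clear_bit(chunks[i], start);

                if (bit < NBITS) {
                        printf("allocated bit %d in chunk %d\n", bit, i);
                        return 0;
                }
        }
        printf("no space\n");
        return 0;
}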
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7312e7784611..f0c7f1481bae 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1139,6 +1139,28 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
}
EXPORT_SYMBOL(dup_iter);
+/**
+ * import_iovec() - Copy an array of &struct iovec from userspace
+ * into the kernel, check that it is valid, and initialize a new
+ * &struct iov_iter iterator to access it.
+ *
+ * @type: One of %READ or %WRITE.
+ * @uvector: Pointer to the userspace array.
+ * @nr_segs: Number of elements in userspace array.
+ * @fast_segs: Number of elements in @iov.
+ * @iov: (input and output parameter) Pointer to pointer to (usually small
+ * on-stack) kernel array.
+ * @i: Pointer to iterator that will be initialized on success.
+ *
+ * If the array pointed to by *@iov is large enough to hold all @nr_segs,
+ * then this function places %NULL in *@iov on return. Otherwise, a new
+ * array will be allocated and the result placed in *@iov. This means that
+ * the caller may call kfree() on *@iov regardless of whether the small
+ * on-stack array was used or not (and regardless of whether this function
+ * returns an error or not).
+ *
+ * Return: 0 on success or negative error code on error.
+ */
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
struct iovec **iov, struct iov_iter *i)
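Given the calling convention documented in the new import_iovec() kernel-doc, a typical caller looks roughly like the sketch below (kernel context; demo_read_iovec() and its arguments are invented for illustration):

/* Sketch of the documented call pattern; not a complete driver. */
#include <linux/kernel.h>
#include <linux/uio.h>
#include <linux/slab.h>

static ssize_t demo_read_iovec(int type, const struct iovec __user *uvec,
                               unsigned long nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];     /* small on-stack array */
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(type, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;

        ret = iov_iter_count(&iter);            /* e.g. hand &iter to ->read_iter() */

        /* Safe whether or not iovstack was used: when the on-stack array
         * sufficed, *iov was set to NULL, and kfree(NULL) is a no-op. */
        kfree(iov);
        return ret;
}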
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 2be55692aa43..1d6565e81030 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -74,7 +74,7 @@ void irq_poll_complete(struct irq_poll *iop)
}
EXPORT_SYMBOL(irq_poll_complete);
-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
int rearm = 0, budget = irq_poll_budget;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 27fe74948882..9ac959ef4cae 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -33,6 +33,7 @@
#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
+static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
@@ -82,6 +83,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
atomic_long_set(&ref->count, start_count);
ref->release = release;
+ ref->confirm_switch = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
@@ -101,6 +103,8 @@ void percpu_ref_exit(struct percpu_ref *ref)
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
if (percpu_count) {
+ /* non-NULL confirm_switch indicates switching in progress */
+ WARN_ON_ONCE(ref->confirm_switch);
free_percpu(percpu_count);
ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
}
@@ -161,66 +165,23 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
- if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
- /* switching from percpu to atomic */
- ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-
- /*
- * Non-NULL ->confirm_switch is used to indicate that
- * switching is in progress. Use noop one if unspecified.
- */
- WARN_ON_ONCE(ref->confirm_switch);
- ref->confirm_switch =
- confirm_switch ?: percpu_ref_noop_confirm_switch;
-
- percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
- } else if (confirm_switch) {
- /*
- * Somebody already set ATOMIC. Switching may still be in
- * progress. @confirm_switch must be invoked after the
- * switching is complete and a full sched RCU grace period
- * has passed. Wait synchronously for the previous
- * switching and schedule @confirm_switch invocation.
- */
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
- ref->confirm_switch = confirm_switch;
-
- percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
+ if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
+ if (confirm_switch)
+ confirm_switch(ref);
+ return;
}
-}
-/**
- * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
- * @ref: percpu_ref to switch to atomic mode
- * @confirm_switch: optional confirmation callback
- *
- * There's no reason to use this function for the usual reference counting.
- * Use percpu_ref_kill[_and_confirm]().
- *
- * Schedule switching of @ref to atomic mode. All its percpu counts will
- * be collected to the main atomic counter. On completion, when all CPUs
- * are guaraneed to be in atomic mode, @confirm_switch, which may not
- * block, is invoked. This function may be invoked concurrently with all
- * the get/put operations and can safely be mixed with kill and reinit
- * operations. Note that @ref will stay in atomic mode across kill/reinit
- * cycles until percpu_ref_switch_to_percpu() is called.
- *
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
- */
-void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
- percpu_ref_func_t *confirm_switch)
-{
- ref->force_atomic = true;
- __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ /* switching from percpu to atomic */
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+
+ /*
+ * Non-NULL ->confirm_switch is used to indicate that switching is
+ * in progress. Use noop one if unspecified.
+ */
+ ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+
+ percpu_ref_get(ref); /* put after confirmation */
+ call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -233,8 +194,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
return;
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
@@ -250,6 +209,58 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ lockdep_assert_held(&percpu_ref_switch_lock);
+
+ /*
+ * If the previous ATOMIC switching hasn't finished yet, wait for
+ * its completion. If the caller ensures that ATOMIC switching
+ * isn't in progress, this function can be called from any context.
+ */
+ wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+ percpu_ref_switch_lock);
+
+ if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ else
+ __percpu_ref_switch_to_percpu(ref);
+}
+
+/**
+ * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
+ * @ref: percpu_ref to switch to atomic mode
+ * @confirm_switch: optional confirmation callback
+ *
+ * There's no reason to use this function for the usual reference counting.
+ * Use percpu_ref_kill[_and_confirm]().
+ *
+ * Schedule switching of @ref to atomic mode. All its percpu counts will
+ * be collected to the main atomic counter. On completion, when all CPUs
+ * are guaranteed to be in atomic mode, @confirm_switch, which may not
+ * block, is invoked. This function may be invoked concurrently with all
+ * the get/put operations and can safely be mixed with kill and reinit
+ * operations. Note that @ref will stay in atomic mode across kill/reinit
+ * cycles until percpu_ref_switch_to_percpu() is called.
+ *
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
+ */
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
+ ref->force_atomic = true;
+ __percpu_ref_switch_mode(ref, confirm_switch);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+}
+
/**
* percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
* @ref: percpu_ref to switch to percpu mode
@@ -264,17 +275,20 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
ref->force_atomic = false;
+ __percpu_ref_switch_mode(ref, NULL);
- /* a dying or dead ref can't be switched to percpu mode w/o reinit */
- if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
- __percpu_ref_switch_to_percpu(ref);
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
/**
@@ -290,21 +304,23 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
*
* This function normally doesn't block and can be called from any context
* but it may block if @confirm_kill is specified and @ref is in the
- * process of switching to atomic mode by percpu_ref_switch_atomic().
- *
- * Due to the way percpu_ref is implemented, @confirm_switch will be called
- * after at least one full sched RCU grace period has passed but this is an
- * implementation detail and must not be depended upon.
+ * process of switching to atomic mode by percpu_ref_switch_to_atomic().
*/
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
"%s called more than once on %pf!", __func__, ref->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ __percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
@@ -321,11 +337,16 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+
WARN_ON_ONCE(!percpu_ref_is_zero(ref));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
- if (!ref->force_atomic)
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
+
+ spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
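For reference, a sketch of how the API documented above is usually driven when tearing down a percpu_ref-protected object. demo_obj, demo_release and demo_confirm_kill are invented names, and the shutdown path assumes process context since the kill/switch path may now sleep waiting for a previous mode switch:

/* Kernel-context sketch; only illustrates the call sequence. */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

struct demo_obj {
        struct percpu_ref ref;
        struct completion confirmed;
};

static void demo_release(struct percpu_ref *ref)
{
        /* last reference dropped: free the object here */
}

static void demo_confirm_kill(struct percpu_ref *ref)
{
        struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

        /* all CPUs now see the ref as dead/atomic */
        complete(&obj->confirmed);
}

static int demo_setup(struct demo_obj *obj)
{
        init_completion(&obj->confirmed);
        return percpu_ref_init(&obj->ref, demo_release, 0, GFP_KERNEL);
}

static void demo_shutdown(struct demo_obj *obj)
{
        percpu_ref_kill_and_confirm(&obj->ref, demo_confirm_kill);
        wait_for_completion(&obj->confirmed);
}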
diff --git a/lib/random32.c b/lib/random32.c
index 915982b304bb..fa594b1140e6 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -47,7 +47,7 @@ static inline void prandom_state_selftest(void)
}
#endif
-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
/**
* prandom_u32_state - seeded pseudo-random number generator.
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 60f77f1d470a..4d830e299989 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -50,7 +50,7 @@
STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
-#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
(1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 94346b4d8984..0362da0b66c3 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4831,7 +4831,7 @@ static struct bpf_test tests[] = {
{ },
INTERNAL,
{ 0x34 },
- { { 1, 0xbef } },
+ { { ETH_HLEN, 0xbef } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
},
/*
diff --git a/mm/Kconfig b/mm/Kconfig
index be0ee11fa0d9..86e3e0e74d20 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -187,7 +187,7 @@ config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM || X86_64_ACPI_NUMA
depends on ARCH_ENABLE_MEMORY_HOTPLUG
- depends on !KASAN
+ depends on COMPILE_TEST || !KASAN
config MEMORY_HOTPLUG_SPARSE
def_bool y
diff --git a/mm/filemap.c b/mm/filemap.c
index 849f459ad078..c7fe2f16503f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -790,9 +790,7 @@ EXPORT_SYMBOL(__page_cache_alloc);
*/
wait_queue_head_t *page_waitqueue(struct page *page)
{
- const struct zone *zone = page_zone(page);
-
- return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
+ return bit_waitqueue(page, 0);
}
EXPORT_SYMBOL(page_waitqueue);
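page_waitqueue() now reuses bit_waitqueue(), i.e. a single hashed table of wait queues shared by all pages instead of the per-zone tables torn out elsewhere in this diff. The user-space sketch below only demonstrates the hashing idea; the table size, shift and multiplicative constant are arbitrary, and the kernel version is built on hash_long() and wait_queue_head_t.

/* Toy hashed waitqueue lookup: many (address, bit) pairs share a small
 * fixed table of queues. */
#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 8
#define TABLE_SIZE (1u << TABLE_BITS)

struct waitq { int dummy; };                  /* stand-in for wait_queue_head_t */
static struct waitq wait_table[TABLE_SIZE];

static struct waitq *toy_bit_waitqueue(const void *word, int bit)
{
        uint64_t val = ((uint64_t)(uintptr_t)word << 3) | (unsigned int)bit;

        /* multiplicative hash, then keep the top TABLE_BITS bits */
        val *= 0x9e3779b97f4a7c15ULL;
        return &wait_table[val >> (64 - TABLE_BITS)];
}

int main(void)
{
        long page_a, page_b;

        printf("%td %td\n",
               toy_bit_waitqueue(&page_a, 0) - wait_table,
               toy_bit_waitqueue(&page_b, 0) - wait_table);
        return 0;
}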
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 381bb07ed14f..db77dcb38afd 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
* get_vaddr_frames() - map virtual addresses to pfns
* @start: starting user address
* @nr_frames: number of pages / pfns from start to map
- * @write: whether pages will be written to by the caller
- * @force: whether to force write access even if user mapping is
- * readonly. See description of the same argument of
- get_user_pages().
+ * @gup_flags: flags modifying lookup behaviour
* @vec: structure which receives pages / pfns of the addresses mapped.
* It should have space for at least nr_frames entries.
*
@@ -34,7 +31,7 @@
* This function takes care of grabbing mmap_sem as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
- bool write, bool force, struct frame_vector *vec)
+ unsigned int gup_flags, struct frame_vector *vec)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
vec->got_ref = true;
vec->is_pfns = false;
ret = get_user_pages_locked(start, nr_frames,
- write, force, (struct page **)(vec->ptrs), &locked);
+ gup_flags, (struct page **)(vec->ptrs), &locked);
goto out;
}
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2fd0fbd..ec4f82704b6f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
return -EEXIST;
}
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
@@ -95,7 +105,7 @@ retry:
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
- *flags &= ~FOLL_WRITE;
+ *flags |= FOLL_COW;
return 0;
}
@@ -516,7 +526,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* instead of __get_user_pages. __get_user_pages should be used only if
* you need some special @gup_flags.
*/
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
@@ -621,7 +631,6 @@ next_page:
} while (nr_pages);
return i;
}
-EXPORT_SYMBOL(__get_user_pages);
bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
{
@@ -729,7 +738,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
- int write, int force,
struct page **pages,
struct vm_area_struct **vmas,
int *locked, bool notify_drop,
@@ -747,10 +755,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
if (pages)
flags |= FOLL_GET;
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
pages_done = 0;
lock_dropped = false;
@@ -843,12 +847,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* up_read(&mm->mmap_sem);
*/
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- write, force, pages, NULL, locked, true,
- FOLL_TOUCH);
+ pages, NULL, locked, true,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
@@ -864,14 +868,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
*/
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
int locked = 1;
+
down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, NULL, &locked, false, gup_flags);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+ &locked, false, gup_flags);
if (locked)
up_read(&mm->mmap_sem);
return ret;
@@ -896,10 +900,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
* "force" parameter).
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- write, force, pages, FOLL_TOUCH);
+ pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -910,9 +914,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to by the caller
- * @force: whether to force access even when user mapping is currently
- * protected (but never forces write access to shared mapping).
+ * @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
@@ -941,9 +943,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
- * If write=0, the page must not be written to. If the page is written to,
- * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
- * after the page is finished with, and before put_page is called.
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
*
* get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +962,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*/
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, vmas, NULL, false,
- FOLL_TOUCH | FOLL_REMOTE);
+ return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+ NULL, false,
+ gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -976,12 +978,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
* obviously don't pass FOLL_REMOTE in here.
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- write, force, pages, vmas, NULL, false,
- FOLL_TOUCH);
+ pages, vmas, NULL, false,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
@@ -1505,7 +1507,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+ ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
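With the gup API change above, callers stop passing separate write/force ints and hand in a gup_flags mask directly; the FOLL_COW bookkeeping for forced COW writes stays internal to gup. A sketch of the migrated calling convention (kernel context; demo_pin_user_buffer() and its parameters are placeholders):

/* Sketch of a caller moving to the flags-based interface. */
#include <linux/mm.h>

static long demo_pin_user_buffer(unsigned long addr, unsigned long nr_pages,
                                 struct page **my_pages, bool need_write)
{
        unsigned int gup_flags = need_write ? FOLL_WRITE : 0;

        /* before this series:
         *   get_user_pages(addr, nr_pages, need_write, 0, my_pages, NULL);
         * after it, the flags are passed explicitly: */
        return get_user_pages(addr, nr_pages, gup_flags, my_pages, NULL);
}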
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 88af13c00d3c..70c009741aab 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
+#include <linux/bug.h>
#include "kasan.h"
#include "../slab.h"
@@ -62,7 +63,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
}
}
-static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
void *base = task_stack_page(task);
size_t size = sp - base;
@@ -77,9 +78,24 @@ void kasan_unpoison_task_stack(struct task_struct *task)
}
/* Unpoison the stack for the current task beyond a watermark sp value. */
-asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
- __kasan_unpoison_stack(current, sp);
+ __kasan_unpoison_stack(current, watermark);
+}
+
+/*
+ * Clear all poison for the region between the current SP and a provided
+ * watermark value, as is sometimes required prior to hand-crafted asm function
+ * returns in the middle of functions.
+ */
+void kasan_unpoison_stack_above_sp_to(const void *watermark)
+{
+ const void *sp = __builtin_frame_address(0);
+ size_t size = watermark - sp;
+
+ if (WARN_ON(sp > watermark))
+ return;
+ kasan_unpoison_shadow(sp, size);
}
/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a5e453cf05c4..e5355a5b423f 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1453,8 +1453,11 @@ static void kmemleak_scan(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
- scan_block(task_stack_page(p), task_stack_page(p) +
- THREAD_SIZE, NULL);
+ void *stack = try_get_task_stack(p);
+ if (stack) {
+ scan_block(stack, stack + THREAD_SIZE, NULL);
+ put_task_stack(p);
+ }
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 1d05cb9d363d..234676e31edd 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
err = memcg_init_list_lru(lru, memcg_aware);
if (err) {
kfree(lru->node);
+ /* Do this so a list_lru_destroy() doesn't crash: */
+ lru->node = NULL;
goto out;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ae052b5e3315..0f870ba43942 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1917,6 +1917,15 @@ retry:
current->flags & PF_EXITING))
goto force;
+ /*
+ * Prevent unbounded recursion when reclaim operations need to
+ * allocate memory. This might exceed the limits temporarily,
+ * but we prefer facilitating memory reclaim and getting back
+ * under the limit over triggering OOM kills in these cases.
+ */
+ if (unlikely(current->flags & PF_MEMALLOC))
+ goto force;
+
if (unlikely(task_in_memcg_oom(current)))
goto nomem;
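The new PF_MEMALLOC check in try_charge() pairs with the mm/vmscan.c hunk later in this diff, which sets PF_MEMALLOC around memcg reclaim: a task that is already reclaiming is allowed to exceed the limit rather than recurse into another reclaim cycle. The toy program below shows the guard pattern with an invented flag and functions; without the flag check, the two calls would recurse without bound.

/* User-space sketch of a per-task reentrancy guard. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_PF_MEMALLOC 0x1u

static unsigned int current_flags;

static void reclaim(void);

static bool charge(void)
{
        if (current_flags & TOY_PF_MEMALLOC) {
                puts("charge: inside reclaim, allow overcharge instead of recursing");
                return true;
        }
        puts("charge: over limit, starting reclaim");
        reclaim();
        return true;
}

static void reclaim(void)
{
        current_flags |= TOY_PF_MEMALLOC;
        charge();                       /* reclaim may itself need memory */
        current_flags &= ~TOY_PF_MEMALLOC;
}

int main(void)
{
        charge();
        return 0;
}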
diff --git a/mm/memory.c b/mm/memory.c
index fc1987dfd8cc..e18c57bdc75c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
void *old_buf = buf;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct page *page = NULL;
ret = get_user_pages_remote(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+ gup_flags, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Do not walk the page table directly, use get_user_pages
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
struct mm_struct *mm;
int ret;
@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
if (!mm)
return 0;
- ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+
mmput(mm);
return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 962927309b6e..cad4b9125695 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -268,7 +268,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
unsigned long i, pfn, end_pfn, nr_pages;
int node = pgdat->node_id;
struct page *page;
- struct zone *zone;
nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
page = virt_to_page(pgdat);
@@ -276,19 +275,6 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
for (i = 0; i < nr_pages; i++, page++)
get_page_bootmem(node, page, NODE_INFO);
- zone = &pgdat->node_zones[0];
- for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
- if (zone_is_initialized(zone)) {
- nr_pages = zone->wait_table_hash_nr_entries
- * sizeof(wait_queue_head_t);
- nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
- page = virt_to_page(zone->wait_table);
-
- for (i = 0; i < nr_pages; i++, page++)
- get_page_bootmem(node, page, NODE_INFO);
- }
- }
-
pfn = pgdat->node_start_pfn;
end_pfn = pgdat_end_pfn(pgdat);
@@ -2131,7 +2117,6 @@ void try_offline_node(int nid)
unsigned long start_pfn = pgdat->node_start_pfn;
unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
unsigned long pfn;
- int i;
for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
unsigned long section_nr = pfn_to_section_nr(pfn);
@@ -2158,20 +2143,6 @@ void try_offline_node(int nid)
*/
node_set_offline(nid);
unregister_one_node(nid);
-
- /* free waittable in each zone */
- for (i = 0; i < MAX_NR_ZONES; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- /*
- * wait_table may be allocated from boot memory,
- * here only free if it's allocated by vmalloc.
- */
- if (is_vmalloc_addr(zone->wait_table)) {
- vfree(zone->wait_table);
- zone->wait_table = NULL;
- }
- }
}
EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ad1c96ac313c..0b859af06b87 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
struct page *p;
int err;
- err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+ err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bcdbe62f3e6d..11936526b08b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -25,7 +25,6 @@
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
-#include <linux/pkeys.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81a4855..8b8faaf2a9e9 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -109,7 +109,7 @@ unsigned int kobjsize(const void *objp)
return PAGE_SIZE << compound_order(page);
}
-long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int foll_flags, struct page **pages,
struct vm_area_struct **vmas, int *nonblocking)
@@ -160,33 +160,25 @@ finish_or_fault:
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- int flags = 0;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(current, current->mm, start, nr_pages, flags,
- pages, vmas, NULL);
+ return __get_user_pages(current, current->mm, start, nr_pages,
+ gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
- return get_user_pages(start, nr_pages, write, force, pages, NULL);
+ return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- write, force, pages, 0);
+ pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
EXPORT_SYMBOL(filemap_map_pages);
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
* Access another process' address space.
* - source/target buffer must be kernel space
*/
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+ unsigned int gup_flags)
{
struct mm_struct *mm;
@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (!mm)
return 0;
- len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
mmput(mm);
return len;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ca423cc20b59..8fd42aa7c4bd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -91,6 +91,11 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif
+#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
+volatile u64 latent_entropy __latent_entropy;
+EXPORT_SYMBOL(latent_entropy);
+#endif
+
/*
* Array of node states.
*/
@@ -4219,7 +4224,7 @@ static void show_migration_types(unsigned char type)
}
*p = '\0';
- printk("(%s) ", tmp);
+ printk(KERN_CONT "(%s) ", tmp);
}
/*
@@ -4330,7 +4335,8 @@ void show_free_areas(unsigned int filter)
free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
show_node(zone);
- printk("%s"
+ printk(KERN_CONT
+ "%s"
" free:%lukB"
" min:%lukB"
" low:%lukB"
@@ -4377,8 +4383,8 @@ void show_free_areas(unsigned int filter)
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
- printk(" %ld", zone->lowmem_reserve[i]);
- printk("\n");
+ printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
+ printk(KERN_CONT "\n");
}
for_each_populated_zone(zone) {
@@ -4389,7 +4395,7 @@ void show_free_areas(unsigned int filter)
if (skip_free_areas_node(filter, zone_to_nid(zone)))
continue;
show_node(zone);
- printk("%s: ", zone->name);
+ printk(KERN_CONT "%s: ", zone->name);
spin_lock_irqsave(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
@@ -4407,11 +4413,12 @@ void show_free_areas(unsigned int filter)
}
spin_unlock_irqrestore(&zone->lock, flags);
for (order = 0; order < MAX_ORDER; order++) {
- printk("%lu*%lukB ", nr[order], K(1UL) << order);
+ printk(KERN_CONT "%lu*%lukB ",
+ nr[order], K(1UL) << order);
if (nr[order])
show_migration_types(types[order]);
}
- printk("= %lukB\n", K(total));
+ printk(KERN_CONT "= %lukB\n", K(total));
}
hugetlb_show_meminfo();
@@ -4972,72 +4979,6 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
}
/*
- * Helper functions to size the waitqueue hash table.
- * Essentially these want to choose hash table sizes sufficiently
- * large so that collisions trying to wait on pages are rare.
- * But in fact, the number of active page waitqueues on typical
- * systems is ridiculously low, less than 200. So this is even
- * conservative, even though it seems large.
- *
- * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
- * waitqueues, i.e. the size of the waitq table given the number of pages.
- */
-#define PAGES_PER_WAITQUEUE 256
-
-#ifndef CONFIG_MEMORY_HOTPLUG
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
- unsigned long size = 1;
-
- pages /= PAGES_PER_WAITQUEUE;
-
- while (size < pages)
- size <<= 1;
-
- /*
- * Once we have dozens or even hundreds of threads sleeping
- * on IO we've got bigger problems than wait queue collision.
- * Limit the size of the wait table to a reasonable size.
- */
- size = min(size, 4096UL);
-
- return max(size, 4UL);
-}
-#else
-/*
- * A zone's size might be changed by hot-add, so it is not possible to determine
- * a suitable size for its wait_table. So we use the maximum size now.
- *
- * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
- *
- * i386 (preemption config) : 4096 x 16 = 64Kbyte.
- * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
- * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
- *
- * The maximum entries are prepared when a zone's memory is (512K + 256) pages
- * or more by the traditional way. (See above). It equals:
- *
- * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
- * ia64(16K page size) : = ( 8G + 4M)byte.
- * powerpc (64K page size) : = (32G +16M)byte.
- */
-static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
-{
- return 4096UL;
-}
-#endif
-
-/*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
- */
-static inline unsigned long wait_table_bits(unsigned long size)
-{
- return ffz(~size);
-}
-
-/*
* Initially all pages are reserved - free ones are freed
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
@@ -5299,49 +5240,6 @@ void __init setup_per_cpu_pageset(void)
alloc_percpu(struct per_cpu_nodestat);
}
-static noinline __ref
-int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
-{
- int i;
- size_t alloc_size;
-
- /*
- * The per-page waitqueue mechanism uses hashed waitqueues
- * per zone.
- */
- zone->wait_table_hash_nr_entries =
- wait_table_hash_nr_entries(zone_size_pages);
- zone->wait_table_bits =
- wait_table_bits(zone->wait_table_hash_nr_entries);
- alloc_size = zone->wait_table_hash_nr_entries
- * sizeof(wait_queue_head_t);
-
- if (!slab_is_available()) {
- zone->wait_table = (wait_queue_head_t *)
- memblock_virt_alloc_node_nopanic(
- alloc_size, zone->zone_pgdat->node_id);
- } else {
- /*
- * This case means that a zone whose size was 0 gets new memory
- * via memory hot-add.
- * But it may be the case that a new node was hot-added. In
- * this case vmalloc() will not be able to use this new node's
- * memory - this wait_table must be initialized to use this new
- * node itself as well.
- * To use this new node's memory, further consideration will be
- * necessary.
- */
- zone->wait_table = vmalloc(alloc_size);
- }
- if (!zone->wait_table)
- return -ENOMEM;
-
- for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
- init_waitqueue_head(zone->wait_table + i);
-
- return 0;
-}
-
static __meminit void zone_pcp_init(struct zone *zone)
{
/*
@@ -5362,10 +5260,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
- int ret;
- ret = zone_wait_table_init(zone, size);
- if (ret)
- return ret;
+
pgdat->nr_zones = zone_idx(zone) + 1;
zone->zone_start_pfn = zone_start_pfn;
@@ -5377,6 +5272,7 @@ int __meminit init_currently_empty_zone(struct zone *zone,
zone_start_pfn, (zone_start_pfn + size));
zone_init_free_lists(zone);
+ zone->initialized = 1;
return 0;
}
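The show_free_areas()/show_migration_types() hunks switch continuation printk() calls to KERN_CONT so the fragments keep appending to the line opened by the first call instead of being treated as separate records. A minimal kernel-context sketch of that convention (demo_show_counts() and its values are invented):

/* Continuation-line printk convention. */
#include <linux/printk.h>

static void demo_show_counts(unsigned long a, unsigned long b)
{
        printk(KERN_INFO "counts:");          /* starts the record */
        printk(KERN_CONT " a=%lu", a);        /* appended to the same line */
        printk(KERN_CONT " b=%lu\n", b);      /* '\n' terminates the record */
}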
diff --git a/mm/percpu.c b/mm/percpu.c
index 9903830aaebb..255714302394 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1961,8 +1961,9 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
void *base = (void *)ULONG_MAX;
void **areas = NULL;
struct pcpu_alloc_info *ai;
- size_t size_sum, areas_size, max_distance;
- int group, i, rc;
+ size_t size_sum, areas_size;
+ unsigned long max_distance;
+ int group, i, highest_group, rc;
ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
cpu_distance_fn);
@@ -1978,7 +1979,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
goto out_free;
}
- /* allocate, copy and determine base address */
+ /* allocate, copy and determine base address & max_distance */
+ highest_group = 0;
for (group = 0; group < ai->nr_groups; group++) {
struct pcpu_group_info *gi = &ai->groups[group];
unsigned int cpu = NR_CPUS;
@@ -1999,6 +2001,21 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
areas[group] = ptr;
base = min(ptr, base);
+ if (ptr > areas[highest_group])
+ highest_group = group;
+ }
+ max_distance = areas[highest_group] - base;
+ max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
+
+ /* warn if maximum distance is further than 75% of vmalloc space */
+ if (max_distance > VMALLOC_TOTAL * 3 / 4) {
+ pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
+ max_distance, VMALLOC_TOTAL);
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+ /* and fail if we have fallback */
+ rc = -EINVAL;
+ goto out_free_areas;
+#endif
}
/*
@@ -2023,23 +2040,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
}
/* base address is now known, determine group base offsets */
- max_distance = 0;
for (group = 0; group < ai->nr_groups; group++) {
ai->groups[group].base_offset = areas[group] - base;
- max_distance = max_t(size_t, max_distance,
- ai->groups[group].base_offset);
- }
- max_distance += ai->unit_size;
-
- /* warn if maximum distance is further than 75% of vmalloc space */
- if (max_distance > VMALLOC_TOTAL * 3 / 4) {
- pr_warn("max_distance=0x%zx too large for vmalloc space 0x%lx\n",
- max_distance, VMALLOC_TOTAL);
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
- /* and fail if we have fallback */
- rc = -EINVAL;
- goto out_free;
-#endif
}
pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
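The reworked pcpu_embed_first_chunk() measures max_distance as the span from the lowest group base to the end of the highest group (its base plus unit_size times that group's nr_units), and runs the vmalloc-space check before the group offsets are finalised so the page-first-chunk fallback can still be taken. The numbers below are invented just to show how the two estimates differ when the highest group contains several units:

/* User-space arithmetic check of the distance calculation. */
#include <stdio.h>

int main(void)
{
        unsigned long base_lo   = 0x100000;   /* lowest group allocation */
        unsigned long base_hi   = 0x900000;   /* highest group allocation */
        unsigned long unit_size = 0x20000;
        unsigned long nr_units  = 4;          /* units in the highest group */

        /* old estimate stopped one unit past the highest base */
        unsigned long old_estimate = (base_hi - base_lo) + unit_size;
        /* new estimate covers every unit of the highest group */
        unsigned long new_estimate = (base_hi - base_lo) + unit_size * nr_units;

        printf("old=%#lx new=%#lx\n", old_estimate, new_estimate);
        return 0;
}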
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d41ebcc..be8dc8d1edb9 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *);
+ unsigned int flags = FOLL_REMOTE;
/* Work out address and page range required */
if (len == 0)
return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
+ if (vm_write)
+ flags |= FOLL_WRITE;
+
while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop);
size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
* current/current->mm
*/
pages = __get_user_pages_unlocked(task, mm, pa, pages,
- vm_write, 0, process_pages,
- FOLL_REMOTE);
+ process_pages, flags);
if (pages <= 0)
return -EFAULT;
diff --git a/mm/slab.c b/mm/slab.c
index 090fb26b3a39..0b0550ca85b4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -233,6 +233,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
+ parent->num_slabs = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
@@ -966,7 +967,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* guaranteed to be valid until irq is re-enabled, because it will be
* freed after synchronize_sched().
*/
- if (force_change)
+ if (old_shared && force_change)
synchronize_sched();
fail:
@@ -1382,24 +1383,27 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
for_each_kmem_cache_node(cachep, node, n) {
unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
unsigned long active_slabs = 0, num_slabs = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full;
spin_lock_irqsave(&n->list_lock, flags);
- list_for_each_entry(page, &n->slabs_full, lru) {
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs = n->num_slabs;
list_for_each_entry(page, &n->slabs_partial, lru) {
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
list_for_each_entry(page, &n->slabs_free, lru)
- num_slabs++;
+ num_slabs_free++;
free_objects += n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs -
+ (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
node, active_slabs, num_slabs, active_objs, num_objs,
free_objects);
@@ -2314,6 +2318,7 @@ static int drain_freelist(struct kmem_cache *cache,
page = list_entry(p, struct page, lru);
list_del(&page->lru);
+ n->num_slabs--;
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
@@ -2752,6 +2757,8 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
list_add_tail(&page->lru, &(n->slabs_free));
else
fixup_slab_list(cachep, n, page, &list);
+
+ n->num_slabs++;
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
spin_unlock(&n->list_lock);
@@ -3443,6 +3450,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
+ n->num_slabs--;
}
}
@@ -4099,6 +4107,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs, free_objects = 0, shared_avail = 0;
+ unsigned long num_slabs_partial = 0, num_slabs_free = 0;
+ unsigned long num_slabs_full = 0;
const char *name;
char *error = NULL;
int node;
@@ -4111,33 +4121,34 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- list_for_each_entry(page, &n->slabs_full, lru) {
- if (page->active != cachep->num && !error)
- error = "slabs_full accounting error";
- active_objs += cachep->num;
- active_slabs++;
- }
+ num_slabs += n->num_slabs;
+
list_for_each_entry(page, &n->slabs_partial, lru) {
if (page->active == cachep->num && !error)
error = "slabs_partial accounting error";
if (!page->active && !error)
error = "slabs_partial accounting error";
active_objs += page->active;
- active_slabs++;
+ num_slabs_partial++;
}
+
list_for_each_entry(page, &n->slabs_free, lru) {
if (page->active && !error)
error = "slabs_free accounting error";
- num_slabs++;
+ num_slabs_free++;
}
+
free_objects += n->free_objects;
if (n->shared)
shared_avail += n->shared->avail;
spin_unlock_irq(&n->list_lock);
}
- num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
+ active_slabs = num_slabs - num_slabs_free;
+ num_slabs_full = num_slabs - (num_slabs_partial + num_slabs_free);
+ active_objs += (num_slabs_full * cachep->num);
+
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
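With the new per-node num_slabs counter, the slabinfo paths derive the full-list numbers arithmetically instead of walking slabs_full. A quick user-space check of the identities used above, with arbitrary sample counts:

/* Accounting identities from the slab hunks, on made-up numbers. */
#include <stdio.h>

int main(void)
{
        unsigned long objs_per_slab = 8;
        unsigned long num_slabs = 10, num_partial = 3, num_free = 2;
        unsigned long partial_active_objs = 11;   /* summed while walking the partial list */

        unsigned long num_full     = num_slabs - (num_partial + num_free);
        unsigned long active_slabs = num_slabs - num_free;
        unsigned long active_objs  = partial_active_objs + num_full * objs_per_slab;
        unsigned long num_objs     = num_slabs * objs_per_slab;

        printf("full=%lu active_slabs=%lu active_objs=%lu/%lu\n",
               num_full, active_slabs, active_objs, num_objs);
        return 0;
}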
diff --git a/mm/slab.h b/mm/slab.h
index 9653f2e2591a..bc05fdc3edce 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -432,6 +432,7 @@ struct kmem_cache_node {
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
+ unsigned long num_slabs;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
diff --git a/mm/util.c b/mm/util.c
index 662cddf914af..1a41553db866 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,8 +230,10 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
}
/* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+int vma_is_stack_for_current(struct vm_area_struct *vma)
{
+ struct task_struct * __maybe_unused t = current;
+
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
@@ -283,7 +285,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
- return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+ return get_user_pages_unlocked(start, nr_pages, pages,
+ write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
@@ -623,7 +626,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len > buflen)
len = buflen;
- res = access_process_vm(task, arg_start, buffer, len, 0);
+ res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
/*
* If the nul at the end of args has been overwritten, then
@@ -638,7 +641,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len > buflen - res)
len = buflen - res;
res += access_process_vm(task, env_start,
- buffer+res, len, 0);
+ buffer+res, len,
+ FOLL_FORCE);
res = strnlen(buffer, res);
}
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 744f926af442..76fda2268148 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3043,7 +3043,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
sc.gfp_mask,
sc.reclaim_idx);
+ current->flags |= PF_MEMALLOC;
nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+ current->flags &= ~PF_MEMALLOC;
trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 9accde339601..a79365574531 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -663,7 +663,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*vhdr));
skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 08ce36147c4c..e034afbd1bb0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -652,7 +652,6 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
batadv_softif_destroy_sysfs(hard_iface->soft_iface);
}
- hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
out:
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index ab47acf2eb01..3284a7b0325d 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -63,7 +63,7 @@ enum batadv_dbg_level {
BATADV_DBG_NC = BIT(5),
BATADV_DBG_MCAST = BIT(6),
BATADV_DBG_TP_METER = BIT(7),
- BATADV_DBG_ALL = 127,
+ BATADV_DBG_ALL = 255,
};
#ifdef CONFIG_BATMAN_ADV_DEBUG
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 518b1ed87b64..c6e7e1e39b70 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -544,7 +544,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
if (bat_priv->algo_ops->neigh.hardif_init)
bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
- hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
+ hlist_add_head_rcu(&hardif_neigh->list, &hard_iface->neigh_list);
out:
spin_unlock_bh(&hard_iface->neigh_list_lock);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2288421fe6b..1015d9c8d97d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -969,41 +969,38 @@ void __hci_req_enable_advertising(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
-static u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
- size_t complete_len;
size_t short_len;
- int max_len;
-
- max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
- complete_len = strlen(hdev->dev_name);
- short_len = strlen(hdev->short_name);
-
- /* no space left for name */
- if (max_len < 1)
- return ad_len;
+ size_t complete_len;
- /* no name set */
- if (!complete_len)
+ /* no space left for name (+ NULL + type + len) */
+ if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
return ad_len;
- /* complete name fits and is eq to max short name len or smaller */
- if (complete_len <= max_len &&
- complete_len <= HCI_MAX_SHORT_NAME_LENGTH) {
+ /* use complete name if present and fits */
+ complete_len = strlen(hdev->dev_name);
+ if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len);
- }
+ hdev->dev_name, complete_len + 1);
- /* short name set and fits */
- if (short_len && short_len <= max_len) {
+ /* use short name if present */
+ short_len = strlen(hdev->short_name);
+ if (short_len)
return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->short_name, short_len);
- }
+ hdev->short_name, short_len + 1);
- /* no short name set so shorten complete name */
- if (!short_len) {
- return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
- hdev->dev_name, max_len);
+ /* use shortened full name if present, we already know that name
+ * is longer than HCI_MAX_SHORT_NAME_LENGTH
+ */
+ if (complete_len) {
+ u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
+
+ memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
+ name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
+
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
+ sizeof(name));
}
return ad_len;
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index 6b06629245a8..dde77bd59f91 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -106,6 +106,8 @@ static inline void hci_update_background_scan(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
+u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
+
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
u8 *data, u8 data_len)
{
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 736038085feb..1fba2a03f8ae 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -6017,7 +6017,15 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
+static u8 calculate_name_len(struct hci_dev *hdev)
+{
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+
+ return append_local_name(hdev, buf, 0);
+}
+
+static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
+ bool is_adv_data)
{
u8 max_len = HCI_MAX_AD_LENGTH;
@@ -6030,9 +6038,8 @@ static u8 tlv_data_max_len(u32 adv_flags, bool is_adv_data)
if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
max_len -= 3;
} else {
- /* at least 1 byte of name should fit in */
if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
- max_len -= 3;
+ max_len -= calculate_name_len(hdev);
if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
max_len -= 4;
@@ -6063,12 +6070,13 @@ static bool appearance_managed(u32 adv_flags)
return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
-static bool tlv_data_is_valid(u32 adv_flags, u8 *data, u8 len, bool is_adv_data)
+static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
+ u8 len, bool is_adv_data)
{
int i, cur_len;
u8 max_len;
- max_len = tlv_data_max_len(adv_flags, is_adv_data);
+ max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
if (len > max_len)
return false;
@@ -6215,8 +6223,8 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (!tlv_data_is_valid(flags, cp->data, cp->adv_data_len, true) ||
- !tlv_data_is_valid(flags, cp->data + cp->adv_data_len,
+ if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
+ !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
cp->scan_rsp_len, false)) {
err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -6429,8 +6437,8 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
rp.instance = cp->instance;
rp.flags = cp->flags;
- rp.max_adv_data_len = tlv_data_max_len(flags, true);
- rp.max_scan_rsp_len = tlv_data_max_len(flags, false);
+ rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
+ rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index c5fea9393946..2136e45f5277 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -972,13 +972,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
mod_timer(&query->timer, jiffies);
}
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
- spin_lock(&br->multicast_lock);
if (br->multicast_disabled || !netif_running(br->dev))
- goto out;
+ return;
br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
@@ -987,8 +986,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (port->multicast_router == MDB_RTR_TYPE_PERM &&
hlist_unhashed(&port->rlist))
br_multicast_add_router(br, port);
+}
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ __br_multicast_enable_port(port);
spin_unlock(&br->multicast_lock);
}
@@ -1994,8 +1999,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
- int err = 0;
struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_port *port;
+ int err = 0;
spin_lock_bh(&br->multicast_lock);
if (br->multicast_disabled == !val)
@@ -2023,10 +2029,9 @@ rollback:
goto rollback;
}
- br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
- br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+ br_multicast_open(br);
+ list_for_each_entry(port, &br->port_list, list)
+ __br_multicast_enable_port(port);
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 00d2601407c5..1a7c9a79a53c 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
while (got < num_pages) {
rc = get_user_pages_unlocked(
(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
- num_pages - got, write_page, 0, pages + got);
+ num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
if (rc < 0)
break;
BUG_ON(rc == 0);
diff --git a/net/core/dev.c b/net/core/dev.c
index 6aa43cd8cbb5..8341dadf5e94 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2899,6 +2899,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
}
return head;
}
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
@@ -3709,7 +3710,7 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
-static void net_tx_action(struct softirq_action *h)
+static __latent_entropy void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4375,6 +4376,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->flush = 0;
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
NAPI_GRO_CB(skb)->is_fou = 0;
NAPI_GRO_CB(skb)->is_atomic = 1;
NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
@@ -5062,7 +5064,7 @@ out_unlock:
return work;
}
-static void net_rx_action(struct softirq_action *h)
+static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 0cc607d05fc8..87e01815ec85 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -246,15 +246,13 @@ ipv6:
case htons(ETH_P_8021AD):
case htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
+ struct vlan_hdr _vlan;
+ bool vlan_tag_present = skb && skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(skb))
+ if (vlan_tag_present)
proto = skb->protocol;
- if (!skb_vlan_tag_present(skb) ||
- proto == cpu_to_be16(ETH_P_8021Q) ||
- proto == cpu_to_be16(ETH_P_8021AD)) {
- struct vlan_hdr _vlan;
-
+ if (!vlan_tag_present || eth_type_vlan(skb->protocol)) {
vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
data, hlen, &_vlan);
if (!vlan)
@@ -272,7 +270,7 @@ ipv6:
FLOW_DISSECTOR_KEY_VLAN,
target_container);
- if (skb_vlan_tag_present(skb)) {
+ if (vlan_tag_present) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
key_vlan->vlan_priority =
(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index b9243b14af17..1309d78e2a64 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -215,13 +215,14 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id);
*/
int peernet2id_alloc(struct net *net, struct net *peer)
{
+ unsigned long flags;
bool alloc;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (alloc && id >= 0)
rtnl_net_notifyid(net, RTM_NEWNSID, id);
return id;
@@ -230,11 +231,12 @@ int peernet2id_alloc(struct net *net, struct net *peer)
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
+ unsigned long flags;
int id;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
id = __peernet2id(net, peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
return id;
}
EXPORT_SYMBOL(peernet2id);
@@ -249,17 +251,18 @@ bool peernet_has_id(struct net *net, struct net *peer)
struct net *get_net_ns_by_id(struct net *net, int id)
{
+ unsigned long flags;
struct net *peer;
if (id < 0)
return NULL;
rcu_read_lock();
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
peer = idr_find(&net->netns_ids, id);
if (peer)
get_net(peer);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
rcu_read_unlock();
return peer;
@@ -429,17 +432,17 @@ static void cleanup_net(struct work_struct *work)
for_each_net(tmp) {
int id;
- spin_lock_bh(&tmp->nsid_lock);
+ spin_lock_irq(&tmp->nsid_lock);
id = __peernet2id(tmp, net);
if (id >= 0)
idr_remove(&tmp->netns_ids, id);
- spin_unlock_bh(&tmp->nsid_lock);
+ spin_unlock_irq(&tmp->nsid_lock);
if (id >= 0)
rtnl_net_notifyid(tmp, RTM_DELNSID, id);
}
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irq(&net->nsid_lock);
idr_destroy(&net->netns_ids);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irq(&net->nsid_lock);
}
rtnl_unlock();
@@ -568,6 +571,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ unsigned long flags;
struct net *peer;
int nsid, err;
@@ -588,15 +592,15 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
if (IS_ERR(peer))
return PTR_ERR(peer);
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
if (__peernet2id(net, peer) >= 0) {
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
err = -EEXIST;
goto out;
}
err = alloc_netid(net, peer, nsid);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
if (err >= 0) {
rtnl_net_notifyid(net, RTM_NEWNSID, err);
err = 0;
@@ -718,10 +722,11 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
.idx = 0,
.s_idx = cb->args[0],
};
+ unsigned long flags;
- spin_lock_bh(&net->nsid_lock);
+ spin_lock_irqsave(&net->nsid_lock, flags);
idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_unlock_irqrestore(&net->nsid_lock, flags);
cb->args[0] = net_cb.idx;
return skb->len;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5219a9e2127a..306b8f0e03c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -216,8 +216,8 @@
#define M_QUEUE_XMIT 2 /* Inject packet into qdisc */
/* If lock -- protects updating of if_list */
-#define if_lock(t) spin_lock(&(t->if_lock));
-#define if_unlock(t) spin_unlock(&(t->if_lock));
+#define if_lock(t) mutex_lock(&(t->if_lock));
+#define if_unlock(t) mutex_unlock(&(t->if_lock));
/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
@@ -423,7 +423,7 @@ struct pktgen_net {
};
struct pktgen_thread {
- spinlock_t if_lock; /* for list of devices */
+ struct mutex if_lock; /* for list of devices */
struct list_head if_list; /* All device here */
struct list_head th_list;
struct task_struct *tsk;
@@ -2010,11 +2010,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
{
struct pktgen_thread *t;
+ mutex_lock(&pktgen_thread_lock);
+
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
struct pktgen_dev *pkt_dev;
- rcu_read_lock();
- list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+ if_lock(t);
+ list_for_each_entry(pkt_dev, &t->if_list, list) {
if (pkt_dev->odev != dev)
continue;
@@ -2029,8 +2031,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
dev->name);
break;
}
- rcu_read_unlock();
+ if_unlock(t);
}
+ mutex_unlock(&pktgen_thread_lock);
}
static int pktgen_device_event(struct notifier_block *unused,
@@ -3762,7 +3765,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
return -ENOMEM;
}
- spin_lock_init(&t->if_lock);
+ mutex_init(&t->if_lock);
t->cpu = cpu;
INIT_LIST_HEAD(&t->if_list);
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index e92b759d906c..9a1a352fd1eb 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -129,7 +129,6 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)
return 0;
}
-EXPORT_SYMBOL(reuseport_add_sock);
static void reuseport_free_rcu(struct rcu_head *head)
{
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f983c102ebe3..d9e2fe1da724 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -440,7 +440,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
skb_gro_pull(skb, sizeof(*eh));
skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 5ee1d43f1310..4ebe2aa3e7d3 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -300,10 +300,6 @@ static void hsr_forward_do(struct hsr_frame_info *frame)
static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
struct hsr_frame_info *frame)
{
- struct net_device *master_dev;
-
- master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
-
if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
frame->is_local_exclusive = true;
skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1effc986739e..9648c97e541f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1391,7 +1391,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 6cb57bb8692d..805f6607f8d9 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -249,7 +249,7 @@ static struct sk_buff **fou_gro_receive(struct sock *sk,
if (!ops || !ops->callbacks.gro_receive)
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -441,7 +441,7 @@ next_proto:
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
goto out_unlock;
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 96e0efecefa6..d5cac99170b1 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -229,7 +229,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
skb_gro_postpull_rcsum(skb, greh, grehlen);
- pp = ptype->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
out_unlock:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 77c20a489218..ca97835bfec4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -25,6 +25,7 @@
#include <net/inet_hashtables.h>
#include <net/secure_seq.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/sock_reuseport.h>
static u32 inet_ehashfn(const struct net *net, const __be32 laddr,
@@ -172,7 +173,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
struct inet_sock *inet = inet_sk(sk);
@@ -186,7 +187,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score += 4;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
@@ -215,11 +216,12 @@ struct sock *__inet_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each_rcu(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 05d105832bdb..03e7f7310423 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -538,7 +538,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
{
struct iphdr *iph;
int ptr;
- struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
@@ -546,8 +545,6 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
struct rtable *rt = skb_rtable(skb);
int err = 0;
- dev = rt->dst.dev;
-
/* for offloaded checksums cleanup checksum before fragmentation */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index af4919792b6a..b8a2d63d1fb8 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
}
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
__wsum csum = skb->csum;
@@ -106,8 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
return;
if (offset != 0)
- csum = csum_sub(csum, csum_partial(skb_transport_header(skb),
- offset, 0));
+ csum = csum_sub(csum,
+ csum_partial(skb_transport_header(skb) + tlen,
+ offset, 0));
put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
@@ -153,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
}
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
- int offset)
+ int tlen, int offset)
{
struct inet_sock *inet = inet_sk(skb->sk);
unsigned int flags = inet->cmsg_flags;
@@ -216,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
}
if (flags & IP_CMSG_CHECKSUM)
- ip_cmsg_recv_checksum(msg, skb, offset);
+ ip_cmsg_recv_checksum(msg, skb, tlen, offset);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 7cf7d6e380c2..205e2000d395 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -994,7 +994,7 @@ struct proto ping_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.sendmsg = ping_v4_sendmsg,
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 03618ed03532..d78d738fb172 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -934,7 +934,7 @@ struct proto raw_prot = {
.close = raw_close,
.destroy = raw_destroy,
.connect = ip4_datagram_connect,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = raw_ioctl,
.init = raw_init,
.setsockopt = raw_setsockopt,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1cb67de106fe..80bc36b25de2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -96,11 +96,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
container_of(table->data, struct net, ipv4.ping_group_range.range);
unsigned int seq;
do {
- seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+ seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
*low = data[0];
*high = data[1];
- } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+ } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
}
/* Update system visible IP port range */
@@ -109,10 +109,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
kgid_t *data = table->data;
struct net *net =
container_of(table->data, struct net, ipv4.ping_group_range.range);
- write_seqlock(&net->ipv4.ip_local_ports.lock);
+ write_seqlock(&net->ipv4.ping_group_range.lock);
data[0] = low;
data[1] = high;
- write_sequnlock(&net->ipv4.ip_local_ports.lock);
+ write_sequnlock(&net->ipv4.ping_group_range.lock);
}
/* Validate changes from /proc interface. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 83b3d0b8c481..b9b8282633d4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -86,7 +86,6 @@
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
-EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
@@ -1887,7 +1886,6 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
- struct inet_connection_sock *icsk;
struct sock *sk = cur;
if (!sk) {
@@ -1909,7 +1907,6 @@ get_sk:
continue;
if (sk->sk_family == st->family)
return sk;
- icsk = inet_csk(sk);
}
spin_unlock(&ilb->lock);
st->offset = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c8332715ee2d..195992e0440d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1420,7 +1420,7 @@ try_again:
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
- ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr) + off);
+ ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
err = copied;
if (flags & MSG_TRUNC)
@@ -1442,7 +1442,7 @@ csum_copy_err:
goto try_again;
}
-int udp_disconnect(struct sock *sk, int flags)
+int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
@@ -1464,6 +1464,15 @@ int udp_disconnect(struct sock *sk, int flags)
sk_dst_reset(sk);
return 0;
}
+EXPORT_SYMBOL(__udp_disconnect);
+
+int udp_disconnect(struct sock *sk, int flags)
+{
+ lock_sock(sk);
+ __udp_disconnect(sk, flags);
+ release_sock(sk);
+ return 0;
+}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
@@ -2272,7 +2281,7 @@ int udp_abort(struct sock *sk, int err)
sk->sk_err = err;
sk->sk_error_report(sk);
- udp_disconnect(sk, 0);
+ __udp_disconnect(sk, 0);
release_sock(sk);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index f9333c963607..b2be1d9757ef 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -295,7 +295,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
- pp = udp_sk(sk)->gro_receive(sk, head, skb);
+ pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d8983e15f859..060dd9922018 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -147,9 +147,8 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
}
#endif
-static void __ipv6_regen_rndid(struct inet6_dev *idev);
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
-static void ipv6_regen_rndid(unsigned long data);
+static void ipv6_regen_rndid(struct inet6_dev *idev);
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
static int ipv6_count_addresses(struct inet6_dev *idev);
@@ -409,9 +408,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
goto err_release;
}
- /* One reference from device. We must do this before
- * we invoke __ipv6_regen_rndid().
- */
+ /* One reference from device. */
in6_dev_hold(ndev);
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
@@ -425,17 +422,15 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
#endif
INIT_LIST_HEAD(&ndev->tempaddr_list);
- setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
+ ndev->desync_factor = U32_MAX;
if ((dev->flags&IFF_LOOPBACK) ||
dev->type == ARPHRD_TUNNEL ||
dev->type == ARPHRD_TUNNEL6 ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_NONE) {
ndev->cnf.use_tempaddr = -1;
- } else {
- in6_dev_hold(ndev);
- ipv6_regen_rndid((unsigned long) ndev);
- }
+ } else
+ ipv6_regen_rndid(ndev);
ndev->token = in6addr_any;
@@ -447,7 +442,6 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
err = addrconf_sysctl_register(ndev);
if (err) {
ipv6_mc_destroy_dev(ndev);
- del_timer(&ndev->regen_timer);
snmp6_unregister_dev(ndev);
goto err_release;
}
@@ -1190,6 +1184,8 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
int ret = 0;
u32 addr_flags;
unsigned long now = jiffies;
+ long max_desync_factor;
+ s32 cnf_temp_preferred_lft;
write_lock_bh(&idev->lock);
if (ift) {
@@ -1222,23 +1218,42 @@ retry:
}
in6_ifa_hold(ifp);
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
- __ipv6_try_regen_rndid(idev, tmpaddr);
+ ipv6_try_regen_rndid(idev, tmpaddr);
memcpy(&addr.s6_addr[8], idev->rndid, 8);
age = (now - ifp->tstamp) / HZ;
+
+ regen_advance = idev->cnf.regen_max_retry *
+ idev->cnf.dad_transmits *
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+
+ /* recalculate max_desync_factor each time and update
+ * idev->desync_factor if it's larger
+ */
+ cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
+ max_desync_factor = min_t(__u32,
+ idev->cnf.max_desync_factor,
+ cnf_temp_preferred_lft - regen_advance);
+
+ if (unlikely(idev->desync_factor > max_desync_factor)) {
+ if (max_desync_factor > 0) {
+ get_random_bytes(&idev->desync_factor,
+ sizeof(idev->desync_factor));
+ idev->desync_factor %= max_desync_factor;
+ } else {
+ idev->desync_factor = 0;
+ }
+ }
+
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
- tmp_prefered_lft = min_t(__u32,
- ifp->prefered_lft,
- idev->cnf.temp_prefered_lft + age -
- idev->cnf.max_desync_factor);
+ tmp_prefered_lft = cnf_temp_preferred_lft + age -
+ idev->desync_factor;
+ tmp_prefered_lft = min_t(__u32, ifp->prefered_lft, tmp_prefered_lft);
tmp_plen = ifp->prefix_len;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
- regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred
@@ -2150,7 +2165,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
}
/* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
-static void __ipv6_regen_rndid(struct inet6_dev *idev)
+static void ipv6_regen_rndid(struct inet6_dev *idev)
{
regen:
get_random_bytes(idev->rndid, sizeof(idev->rndid));
@@ -2179,43 +2194,10 @@ regen:
}
}
-static void ipv6_regen_rndid(unsigned long data)
-{
- struct inet6_dev *idev = (struct inet6_dev *) data;
- unsigned long expires;
-
- rcu_read_lock_bh();
- write_lock_bh(&idev->lock);
-
- if (idev->dead)
- goto out;
-
- __ipv6_regen_rndid(idev);
-
- expires = jiffies +
- idev->cnf.temp_prefered_lft * HZ -
- idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
- idev->cnf.max_desync_factor * HZ;
- if (time_before(expires, jiffies)) {
- pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
- __func__, idev->dev->name);
- goto out;
- }
-
- if (!mod_timer(&idev->regen_timer, expires))
- in6_dev_hold(idev);
-
-out:
- write_unlock_bh(&idev->lock);
- rcu_read_unlock_bh();
- in6_dev_put(idev);
-}
-
-static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
+static void ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
{
if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
- __ipv6_regen_rndid(idev);
+ ipv6_regen_rndid(idev);
}
/*
@@ -2356,7 +2338,7 @@ static void manage_tempaddrs(struct inet6_dev *idev,
max_valid = 0;
max_prefered = idev->cnf.temp_prefered_lft -
- idev->cnf.max_desync_factor - age;
+ idev->desync_factor - age;
if (max_prefered < 0)
max_prefered = 0;
@@ -3018,7 +3000,7 @@ static void init_loopback(struct net_device *dev)
* lo device down, release this obsolete dst and
* reallocate a new router for ifa.
*/
- if (sp_ifa->rt->dst.obsolete > 0) {
+ if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
ip6_rt_put(sp_ifa->rt);
sp_ifa->rt = NULL;
} else {
@@ -3594,9 +3576,6 @@ restart:
if (!how)
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
- if (how && del_timer(&idev->regen_timer))
- in6_dev_put(idev);
-
/* Step 3: clear tempaddr list */
while (!list_empty(&idev->tempaddr_list)) {
ifa = list_first_entry(&idev->tempaddr_list,
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00cf28ad4565..02761c9fe43e 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
- const int dif)
+ const int dif, bool exact_dif)
{
int score = -1;
@@ -109,7 +109,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
return -1;
score++;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
@@ -131,11 +131,12 @@ struct sock *inet6_lookup_listener(struct net *net,
unsigned int hash = inet_lhashfn(net, hnum);
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
int score, hiscore = 0, matches = 0, reuseport = 0;
+ bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
u32 phash = 0;
sk_for_each(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif);
+ score = compute_score(sk, net, hnum, daddr, dif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -263,13 +264,15 @@ EXPORT_SYMBOL_GPL(inet6_hash_connect);
int inet6_hash(struct sock *sk)
{
+ int err = 0;
+
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
local_bh_enable();
}
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e7bfd55899a3..1fcf61f1cbc3 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -246,7 +246,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = ops->callbacks.gro_receive(head, skb);
+ pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3a70567846aa..03e050d22508 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -157,6 +157,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(&any, local);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
+ ipv6_addr_any(&t->parms.raddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -164,6 +165,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
hash = HASH(remote, &any);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+ ipv6_addr_any(&t->parms.laddr) &&
(t->dev->flags & IFF_UP))
return t;
}
@@ -1170,6 +1172,7 @@ route_lookup:
if (err)
return err;
+ skb->protocol = htons(ETH_P_IPV6);
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 5330262ab673..636ec56f5f50 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -120,6 +120,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
static bool setsockopt_needs_rtnl(int optname)
{
switch (optname) {
+ case IPV6_ADDRFORM:
case IPV6_ADD_MEMBERSHIP:
case IPV6_DROP_MEMBERSHIP:
case IPV6_JOIN_ANYCAST:
@@ -198,7 +199,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
}
fl6_free_socklist(sk);
- ipv6_sock_mc_close(sk);
+ __ipv6_sock_mc_close(sk);
/*
* Sock is moving from IPv6 to IPv4 (sk_prot), so
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 75c1fc54f188..14a3903f1c82 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -276,16 +276,14 @@ static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
return idev;
}
-void ipv6_sock_mc_close(struct sock *sk)
+void __ipv6_sock_mc_close(struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_mc_socklist *mc_lst;
struct net *net = sock_net(sk);
- if (!rcu_access_pointer(np->ipv6_mc_list))
- return;
+ ASSERT_RTNL();
- rtnl_lock();
while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
struct net_device *dev;
@@ -303,8 +301,17 @@ void ipv6_sock_mc_close(struct sock *sk)
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
-
}
+}
+
+void ipv6_sock_mc_close(struct sock *sk)
+{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+
+ if (!rcu_access_pointer(np->ipv6_mc_list))
+ return;
+ rtnl_lock();
+ __ipv6_sock_mc_close(sk);
rtnl_unlock();
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 0e983b694ee8..66e2d9dfc43a 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -180,7 +180,7 @@ struct proto pingv6_prot = {
.init = ping_init_sock,
.close = ping_close,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
.sendmsg = ping_v6_sendmsg,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d7e8b955ade8..610e09354b2e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1243,7 +1243,7 @@ struct proto rawv6_prot = {
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect_v6_only,
- .disconnect = udp_disconnect,
+ .disconnect = __udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2160d5d009cb..3815e8505ed2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -456,7 +456,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
memmove(head->head + sizeof(struct frag_hdr), head->head,
(head->data - head->head) - sizeof(struct frag_hdr));
- head->mac_header += sizeof(struct frag_hdr);
+ if (skb_mac_header_was_set(head))
+ head->mac_header += sizeof(struct frag_hdr);
head->network_header += sizeof(struct frag_hdr);
skb_reset_transport_header(head);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bdbc38e8bf29..947ed1ded026 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -102,11 +102,13 @@ static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex);
+ const struct in6_addr *gwaddr,
+ struct net_device *dev);
#endif
struct uncached_list {
@@ -656,7 +658,8 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
struct net_device *dev = rt->dst.dev;
if (dev && !netif_carrier_ok(dev) &&
- idev->cnf.ignore_routes_with_linkdown)
+ idev->cnf.ignore_routes_with_linkdown &&
+ !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
goto out;
if (rt6_check_expired(rt))
@@ -803,7 +806,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
rt = rt6_get_dflt_router(gwaddr, dev);
else
rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
- gwaddr, dev->ifindex);
+ gwaddr, dev);
if (rt && !lifetime) {
ip6_del_rt(rt);
@@ -811,8 +814,8 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
}
if (!rt && lifetime)
- rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
- pref);
+ rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
+ dev, pref);
else if (rt)
rt->rt6i_flags = RTF_ROUTEINFO |
(rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1050,6 +1053,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int strict = 0;
strict |= flags & RT6_LOOKUP_F_IFACE;
+ strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
if (net->ipv6.devconf_all->forwarding == 0)
strict |= RT6_LOOKUP_F_REACHABLE;
@@ -1789,7 +1793,7 @@ static struct rt6_info *ip6_nh_lookup_table(struct net *net,
};
struct fib6_table *table;
struct rt6_info *rt;
- int flags = RT6_LOOKUP_F_IFACE;
+ int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;
table = fib6_get_table(net, cfg->fc_table);
if (!table)
@@ -2325,13 +2329,16 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex)
+ const struct in6_addr *gwaddr,
+ struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ int ifindex = dev->ifindex;
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
- table = fib6_get_table(net, RT6_TABLE_INFO);
+ table = fib6_get_table(net, tb_id);
if (!table)
return NULL;
@@ -2357,12 +2364,13 @@ out:
static struct rt6_info *rt6_add_route_info(struct net *net,
const struct in6_addr *prefix, int prefixlen,
- const struct in6_addr *gwaddr, int ifindex,
+ const struct in6_addr *gwaddr,
+ struct net_device *dev,
unsigned int pref)
{
struct fib6_config cfg = {
.fc_metric = IP6_RT_PRIO_USER,
- .fc_ifindex = ifindex,
+ .fc_ifindex = dev->ifindex,
.fc_dst_len = prefixlen,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
RTF_UP | RTF_PREF(pref),
@@ -2371,7 +2379,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
+ cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
@@ -2381,16 +2389,17 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
ip6_route_add(&cfg);
- return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
+ return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
}
#endif
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
+ u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
struct rt6_info *rt;
struct fib6_table *table;
- table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);
+ table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
return NULL;
@@ -2424,20 +2433,20 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
cfg.fc_gateway = *gwaddr;
- ip6_route_add(&cfg);
+ if (!ip6_route_add(&cfg)) {
+ struct fib6_table *table;
+
+ table = fib6_get_table(dev_net(dev), cfg.fc_table);
+ if (table)
+ table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
+ }
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(struct net *net)
+static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
struct rt6_info *rt;
- struct fib6_table *table;
-
- /* NOTE: Keep consistent with rt6_get_dflt_router */
- table = fib6_get_table(net, RT6_TABLE_DFLT);
- if (!table)
- return;
restart:
read_lock_bh(&table->tb6_lock);
@@ -2451,6 +2460,27 @@ restart:
}
}
read_unlock_bh(&table->tb6_lock);
+
+ table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+}
+
+void rt6_purge_dflt_routers(struct net *net)
+{
+ struct fib6_table *table;
+ struct hlist_head *head;
+ unsigned int h;
+
+ rcu_read_lock();
+
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
+ head = &net->ipv6.fib_table_hash[h];
+ hlist_for_each_entry_rcu(table, head, tb6_hlist) {
+ if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
+ __rt6_purge_dflt_routers(table);
+ }
+ }
+
+ rcu_read_unlock();
}
static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 71963b23d5a5..a7700bbf6788 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -425,7 +425,8 @@ try_again:
if (is_udp4) {
if (inet->cmsg_flags)
- ip_cmsg_recv(msg, skb);
+ ip_cmsg_recv_offset(msg, skb,
+ sizeof(struct udphdr), off);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..fce25afb652a 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -338,7 +338,7 @@ static int l2tp_ip_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index ea2ae6664cc8..ad3468c32b53 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -410,7 +410,7 @@ static int l2tp_ip6_disconnect(struct sock *sk, int flags)
if (sock_flag(sk, SOCK_ZAPPED))
return 0;
- return udp_disconnect(sk, flags);
+ return __udp_disconnect(sk, flags);
}
static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 7663c28ba353..a4e0d59a40dd 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -18,21 +18,24 @@
#include "key.h"
#include "aes_ccm.h"
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len)
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -41,6 +44,9 @@ void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+
+ return 0;
}
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
@@ -48,18 +54,23 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
size_t mic_len)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *) aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + CCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, CCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, mic_len);
@@ -67,7 +78,10 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6a73d1e4d186..fcd3254c5cf0 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,12 +12,14 @@
#include <linux/crypto.h>
+#define CCM_AAD_LEN 32
+
struct crypto_aead *ieee80211_aes_key_setup_encrypt(const u8 key[],
size_t key_len,
size_t mic_len);
-void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic,
- size_t mic_len);
+int ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic,
+ size_t mic_len);
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic,
size_t mic_len);
diff --git a/net/mac80211/aes_gcm.c b/net/mac80211/aes_gcm.c
index 3afe361fd27c..8a4397cc1b08 100644
--- a/net/mac80211/aes_gcm.c
+++ b/net/mac80211/aes_gcm.c
@@ -15,20 +15,23 @@
#include "key.h"
#include "aes_gcm.h"
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic)
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
- memset(aead_req, 0, sizeof(aead_req_data));
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -37,24 +40,31 @@ void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
aead_request_set_ad(aead_req, sg[0].length);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
+ return 0;
}
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[3];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
+ u8 *__aad;
+ int err;
if (data_len == 0)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GCM_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ __aad = (u8 *)aead_req + reqsize;
+ memcpy(__aad, aad, GCM_AAD_LEN);
sg_init_table(sg, 3);
- sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
+ sg_set_buf(&sg[0], &__aad[2], be16_to_cpup((__be16 *)__aad));
sg_set_buf(&sg[1], data, data_len);
sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);
@@ -63,7 +73,10 @@ int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
data_len + IEEE80211_GCMP_MIC_LEN, j_0);
aead_request_set_ad(aead_req, sg[0].length);
- return crypto_aead_decrypt(aead_req);
+ err = crypto_aead_decrypt(aead_req);
+ kzfree(aead_req);
+
+ return err;
}
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gcm.h b/net/mac80211/aes_gcm.h
index 1347fda6b76a..55aed5352494 100644
--- a/net/mac80211/aes_gcm.h
+++ b/net/mac80211/aes_gcm.h
@@ -11,8 +11,10 @@
#include <linux/crypto.h>
-void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
- u8 *data, size_t data_len, u8 *mic);
+#define GCM_AAD_LEN 32
+
+int ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
+ u8 *data, size_t data_len, u8 *mic);
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
u8 *data, size_t data_len, u8 *mic);
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
diff --git a/net/mac80211/aes_gmac.c b/net/mac80211/aes_gmac.c
index 3ddd927aaf30..bd72a862ddb7 100644
--- a/net/mac80211/aes_gmac.c
+++ b/net/mac80211/aes_gmac.c
@@ -17,28 +17,27 @@
#include "key.h"
#include "aes_gmac.h"
-#define GMAC_MIC_LEN 16
-#define GMAC_NONCE_LEN 12
-#define AAD_LEN 20
-
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
const u8 *data, size_t data_len, u8 *mic)
{
struct scatterlist sg[4];
- char aead_req_data[sizeof(struct aead_request) +
- crypto_aead_reqsize(tfm)]
- __aligned(__alignof__(struct aead_request));
- struct aead_request *aead_req = (void *)aead_req_data;
- u8 zero[GMAC_MIC_LEN], iv[AES_BLOCK_SIZE];
+ u8 *zero, *__aad, iv[AES_BLOCK_SIZE];
+ struct aead_request *aead_req;
+ int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
if (data_len < GMAC_MIC_LEN)
return -EINVAL;
- memset(aead_req, 0, sizeof(aead_req_data));
+ aead_req = kzalloc(reqsize + GMAC_MIC_LEN + GMAC_AAD_LEN, GFP_ATOMIC);
+ if (!aead_req)
+ return -ENOMEM;
+
+ zero = (u8 *)aead_req + reqsize;
+ __aad = zero + GMAC_MIC_LEN;
+ memcpy(__aad, aad, GMAC_AAD_LEN);
- memset(zero, 0, GMAC_MIC_LEN);
sg_init_table(sg, 4);
- sg_set_buf(&sg[0], aad, AAD_LEN);
+ sg_set_buf(&sg[0], __aad, GMAC_AAD_LEN);
sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN);
sg_set_buf(&sg[2], zero, GMAC_MIC_LEN);
sg_set_buf(&sg[3], mic, GMAC_MIC_LEN);
@@ -49,9 +48,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
aead_request_set_tfm(aead_req, tfm);
aead_request_set_crypt(aead_req, sg, sg, 0, iv);
- aead_request_set_ad(aead_req, AAD_LEN + data_len);
+ aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
crypto_aead_encrypt(aead_req);
+ kzfree(aead_req);
return 0;
}
diff --git a/net/mac80211/aes_gmac.h b/net/mac80211/aes_gmac.h
index d328204d73a8..32e6442c95be 100644
--- a/net/mac80211/aes_gmac.h
+++ b/net/mac80211/aes_gmac.h
@@ -11,6 +11,10 @@
#include <linux/crypto.h>
+#define GMAC_AAD_LEN 20
+#define GMAC_MIC_LEN 16
+#define GMAC_NONCE_LEN 12
+
struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
size_t key_len);
int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index c3f610bba3fe..eede5c6db8d5 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -820,7 +820,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
break;
rcu_read_lock();
- sta = sta_info_get(sdata, mgmt->da);
+ sta = sta_info_get_bss(sdata, mgmt->da);
rcu_read_unlock();
if (!sta)
return -ENOLINK;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 21a8947651e1..eeab7250f4b9 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2301,6 +2301,8 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
__le16 fc = hdr->frame_control;
struct sk_buff_head frame_list;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ struct ethhdr ethhdr;
+ const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -2311,24 +2313,53 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(status->rx_flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- if (ieee80211_has_a4(hdr->frame_control) &&
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- !rx->sdata->u.vlan.sta)
- return RX_DROP_UNUSABLE;
+ if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+ switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (!rx->sdata->u.vlan.sta)
+ return RX_DROP_UNUSABLE;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sdata->u.mgd.use_4addr)
+ return RX_DROP_UNUSABLE;
+ break;
+ default:
+ return RX_DROP_UNUSABLE;
+ }
+ check_da = NULL;
+ check_sa = NULL;
+ } else switch (rx->sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ check_da = NULL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (!rx->sta ||
+ !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+ check_sa = NULL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ check_sa = NULL;
+ break;
+ default:
+ break;
+ }
- if (is_multicast_ether_addr(hdr->addr1) &&
- ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- rx->sdata->u.vlan.sta) ||
- (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- rx->sdata->u.mgd.use_4addr)))
+ if (is_multicast_ether_addr(hdr->addr1))
return RX_DROP_UNUSABLE;
skb->dev = dev;
__skb_queue_head_init(&frame_list);
+ if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
+ rx->sdata->vif.addr,
+ rx->sdata->vif.type))
+ return RX_DROP_UNUSABLE;
+
ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
rx->sdata->vif.type,
- rx->local->hw.extra_tx_headroom, true);
+ rx->local->hw.extra_tx_headroom,
+ check_da, check_sa);
while (!skb_queue_empty(&frame_list)) {
rx->skb = __skb_dequeue(&frame_list);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index c24934544f9c..8af6dd388d11 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -405,7 +405,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[CCM_AAD_LEN];
u8 b_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -461,10 +461,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
pos += IEEE80211_CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, b_0, aad);
- ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
- skb_put(skb, mic_len), mic_len);
-
- return 0;
+ return ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
+ skb_put(skb, mic_len), mic_len);
}
@@ -639,7 +637,7 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
u8 *pos;
u8 pn[6];
u64 pn64;
- u8 aad[2 * AES_BLOCK_SIZE];
+ u8 aad[GCM_AAD_LEN];
u8 j_0[AES_BLOCK_SIZE];
if (info->control.hw_key &&
@@ -696,10 +694,8 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
pos += IEEE80211_GCMP_HDR_LEN;
gcmp_special_blocks(skb, pn, j_0, aad);
- ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
- skb_put(skb, IEEE80211_GCMP_MIC_LEN));
-
- return 0;
+ return ieee80211_aes_gcm_encrypt(key->u.gcmp.tfm, j_0, aad, pos, len,
+ skb_put(skb, IEEE80211_GCMP_MIC_LEN));
}
ieee80211_tx_result
@@ -1123,9 +1119,9 @@ ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx)
struct ieee80211_key *key = tx->key;
struct ieee80211_mmie_16 *mmie;
struct ieee80211_hdr *hdr;
- u8 aad[20];
+ u8 aad[GMAC_AAD_LEN];
u64 pn64;
- u8 nonce[12];
+ u8 nonce[GMAC_NONCE_LEN];
if (WARN_ON(skb_queue_len(&tx->skbs) != 1))
return TX_DROP;
@@ -1171,7 +1167,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_key *key = rx->key;
struct ieee80211_mmie_16 *mmie;
- u8 aad[20], mic[16], ipn[6], nonce[12];
+ u8 aad[GMAC_AAD_LEN], mic[GMAC_MIC_LEN], ipn[6], nonce[GMAC_NONCE_LEN];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_is_mgmt(hdr->frame_control))
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 13290a70fa71..1308a56f2591 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -246,6 +246,7 @@ enum {
ncsi_dev_state_config_gls,
ncsi_dev_state_config_done,
ncsi_dev_state_suspend_select = 0x0401,
+ ncsi_dev_state_suspend_gls,
ncsi_dev_state_suspend_dcnt,
ncsi_dev_state_suspend_dc,
ncsi_dev_state_suspend_deselect,
@@ -264,6 +265,7 @@ struct ncsi_dev_priv {
#endif
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
+ struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b41a6617d498..6898e7229285 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -141,23 +141,35 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
return -ENODEV;
/* If the channel is the active one, we need to reconfigure it */
+ spin_lock_irqsave(&nc->lock, flags);
ncm = &nc->modes[NCSI_MODE_LINK];
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
if (!list_empty(&nc->link) ||
- nc->state != NCSI_CHANNEL_ACTIVE ||
- (ncm->data[3] & 0x1))
+ nc->state != NCSI_CHANNEL_ACTIVE) {
+ spin_unlock_irqrestore(&nc->lock, flags);
return 0;
+ }
- if (ndp->flags & NCSI_DEV_HWA)
+ spin_unlock_irqrestore(&nc->lock, flags);
+ if (!(ndp->flags & NCSI_DEV_HWA) && !(ncm->data[3] & 0x1))
ndp->flags |= NCSI_DEV_RESHUFFLE;
/* If this channel is the active one and the link doesn't
* work, we have to choose another channel to be the active one.
* The logic here is exactly the same as what we do when the
* link is down on the active channel.
+ *
+ * On the other hand, we need to configure it when the host
+ * driver state on the active channel becomes ready.
*/
ncsi_stop_channel_monitor(nc);
+
+ spin_lock_irqsave(&nc->lock, flags);
+ nc->state = (ncm->data[3] & 0x1) ? NCSI_CHANNEL_INACTIVE :
+ NCSI_CHANNEL_ACTIVE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+
spin_lock_irqsave(&ndp->lock, flags);
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5e509e547c2d..a3bd5fa8ad09 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -540,42 +540,86 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_suspend_select;
/* Fall through */
case ncsi_dev_state_suspend_select:
- case ncsi_dev_state_suspend_dcnt:
- case ncsi_dev_state_suspend_dc:
- case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
- np = ndp->active_package;
- nc = ndp->active_channel;
+ nca.type = NCSI_PKT_CMD_SP;
nca.package = np->id;
- if (nd->state == ncsi_dev_state_suspend_select) {
- nca.type = NCSI_PKT_CMD_SP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- if (ndp->flags & NCSI_DEV_HWA)
- nca.bytes[0] = 0;
- else
- nca.bytes[0] = 1;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+ if (ndp->flags & NCSI_DEV_HWA)
+ nca.bytes[0] = 0;
+ else
+ nca.bytes[0] = 1;
+
+ /* Retrieve the last link states of the channels in the current
+ * package when the current active channel needs to fail over to
+ * another one. This means we will possibly select another
+ * channel as the next active one. The link states of the
+ * channels are the most important factor in that selection, so
+ * we need accurate link states. Unfortunately, the link states
+ * of inactive channels can't be updated with LSC AEN in time.
+ */
+ if (ndp->flags & NCSI_DEV_RESHUFFLE)
+ nd->state = ncsi_dev_state_suspend_gls;
+ else
nd->state = ncsi_dev_state_suspend_dcnt;
- } else if (nd->state == ncsi_dev_state_suspend_dcnt) {
- nca.type = NCSI_PKT_CMD_DCNT;
- nca.channel = nc->id;
- nd->state = ncsi_dev_state_suspend_dc;
- } else if (nd->state == ncsi_dev_state_suspend_dc) {
- nca.type = NCSI_PKT_CMD_DC;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_gls:
+ ndp->pending_req_num = np->channel_num;
+
+ nca.type = NCSI_PKT_CMD_GLS;
+ nca.package = np->id;
+
+ nd->state = ncsi_dev_state_suspend_dcnt;
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
nca.channel = nc->id;
- nca.bytes[0] = 1;
- nd->state = ncsi_dev_state_suspend_deselect;
- } else if (nd->state == ncsi_dev_state_suspend_deselect) {
- nca.type = NCSI_PKT_CMD_DP;
- nca.channel = NCSI_RESERVED_CHANNEL;
- nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
}
+ break;
+ case ncsi_dev_state_suspend_dcnt:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DCNT;
+ nca.package = np->id;
+ nca.channel = nc->id;
+
+ nd->state = ncsi_dev_state_suspend_dc;
ret = ncsi_xmit_cmd(&nca);
- if (ret) {
- nd->state = ncsi_dev_state_functional;
- return;
- }
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_dc:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DC;
+ nca.package = np->id;
+ nca.channel = nc->id;
+ nca.bytes[0] = 1;
+
+ nd->state = ncsi_dev_state_suspend_deselect;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ break;
+ case ncsi_dev_state_suspend_deselect:
+ ndp->pending_req_num = 1;
+
+ nca.type = NCSI_PKT_CMD_DP;
+ nca.package = np->id;
+ nca.channel = NCSI_RESERVED_CHANNEL;
+
+ nd->state = ncsi_dev_state_suspend_done;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
break;
case ncsi_dev_state_suspend_done:
@@ -589,6 +633,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd->state);
}
+
+ return;
+error:
+ nd->state = ncsi_dev_state_functional;
}
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
@@ -597,6 +645,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
+ struct ncsi_channel *hot_nc = NULL;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
@@ -702,12 +751,20 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_config_done:
spin_lock_irqsave(&nc->lock, flags);
- if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
+ if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ hot_nc = nc;
nc->state = NCSI_CHANNEL_ACTIVE;
- else
+ } else {
+ hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
+ }
spin_unlock_irqrestore(&nc->lock, flags);
+ /* Update the hot channel */
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->hot_channel = hot_nc;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
ncsi_start_channel_monitor(nc);
ncsi_process_next_channel(ndp);
break;
@@ -725,10 +782,14 @@ error:
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_package *np;
- struct ncsi_channel *nc, *found;
+ struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
unsigned long flags;
+ spin_lock_irqsave(&ndp->lock, flags);
+ hot_nc = ndp->hot_channel;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
/* The search is done once an inactive channel with up
* link is found.
*/
@@ -746,6 +807,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
if (!found)
found = nc;
+ if (nc == hot_nc)
+ found = nc;
+
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
spin_unlock_irqrestore(&nc->lock, flags);
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index fcb5d1df11e9..004af030ef1a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -361,16 +361,9 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- int err;
-
- RCU_INIT_POINTER(state->hook_entries, entry);
- err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
- goto next_hook;
- kfree_skb(skb);
- }
+ ret = nf_queue(skb, state, &entry, verdict);
+ if (ret == 1 && entry)
+ goto next_hook;
}
return ret;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ba6a1d421222..df2f5a3901df 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -983,7 +983,7 @@ static void gc_worker(struct work_struct *work)
return;
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90)
+ if (ratio >= 90 || expired_count == GC_MAX_EVICTS)
next_run = 0;
gc_work->last_bucket = i;
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index e0adb5959342..9fdb655f85bc 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -18,7 +18,7 @@ unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state,
/* nf_queue.c */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
- unsigned int queuenum);
+ struct nf_hook_entry **entryp, unsigned int verdict);
void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry);
int __init netfilter_queue_init(void);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 96964a0070e1..8f08d759844a 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -107,13 +107,8 @@ void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry)
rcu_read_unlock();
}
-/*
- * Any packet that leaves via this function must come back
- * through nf_reinject().
- */
-int nf_queue(struct sk_buff *skb,
- struct nf_hook_state *state,
- unsigned int queuenum)
+static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
+ unsigned int queuenum)
{
int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
@@ -161,6 +156,27 @@ err:
return status;
}
+/* Packets leaving via this function must come back through nf_reinject(). */
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ struct nf_hook_entry **entryp, unsigned int verdict)
+{
+ struct nf_hook_entry *entry = *entryp;
+ int ret;
+
+ RCU_INIT_POINTER(state->hook_entries, entry);
+ ret = __nf_queue(skb, state, verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) {
+ *entryp = rcu_dereference(entry->next);
+ return 1;
+ }
+ kfree_skb(skb);
+ }
+
+ return 0;
+}
+
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
struct nf_hook_entry *hook_entry;
@@ -187,26 +203,26 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
entry->state.thresh = INT_MIN;
if (verdict == NF_ACCEPT) {
- next_hook:
- verdict = nf_iterate(skb, &entry->state, &hook_entry);
+ hook_entry = rcu_dereference(hook_entry->next);
+ if (hook_entry)
+next_hook:
+ verdict = nf_iterate(skb, &entry->state, &hook_entry);
}
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_STOP:
+okfn:
local_bh_disable();
entry->state.okfn(entry->state.net, entry->state.sk, skb);
local_bh_enable();
break;
case NF_QUEUE:
- RCU_INIT_POINTER(entry->state.hook_entries, hook_entry);
- err = nf_queue(skb, &entry->state,
- verdict >> NF_VERDICT_QBITS);
- if (err < 0) {
- if (err == -ESRCH &&
- (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ err = nf_queue(skb, &entry->state, &hook_entry, verdict);
+ if (err == 1) {
+ if (hook_entry)
goto next_hook;
- kfree_skb(skb);
+ goto okfn;
}
break;
case NF_STOLEN:
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index b70d3ea1430e..24db22257586 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4423,7 +4423,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
*/
unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
{
- int val;
+ u32 val;
val = ntohl(nla_get_be32(attr));
if (val > max)
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index e3b83c31da2e..517f08767a3c 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -158,7 +158,8 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
if (!(set->flags & NFT_SET_TIMEOUT))
return -EINVAL;
- timeout = be64_to_cpu(nla_get_be64(tb[NFTA_DYNSET_TIMEOUT]));
+ timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
+ tb[NFTA_DYNSET_TIMEOUT])));
}
priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
@@ -246,7 +247,8 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
goto nla_put_failure;
- if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT, cpu_to_be64(priv->timeout),
+ if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
+ cpu_to_be64(jiffies_to_msecs(priv->timeout)),
NFTA_DYNSET_PAD))
goto nla_put_failure;
if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index a84cf3d66056..47beb3abcc9d 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -59,7 +59,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_exthdr *priv = nft_expr_priv(expr);
- u32 offset, len, err;
+ u32 offset, len;
+ int err;
if (tb[NFTA_EXTHDR_DREG] == NULL ||
tb[NFTA_EXTHDR_TYPE] == NULL ||
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 09473b415b95..baf694de3935 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -44,6 +44,7 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = {
[NFTA_HASH_LEN] = { .type = NLA_U32 },
[NFTA_HASH_MODULUS] = { .type = NLA_U32 },
[NFTA_HASH_SEED] = { .type = NLA_U32 },
+ [NFTA_HASH_OFFSET] = { .type = NLA_U32 },
};
static int nft_hash_init(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index c6d5358482d1..fbc88009ca2e 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -28,22 +28,20 @@ static void nft_range_eval(const struct nft_expr *expr,
const struct nft_pktinfo *pkt)
{
const struct nft_range_expr *priv = nft_expr_priv(expr);
- bool mismatch;
int d1, d2;
d1 = memcmp(&regs->data[priv->sreg], &priv->data_from, priv->len);
d2 = memcmp(&regs->data[priv->sreg], &priv->data_to, priv->len);
switch (priv->op) {
case NFT_RANGE_EQ:
- mismatch = (d1 < 0 || d2 > 0);
+ if (d1 < 0 || d2 > 0)
+ regs->verdict.code = NFT_BREAK;
break;
case NFT_RANGE_NEQ:
- mismatch = (d1 >= 0 && d2 <= 0);
+ if (d1 >= 0 && d2 <= 0)
+ regs->verdict.code = NFT_BREAK;
break;
}
-
- if (mismatch)
- regs->verdict.code = NFT_BREAK;
}
static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = {
@@ -59,6 +57,7 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
struct nft_range_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc_from, desc_to;
int err;
+ u32 op;
err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from),
&desc_from, tb[NFTA_RANGE_FROM_DATA]);
@@ -80,7 +79,20 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
if (err < 0)
goto err2;
- priv->op = ntohl(nla_get_be32(tb[NFTA_RANGE_OP]));
+ err = nft_parse_u32_check(tb[NFTA_RANGE_OP], U8_MAX, &op);
+ if (err < 0)
+ goto err2;
+
+ switch (op) {
+ case NFT_RANGE_EQ:
+ case NFT_RANGE_NEQ:
+ break;
+ default:
+ err = -EINVAL;
+ goto err2;
+ }
+
+ priv->op = op;
priv->len = desc_from.len;
return 0;
err2:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index e0aa7c1d0224..fc4977456c30 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1513,7 +1513,7 @@ xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
if (!num_hooks)
return ERR_PTR(-EINVAL);
- ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+ ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index 018eed7e1ff1..8668a5c18dc3 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
li.u.ulog.copy_len = info->len;
li.u.ulog.group = info->group;
li.u.ulog.qthreshold = info->threshold;
+ li.u.ulog.flags = 0;
if (info->flags & XT_NFLOG_F_COPY_LEN)
li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2fab0c65aa94..b89b688e9d01 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -431,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ_v1 (0xFFFFFFFF / (HZ*60*60*24))
-#define MAX_CPJ (0xFFFFFFFFFFFFFFFF / (HZ*60*60*24))
+#define MAX_CPJ (0xFFFFFFFFFFFFFFFFULL / (HZ*60*60*24))
/* Repeated shift and or gives us all 1s, final shift and add 1 gives
* us the power of 2 below the theoretical max, so GCC simply does a
@@ -473,7 +473,7 @@ static u64 user2credits(u64 user, int revision)
return div64_u64(user * HZ * CREDITS_PER_JIFFY_v1,
XT_HASHLIMIT_SCALE);
} else {
- if (user > 0xFFFFFFFFFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+ if (user > 0xFFFFFFFFFFFFFFFFULL / (HZ*CREDITS_PER_JIFFY))
return div64_u64(user, XT_HASHLIMIT_SCALE_v2)
* HZ * CREDITS_PER_JIFFY;
diff --git a/net/netfilter/xt_ipcomp.c b/net/netfilter/xt_ipcomp.c
index 89d53104c6b3..000e70377f85 100644
--- a/net/netfilter/xt_ipcomp.c
+++ b/net/netfilter/xt_ipcomp.c
@@ -26,6 +26,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fan Du <fan.du@windriver.com>");
MODULE_DESCRIPTION("Xtables: IPv4/6 IPsec-IPComp SPI match");
+MODULE_ALIAS("ipt_ipcomp");
+MODULE_ALIAS("ip6t_ipcomp");
/* Returns 1 if the spi is matched by the range, 0 otherwise */
static inline bool
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 11db0d619c00..d2238b204691 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -250,7 +250,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- netdev_features_t features;
+ struct sk_buff *orig_skb = skb;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
@@ -258,9 +258,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
!netif_carrier_ok(dev)))
goto drop;
- features = netif_skb_features(skb);
- if (skb_needs_linearize(skb, features) &&
- __skb_linearize(skb))
+ skb = validate_xmit_skb_list(skb, dev);
+ if (skb != orig_skb)
goto drop;
txq = skb_get_tx_queue(dev, skb);
@@ -280,7 +279,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
return ret;
drop:
atomic_long_inc(&dev->tx_dropped);
- kfree_skb(skb);
+ kfree_skb_list(skb);
return NET_XMIT_DROP;
}
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 0e72bec1529f..56c7d27eefee 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -13,5 +13,5 @@ obj-$(CONFIG_RDS_TCP) += rds_tcp.o
rds_tcp-y := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
tcp_send.o tcp_stats.o
-ccflags-$(CONFIG_RDS_DEBUG) := -DDEBUG
+ccflags-$(CONFIG_RDS_DEBUG) := -DRDS_DEBUG
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 25532a46602f..4121e1862444 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -33,7 +33,7 @@
#define KERNEL_HAS_ATOMIC64
#endif
-#ifdef DEBUG
+#ifdef RDS_DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 4353a29f3b57..1ed18d8c9c9f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -276,7 +276,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
goto error;
trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
- here, ERR_PTR(ret));
+ here, NULL);
spin_lock_bh(&call->conn->params.peer->lock);
hlist_add_head(&call->error_link,
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 941b724d523b..862eea6b266c 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -193,8 +193,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
dst = ip6_route_output(&init_net, NULL, fl6);
- if (IS_ERR(dst)) {
- _leave(" [route err %ld]", PTR_ERR(dst));
+ if (dst->error) {
+ _leave(" [route err %d]", dst->error);
return;
}
break;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a512b18c0088..f893d180da1c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1028,8 +1028,7 @@ static struct nlattr *find_dump_kind(const struct nlmsghdr *n)
if (tb[1] == NULL)
return NULL;
- if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
- nla_len(tb[1]), NULL) < 0)
+ if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0)
return NULL;
kind = tb2[TCA_ACT_KIND];
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2d93be6717e5..6073a1132725 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -249,8 +249,11 @@ out:
static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
u64 lastuse)
{
- tcf_lastuse_update(&a->tcfa_tm);
+ struct tcf_mirred *m = to_mirred(a);
+ struct tcf_t *tm = &m->tcf_tm;
+
_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
+ tm->lastuse = lastuse;
}
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2ee29a3375f6..2b2a7974e4bb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -345,7 +345,8 @@ replay:
if (err == 0) {
struct tcf_proto *next = rtnl_dereference(tp->next);
- tfilter_notify(net, skb, n, tp, fh,
+ tfilter_notify(net, skb, n, tp,
+ t->tcm_handle,
RTM_DELTFILTER, false);
if (tcf_destroy(tp, false))
RCU_INIT_POINTER(*back, next);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 4282b488985b..7b50e4307485 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -418,6 +418,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
__u8 has_data = 0;
int gso = 0;
int pktcount = 0;
+ int auth_len = 0;
struct dst_entry *dst;
unsigned char *auth = NULL; /* pointer to auth in skb data */
@@ -510,7 +511,12 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
list_for_each_entry(chunk, &packet->chunk_list, list) {
int padded = SCTP_PAD4(chunk->skb->len);
- if (pkt_size + padded > tp->pathmtu)
+ if (chunk == packet->auth)
+ auth_len = padded;
+ else if (auth_len + padded + packet->overhead >
+ tp->pathmtu)
+ goto nomem;
+ else if (pkt_size + padded > tp->pathmtu)
break;
pkt_size += padded;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 026e3bca4a94..8ec20a64a3f8 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3422,6 +3422,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
+ /* Report violation if chunk len overflows */
+ ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
+ if (ch_end > skb_tail_pointer(skb))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
@@ -3453,12 +3459,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
}
}
- /* Report violation if chunk len overflows */
- ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
- if (ch_end > skb_tail_pointer(skb))
- return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
- commands);
-
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fb02c7033307..9fbb6feb8c27 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4687,7 +4687,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
- if (len <= 0)
+ if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
@@ -6430,6 +6430,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
lock_sock(sk);
switch (optname) {
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 6f145b592a53..017801f9dbaa 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -768,6 +768,9 @@ int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
int err;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
err = switchdev_port_attr_get(dev, &attr);
if (err && err != -EOPNOTSUPP)
return err;
@@ -923,6 +926,9 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlattr *afspec;
int err = 0;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_PROTINFO);
if (protinfo) {
@@ -956,6 +962,9 @@ int switchdev_port_bridge_dellink(struct net_device *dev,
{
struct nlattr *afspec;
+ if (!netif_is_bridge_port(dev))
+ return -EOPNOTSUPP;
+
afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
IFLA_AF_SPEC);
if (afspec)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 753f774cb46f..aa1babbea385 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -247,11 +247,17 @@ int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
*
* RCU is locked, no other locks set
*/
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ u16 acked = msg_bcast_ack(hdr);
struct sk_buff_head xmitq;
+ /* Ignore bc acks sent by peer before bcast synch point was received */
+ if (msg_bc_ack_invalid(hdr))
+ return;
+
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
@@ -279,11 +285,11 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
- if (msg_type(hdr) == STATE_MSG) {
+ if (msg_type(hdr) != STATE_MSG) {
+ tipc_link_bc_init_rcv(l, hdr);
+ } else if (!msg_bc_ack_invalid(hdr)) {
tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
- } else {
- tipc_link_bc_init_rcv(l, hdr);
}
tipc_bcast_unlock(net);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5ffe34472ccd..855d53c64ab3 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -55,7 +55,8 @@ void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
-void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked);
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
+ struct tipc_msg *hdr);
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b36e16cdc945..1055164c6232 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1312,6 +1312,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_next_sent(hdr, l->snd_nxt);
msg_set_ack(hdr, l->rcv_nxt - 1);
msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_ack_invalid(hdr, !node_up);
msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
msg_set_link_tolerance(hdr, tolerance);
msg_set_linkprio(hdr, priority);
@@ -1574,6 +1575,7 @@ static void tipc_link_build_bc_init_msg(struct tipc_link *l,
__skb_queue_head_init(&list);
if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
return;
+ msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
tipc_link_xmit(l, &list, xmitq);
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c3832cdf2278..50a739860d37 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -714,6 +714,23 @@ static inline void msg_set_peer_stopping(struct tipc_msg *m, u32 s)
msg_set_bits(m, 5, 13, 0x1, s);
}
+static inline bool msg_bc_ack_invalid(struct tipc_msg *m)
+{
+ switch (msg_user(m)) {
+ case BCAST_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case LINK_PROTOCOL:
+ return msg_bits(m, 5, 14, 0x1);
+ default:
+ return false;
+ }
+}
+
+static inline void msg_set_bc_ack_invalid(struct tipc_msg *m, bool invalid)
+{
+ msg_set_bits(m, 5, 14, 0x1, invalid);
+}
+
static inline char *msg_media_addr(struct tipc_msg *m)
{
return (char *)&m->hdr[TIPC_MEDIA_INFO_OFFSET];
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index a04fe9be1c60..c1cfd92de17a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -156,6 +156,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
pr_warn("Bulk publication failure\n");
return;
}
+ msg_set_bc_ack_invalid(buf_msg(skb), true);
item = (struct distr_item *)msg_data(buf_msg(skb));
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7ef14e2d2356..9d2f4c2b08ab 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1535,7 +1535,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
if (unlikely(usr == LINK_PROTOCOL))
tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
- tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
+ tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 0082f4b01795..14b3f007826d 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -104,13 +104,16 @@ static int wiphy_suspend(struct device *dev)
rtnl_lock();
if (rdev->wiphy.registered) {
- if (!rdev->wiphy.wowlan_config)
+ if (!rdev->wiphy.wowlan_config) {
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
+ }
if (rdev->ops->suspend)
ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
if (ret == 1) {
/* Driver refuse to configure wowlan */
cfg80211_leave_all(rdev);
+ cfg80211_process_rdev_events(rdev);
ret = rdev_suspend(rdev, NULL);
}
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 32060f81a8e7..88725f8eefad 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -421,8 +421,8 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
-static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
- const u8 *addr, enum nl80211_iftype iftype)
+int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
+ const u8 *addr, enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct {
@@ -526,13 +526,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
return 0;
}
-
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
- enum nl80211_iftype iftype)
-{
- return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
-}
-EXPORT_SYMBOL(ieee80211_data_to_8023);
+EXPORT_SYMBOL(ieee80211_data_to_8023_exthdr);
int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype,
@@ -747,24 +741,18 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
const u8 *addr, enum nl80211_iftype iftype,
const unsigned int extra_headroom,
- bool has_80211_header)
+ const u8 *check_da, const u8 *check_sa)
{
unsigned int hlen = ALIGN(extra_headroom, 4);
struct sk_buff *frame = NULL;
u16 ethertype;
u8 *payload;
- int offset = 0, remaining, err;
+ int offset = 0, remaining;
struct ethhdr eth;
bool reuse_frag = skb->head_frag && !skb_has_frag_list(skb);
bool reuse_skb = false;
bool last = false;
- if (has_80211_header) {
- err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
- if (err)
- goto out;
- }
-
while (!last) {
unsigned int subframe_len;
int len;
@@ -781,8 +769,17 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
goto purge;
offset += sizeof(struct ethhdr);
- /* reuse skb for the last subframe */
last = remaining <= subframe_len + padding;
+
+ /* FIXME: should we really accept multicast DA? */
+ if ((check_da && !is_multicast_ether_addr(eth.h_dest) &&
+ !ether_addr_equal(check_da, eth.h_dest)) ||
+ (check_sa && !ether_addr_equal(check_sa, eth.h_source))) {
+ offset += len + padding;
+ continue;
+ }
+
+ /* reuse skb for the last subframe */
if (!skb_is_nonlinear(skb) && !reuse_frag && last) {
skb_pull(skb, offset);
frame = skb;
@@ -820,7 +817,6 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
purge:
__skb_queue_purge(list);
- out:
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
diff --git a/samples/Kconfig b/samples/Kconfig
index 85c405fcccb0..a6d2a43bbf2e 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -99,4 +99,10 @@ config SAMPLE_SECCOMP
Build samples of seccomp filters using various methods of
BPF filter construction.
+config SAMPLE_BLACKFIN_GPTIMERS
+ tristate "Build blackfin gptimers sample code -- loadable modules only"
+ depends on BLACKFIN && BFIN_GPTIMERS && m
+ help
+ Build a sample module for the Blackfin gptimers.
+
endif # SAMPLES
diff --git a/samples/Makefile b/samples/Makefile
index 1a20169d85ac..e17d66d77f09 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
- configfs/ connector/ v4l/ trace_printk/
+ configfs/ connector/ v4l/ trace_printk/ blackfin/
diff --git a/Documentation/auxdisplay/.gitignore b/samples/auxdisplay/.gitignore
index 7af222860a96..7af222860a96 100644
--- a/Documentation/auxdisplay/.gitignore
+++ b/samples/auxdisplay/.gitignore
diff --git a/samples/auxdisplay/Makefile b/samples/auxdisplay/Makefile
new file mode 100644
index 000000000000..05e471feb6e5
--- /dev/null
+++ b/samples/auxdisplay/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := cfag12864b-example
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
diff --git a/Documentation/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index e7823ffb1ca0..e7823ffb1ca0 100644
--- a/Documentation/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
diff --git a/samples/blackfin/Makefile b/samples/blackfin/Makefile
new file mode 100644
index 000000000000..89b86cfd83a2
--- /dev/null
+++ b/samples/blackfin/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SAMPLE_BLACKFIN_GPTIMERS) += gptimers-example.o
diff --git a/Documentation/blackfin/gptimers-example.c b/samples/blackfin/gptimers-example.c
index 283eba993d9d..283eba993d9d 100644
--- a/Documentation/blackfin/gptimers-example.c
+++ b/samples/blackfin/gptimers-example.c
diff --git a/samples/bpf/parse_ldabs.c b/samples/bpf/parse_ldabs.c
index d17550198d06..6db6b21fdc6d 100644
--- a/samples/bpf/parse_ldabs.c
+++ b/samples/bpf/parse_ldabs.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_simple.c b/samples/bpf/parse_simple.c
index cf2511c33905..10af53d33cc2 100644
--- a/samples/bpf/parse_simple.c
+++ b/samples/bpf/parse_simple.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index edab34dce79b..95c16324760c 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
diff --git a/samples/bpf/tcbpf1_kern.c b/samples/bpf/tcbpf1_kern.c
index fa051b3d53ee..274c884c87fe 100644
--- a/samples/bpf/tcbpf1_kern.c
+++ b/samples/bpf/tcbpf1_kern.c
@@ -1,3 +1,4 @@
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/tcbpf2_kern.c b/samples/bpf/tcbpf2_kern.c
index 3303bb85593b..9c823a609e75 100644
--- a/samples/bpf/tcbpf2_kern.c
+++ b/samples/bpf/tcbpf2_kern.c
@@ -5,6 +5,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c
index 10ff73404e3a..1547b36a7b7b 100644
--- a/samples/bpf/test_cgrp2_tc_kern.c
+++ b/samples/bpf/test_cgrp2_tc_kern.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/if_ether.h>
#include <uapi/linux/in6.h>
#include <uapi/linux/ipv6.h>
diff --git a/Documentation/misc-devices/mei/.gitignore b/samples/mei/.gitignore
index f356b81ca1ec..f356b81ca1ec 100644
--- a/Documentation/misc-devices/mei/.gitignore
+++ b/samples/mei/.gitignore
diff --git a/samples/mei/Makefile b/samples/mei/Makefile
new file mode 100644
index 000000000000..7aac216dc420
--- /dev/null
+++ b/samples/mei/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := mei-amt-version
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
diff --git a/Documentation/misc-devices/mei/TODO b/samples/mei/TODO
index 6b3625d3058c..6b3625d3058c 100644
--- a/Documentation/misc-devices/mei/TODO
+++ b/samples/mei/TODO
diff --git a/Documentation/misc-devices/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 57d0d871dcf7..57d0d871dcf7 100644
--- a/Documentation/misc-devices/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
diff --git a/Documentation/mic/mpssd/.gitignore b/samples/mic/mpssd/.gitignore
index 8b7c72f07c92..8b7c72f07c92 100644
--- a/Documentation/mic/mpssd/.gitignore
+++ b/samples/mic/mpssd/.gitignore
diff --git a/samples/mic/mpssd/Makefile b/samples/mic/mpssd/Makefile
new file mode 100644
index 000000000000..3e3ef91fed6b
--- /dev/null
+++ b/samples/mic/mpssd/Makefile
@@ -0,0 +1,27 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+
+PROGS := mpssd
+CC = $(CROSS_COMPILE)gcc
+CFLAGS := -I../../../usr/include -I../../../tools/include
+
+ifdef DEBUG
+CFLAGS += -DDEBUG=$(DEBUG)
+endif
+
+all: $(PROGS)
+mpssd: mpssd.c sysfs.c
+ $(CC) $(CFLAGS) mpssd.c sysfs.c -o mpssd -lpthread
+
+install:
+ install mpssd /usr/sbin/mpssd
+ install micctrl /usr/sbin/micctrl
+
+clean:
+ rm -fr $(PROGS)
+
+endif
+endif
diff --git a/Documentation/mic/mpssd/micctrl b/samples/mic/mpssd/micctrl
index 8f2629b41c5f..8f2629b41c5f 100755
--- a/Documentation/mic/mpssd/micctrl
+++ b/samples/mic/mpssd/micctrl
diff --git a/Documentation/mic/mpssd/mpss b/samples/mic/mpssd/mpss
index 5fcf9fa4b082..5fcf9fa4b082 100755
--- a/Documentation/mic/mpssd/mpss
+++ b/samples/mic/mpssd/mpss
diff --git a/Documentation/mic/mpssd/mpssd.c b/samples/mic/mpssd/mpssd.c
index 49db1def1721..49db1def1721 100644
--- a/Documentation/mic/mpssd/mpssd.c
+++ b/samples/mic/mpssd/mpssd.c
diff --git a/Documentation/mic/mpssd/mpssd.h b/samples/mic/mpssd/mpssd.h
index 8bd64944aacc..8bd64944aacc 100644
--- a/Documentation/mic/mpssd/mpssd.h
+++ b/samples/mic/mpssd/mpssd.h
diff --git a/Documentation/mic/mpssd/sysfs.c b/samples/mic/mpssd/sysfs.c
index 8dd326936083..8dd326936083 100644
--- a/Documentation/mic/mpssd/sysfs.c
+++ b/samples/mic/mpssd/sysfs.c
diff --git a/Documentation/timers/.gitignore b/samples/timers/.gitignore
index c5c45d7ec0df..c5c45d7ec0df 100644
--- a/Documentation/timers/.gitignore
+++ b/samples/timers/.gitignore
diff --git a/samples/timers/Makefile b/samples/timers/Makefile
new file mode 100644
index 000000000000..a5c3c4a35ca1
--- /dev/null
+++ b/samples/timers/Makefile
@@ -0,0 +1,15 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+CC := $(CROSS_COMPILE)gcc
+PROGS := hpet_example
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
+
+endif
+endif
diff --git a/Documentation/timers/hpet_example.c b/samples/timers/hpet_example.c
index 3ab4993d85e0..3ab4993d85e0 100644
--- a/Documentation/timers/hpet_example.c
+++ b/samples/timers/hpet_example.c
diff --git a/Documentation/watchdog/src/.gitignore b/samples/watchdog/.gitignore
index ac90997dba93..ff0ebb540333 100644
--- a/Documentation/watchdog/src/.gitignore
+++ b/samples/watchdog/.gitignore
@@ -1,2 +1 @@
watchdog-simple
-watchdog-test
diff --git a/samples/watchdog/Makefile b/samples/watchdog/Makefile
new file mode 100644
index 000000000000..9b53d89b1ccf
--- /dev/null
+++ b/samples/watchdog/Makefile
@@ -0,0 +1,8 @@
+CC := $(CROSS_COMPILE)gcc
+PROGS := watchdog-simple
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
+
diff --git a/Documentation/watchdog/src/watchdog-simple.c b/samples/watchdog/watchdog-simple.c
index ba45803a2216..ba45803a2216 100644
--- a/Documentation/watchdog/src/watchdog-simple.c
+++ b/samples/watchdog/watchdog-simple.c
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 11602e5efb3b..de46ab03f063 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -81,6 +81,7 @@ endif
ifneq ($(strip $(lib-y) $(lib-m) $(lib-)),)
lib-target := $(obj)/lib.a
+obj-y += $(obj)/lib-ksyms.o
endif
ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
@@ -358,12 +359,22 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
# Rule to compile a set of .o files into one .o file
#
ifdef builtin-target
-quiet_cmd_link_o_target = LD $@
+
+ifdef CONFIG_THIN_ARCHIVES
+ cmd_make_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
+ cmd_make_empty_builtin = rm -f $@; $(AR) rcST$(KBUILD_ARFLAGS)
+ quiet_cmd_link_o_target = AR $@
+else
+ cmd_make_builtin = $(LD) $(ld_flags) -r -o
+ cmd_make_empty_builtin = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS)
+ quiet_cmd_link_o_target = LD $@
+endif
+
# If the list of objects to link is empty, just create an empty built-in.o
cmd_link_o_target = $(if $(strip $(obj-y)),\
- $(LD) $(ld_flags) -r -o $@ $(filter $(obj-y), $^) \
+ $(cmd_make_builtin) $@ $(filter $(obj-y), $^) \
$(cmd_secanalysis),\
- rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@)
+ $(cmd_make_empty_builtin) $@)
$(builtin-target): $(obj-y) FORCE
$(call if_changed,link_o_target)
@@ -389,12 +400,36 @@ $(modorder-target): $(subdir-ym) FORCE
#
ifdef lib-target
quiet_cmd_link_l_target = AR $@
-cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
+
+ifdef CONFIG_THIN_ARCHIVES
+ cmd_link_l_target = rm -f $@; $(AR) rcsT$(KBUILD_ARFLAGS) $@ $(lib-y)
+else
+ cmd_link_l_target = rm -f $@; $(AR) rcs$(KBUILD_ARFLAGS) $@ $(lib-y)
+endif
$(lib-target): $(lib-y) FORCE
$(call if_changed,link_l_target)
targets += $(lib-target)
+
+dummy-object = $(obj)/.lib_exports.o
+ksyms-lds = $(dot-target).lds
+ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+ref_prefix = EXTERN(_
+else
+ref_prefix = EXTERN(
+endif
+
+quiet_cmd_export_list = EXPORTS $@
+cmd_export_list = $(OBJDUMP) -h $< | \
+ sed -ne '/___ksymtab/{s/.*+/$(ref_prefix)/;s/ .*/)/;p}' >$(ksyms-lds);\
+ rm -f $(dummy-object);\
+ $(AR) rcs$(KBUILD_ARFLAGS) $(dummy-object);\
+ $(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\
+ rm $(dummy-object) $(ksyms-lds)
+
+$(obj)/lib-ksyms.o: $(lib-target) FORCE
+ $(call if_changed,export_list)
endif
#
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 61f0e6db909b..060d2cb373db 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -6,6 +6,12 @@ ifdef CONFIG_GCC_PLUGINS
gcc-plugin-$(CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) += cyc_complexity_plugin.so
+ gcc-plugin-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += latent_entropy_plugin.so
+ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_LATENT_ENTROPY) += -DLATENT_ENTROPY_PLUGIN
+ ifdef CONFIG_PAX_LATENT_ENTROPY
+ DISABLE_LATENT_ENTROPY_PLUGIN += -fplugin-arg-latent_entropy_plugin-disable
+ endif
+
ifdef CONFIG_GCC_PLUGIN_SANCOV
ifeq ($(CFLAGS_KCOV),)
# It is needed because of the gcc-plugin.sh and gcc version checks.
@@ -21,7 +27,8 @@ ifdef CONFIG_GCC_PLUGINS
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
- export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR SANCOV_PLUGIN
+ export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGIN GCC_PLUGIN_SUBDIR
+ export SANCOV_PLUGIN DISABLE_LATENT_ENTROPY_PLUGIN
ifneq ($(PLUGINCC),)
# SANCOV_PLUGIN can only be in CFLAGS_KCOV, to avoid duplication.
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1366a94b6c39..16923ba4b5b1 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -115,14 +115,18 @@ $(modules:.ko=.mod.o): %.mod.o: %.mod.c FORCE
targets += $(modules:.ko=.mod.o)
-# Step 6), final link of the modules
+ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)
+
+# Step 6), final link of the modules with optional arch pass after final link
quiet_cmd_ld_ko_o = LD [M] $@
- cmd_ld_ko_o = $(LD) -r $(LDFLAGS) \
- $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
- -o $@ $(filter-out FORCE,$^)
+ cmd_ld_ko_o = \
+ $(LD) -r $(LDFLAGS) \
+ $(KBUILD_LDFLAGS_MODULE) $(LDFLAGS_MODULE) \
+ -o $@ $(filter-out FORCE,$^) ; \
+ $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
$(modules): %.ko :%.o %.mod.o FORCE
- $(call if_changed,ld_ko_o)
+ +$(call if_changed,ld_ko_o)
targets += $(modules)
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index 746ec1ece614..fff818b92acb 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -82,8 +82,7 @@
* to date before even starting the recursive build, so it's too late
* at this point anyway.
*
- * The algorithm to grep for "CONFIG_..." is bit unusual, but should
- * be fast ;-) We don't even try to really parse the header files, but
+ * We don't even try to really parse the header files, but
* merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will
* be picked up as well. It's not a problem with respect to
* correctness, since that can only give too many dependencies, thus
@@ -115,11 +114,6 @@
#include <ctype.h>
#include <arpa/inet.h>
-#define INT_CONF ntohl(0x434f4e46)
-#define INT_ONFI ntohl(0x4f4e4649)
-#define INT_NFIG ntohl(0x4e464947)
-#define INT_FIG_ ntohl(0x4649475f)
-
int insert_extra_deps;
char *target;
char *depfile;
@@ -241,37 +235,22 @@ static void use_config(const char *m, int slen)
print_config(m, slen);
}
-static void parse_config_file(const char *map, size_t len)
+static void parse_config_file(const char *p)
{
- const int *end = (const int *) (map + len);
- /* start at +1, so that p can never be < map */
- const int *m = (const int *) map + 1;
- const char *p, *q;
-
- for (; m < end; m++) {
- if (*m == INT_CONF) { p = (char *) m ; goto conf; }
- if (*m == INT_ONFI) { p = (char *) m-1; goto conf; }
- if (*m == INT_NFIG) { p = (char *) m-2; goto conf; }
- if (*m == INT_FIG_) { p = (char *) m-3; goto conf; }
- continue;
- conf:
- if (p > map + len - 7)
- continue;
- if (memcmp(p, "CONFIG_", 7))
- continue;
+ const char *q, *r;
+
+ while ((p = strstr(p, "CONFIG_"))) {
p += 7;
- for (q = p; q < map + len; q++) {
- if (!(isalnum(*q) || *q == '_'))
- goto found;
- }
- continue;
-
- found:
- if (!memcmp(q - 7, "_MODULE", 7))
- q -= 7;
- if (q - p < 0)
- continue;
- use_config(p, q - p);
+ q = p;
+ while (*q && (isalnum(*q) || *q == '_'))
+ q++;
+ if (memcmp(q - 7, "_MODULE", 7) == 0)
+ r = q - 7;
+ else
+ r = q;
+ if (r > p)
+ use_config(p, r - p);
+ p = q;
}
}
@@ -291,7 +270,7 @@ static void do_config_file(const char *filename)
{
struct stat st;
int fd;
- void *map;
+ char *map;
fd = open(filename, O_RDONLY);
if (fd < 0) {
@@ -308,18 +287,23 @@ static void do_config_file(const char *filename)
close(fd);
return;
}
- map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if ((long) map == -1) {
- perror("fixdep: mmap");
+ map = malloc(st.st_size + 1);
+ if (!map) {
+ perror("fixdep: malloc");
close(fd);
return;
}
+ if (read(fd, map, st.st_size) != st.st_size) {
+ perror("fixdep: read");
+ close(fd);
+ return;
+ }
+ map[st.st_size] = '\0';
+ close(fd);
- parse_config_file(map, st.st_size);
-
- munmap(map, st.st_size);
+ parse_config_file(map);
- close(fd);
+ free(map);
}
/*
@@ -446,22 +430,8 @@ static void print_deps(void)
close(fd);
}
-static void traps(void)
-{
- static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
- int *p = (int *)test;
-
- if (*p != INT_CONF) {
- fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
- *p);
- exit(2);
- }
-}
-
int main(int argc, char *argv[])
{
- traps();
-
if (argc == 5 && !strcmp(argv[1], "-e")) {
insert_extra_deps = 1;
argv++;
diff --git a/scripts/coccicheck b/scripts/coccicheck
index c92c1528a54d..ec487b8e7051 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -1,7 +1,7 @@
#!/bin/bash
# Linux kernel coccicheck
#
-# Read Documentation/coccinelle.txt
+# Read Documentation/dev-tools/coccinelle.rst
#
# This script requires at least spatch
# version 1.0.0-rc11.
diff --git a/scripts/coccinelle/api/memdup_user.cocci b/scripts/coccinelle/api/memdup_user.cocci
index c606231b0e46..2a5aea8e8487 100644
--- a/scripts/coccinelle/api/memdup_user.cocci
+++ b/scripts/coccinelle/api/memdup_user.cocci
@@ -15,11 +15,11 @@ virtual org
virtual report
@depends on patch@
-expression from,to,size,flag;
+expression from,to,size;
identifier l1,l2;
@@
-- to = \(kmalloc\|kzalloc\)(size,flag);
+- to = \(kmalloc\|kzalloc\)(size,GFP_KERNEL);
+ to = memdup_user(from,size);
if (
- to==NULL
@@ -37,12 +37,12 @@ identifier l1,l2;
- }
@r depends on !patch@
-expression from,to,size,flag;
+expression from,to,size;
position p;
statement S1,S2;
@@
-* to = \(kmalloc@p\|kzalloc@p\)(size,flag);
+* to = \(kmalloc@p\|kzalloc@p\)(size,GFP_KERNEL);
if (to==NULL || ...) S1
if (copy_from_user(to, from, size) != 0)
S2
diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci
index 89b98a2f7a6f..d67ccf5f8227 100644
--- a/scripts/coccinelle/api/pm_runtime.cocci
+++ b/scripts/coccinelle/api/pm_runtime.cocci
@@ -17,9 +17,10 @@ virtual report
@runtime_bad_err_handle exists@
expression ret;
+position p;
@@
(
-ret = \(pm_runtime_idle\|
+ret@p = \(pm_runtime_idle\|
pm_runtime_suspend\|
pm_runtime_autosuspend\|
pm_runtime_resume\|
@@ -47,12 +48,13 @@ IS_ERR_VALUE(ret)
// For context mode
//----------------------------------------------------------
-@depends on runtime_bad_err_handle && context@
+@depends on context@
identifier pm_runtime_api;
expression ret;
+position runtime_bad_err_handle.p;
@@
(
-ret = pm_runtime_api(...);
+ret@p = pm_runtime_api(...);
...
* IS_ERR_VALUE(ret)
...
@@ -62,12 +64,13 @@ ret = pm_runtime_api(...);
// For patch mode
//----------------------------------------------------------
-@depends on runtime_bad_err_handle && patch@
+@depends on patch@
identifier pm_runtime_api;
expression ret;
+position runtime_bad_err_handle.p;
@@
(
-ret = pm_runtime_api(...);
+ret@p = pm_runtime_api(...);
...
- IS_ERR_VALUE(ret)
+ ret < 0
@@ -78,13 +81,14 @@ ret = pm_runtime_api(...);
// For org and report mode
//----------------------------------------------------------
-@r depends on runtime_bad_err_handle && (org || report) exists@
+@r depends on (org || report) exists@
position p1, p2;
identifier pm_runtime_api;
expression ret;
+position runtime_bad_err_handle.p;
@@
(
-ret = pm_runtime_api@p1(...);
+ret@p = pm_runtime_api@p1(...);
...
IS_ERR_VALUE@p2(ret)
...
diff --git a/scripts/coccinelle/misc/cond_no_effect.cocci b/scripts/coccinelle/misc/cond_no_effect.cocci
new file mode 100644
index 000000000000..8467dbd1c465
--- /dev/null
+++ b/scripts/coccinelle/misc/cond_no_effect.cocci
@@ -0,0 +1,64 @@
+/// Find conditions where the if and else branches are
+// functionally identical.
+//
+// There can be false positives in cases where the positional
+// information is used (as with lockdep) or where the identity
+// is a placeholder for not yet handled cases.
+// Unfortunately there also seems to be a tendency to use
+// the last if else/else as a "default behavior" - which some
+// might consider a legitimate coding pattern. From discussion
+// on kernelnewbies, though, it seems that this is not really an
+// accepted pattern and, if used at all, it would need to be commented.
+//
+// In the Linux kernel it does not seem to actually report
+// false positives except for those that were documented as
+// being intentional.
+// The two known cases are:
+// arch/sh/kernel/traps_64.c:read_opcode()
+// } else if ((pc & 1) == 0) {
+// /* SHcompact */
+// /* TODO : provide handling for this. We don't really support
+// user-mode SHcompact yet, and for a kernel fault, this would
+// have to come from a module built for SHcompact. */
+// return -EFAULT;
+// } else {
+// /* misaligned */
+// return -EFAULT;
+// }
+// fs/kernfs/file.c:kernfs_fop_open()
+// * Both paths of the branch look the same. They're supposed to
+// * look that way and give @of->mutex different static lockdep keys.
+// */
+// if (has_mmap)
+// mutex_init(&of->mutex);
+// else
+// mutex_init(&of->mutex);
+//
+// All other cases look like bugs or at least lack of documentation
+//
+// Confidence: Moderate
+// Copyright: (C) 2016 Nicholas Mc Guire, OSADL. GPLv2.
+// Comments:
+// Options: --no-includes --include-headers
+
+virtual org
+virtual report
+
+@cond@
+statement S1;
+position p;
+@@
+
+* if@p (...) S1 else S1
+
+@script:python depends on org@
+p << cond.p;
+@@
+
+cocci.print_main("WARNING: possible condition with no effect (if == else)",p)
+
+@script:python depends on report@
+p << cond.p;
+@@
+
+coccilib.report.print_report(p[0],"WARNING: possible condition with no effect (if == else)")
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
new file mode 100644
index 000000000000..ff1939b804ae
--- /dev/null
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright 2012-2016 by the PaX Team <pageexec@freemail.hu>
+ * Copyright 2016 by Emese Revfy <re.emese@gmail.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ * but for the kernel it doesn't matter since it doesn't link against
+ * any of the gcc libraries
+ *
+ * This gcc plugin helps generate a little bit of entropy from program state,
+ * used throughout the uptime of the kernel. Here is an instrumentation example:
+ *
+ * before:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ * if (argc <= 1)
+ * printf("%s: no command arguments :(\n", *argv);
+ * else
+ * printf("%s: %d command arguments!\n", *argv, argc - 1);
+ * }
+ *
+ * after:
+ * void __latent_entropy test(int argc, char *argv[])
+ * {
+ * // latent_entropy_execute() 1.
+ * unsigned long local_entropy;
+ * // init_local_entropy() 1.
+ * void *local_entropy_frameaddr;
+ * // init_local_entropy() 3.
+ * unsigned long tmp_latent_entropy;
+ *
+ * // init_local_entropy() 2.
+ * local_entropy_frameaddr = __builtin_frame_address(0);
+ * local_entropy = (unsigned long) local_entropy_frameaddr;
+ *
+ * // init_local_entropy() 4.
+ * tmp_latent_entropy = latent_entropy;
+ * // init_local_entropy() 5.
+ * local_entropy ^= tmp_latent_entropy;
+ *
+ * // latent_entropy_execute() 3.
+ * if (argc <= 1) {
+ * // perturb_local_entropy()
+ * local_entropy += 4623067384293424948;
+ * printf("%s: no command arguments :(\n", *argv);
+ * // perturb_local_entropy()
+ * } else {
+ * local_entropy ^= 3896280633962944730;
+ *		printf("%s: %d command arguments!\n", *argv, argc - 1);
+ * }
+ *
+ * // latent_entropy_execute() 4.
+ * tmp_latent_entropy = rol(tmp_latent_entropy, local_entropy);
+ * latent_entropy = tmp_latent_entropy;
+ * }
+ *
+ * TODO:
+ * - add ipa pass to identify not explicitly marked candidate functions
+ * - mix in more program state (function arguments/return values,
+ * loop variables, etc)
+ * - more instrumentation control via attribute parameters
+ *
+ * BUGS:
+ * - none known
+ *
+ * Options:
+ * -fplugin-arg-latent_entropy_plugin-disable
+ *
+ * Attribute: __attribute__((latent_entropy))
+ * The latent_entropy gcc attribute can only be applied to functions and variables.
+ * If it is on a function then the plugin will instrument it. If the attribute
+ * is on a variable then the plugin will initialize it with a random value.
+ * The variable must be an integer, an integer array type or a structure
+ * with integer fields.
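+ *
+ * A usage sketch (hypothetical names, for illustration only):
+ *	static unsigned long entropy_seed[4] __latent_entropy;
+ *	static void __latent_entropy mix_pool(void *pool);
+ * The array gets a random initializer; the function gets instrumented.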
+ */
+
+#include "gcc-common.h"
+
+int plugin_is_GPL_compatible;
+
+static GTY(()) tree latent_entropy_decl;
+
+static struct plugin_info latent_entropy_plugin_info = {
+ .version = "201606141920vanilla",
+ .help = "disable\tturn off latent entropy instrumentation\n",
+};
+
+static unsigned HOST_WIDE_INT seed;
+/*
+ * get_random_seed() (this is a GCC function) generates the seed.
+ * This is a simple random generator without any cryptographic security because
+ * the entropy doesn't come from here.
+ */
+static unsigned HOST_WIDE_INT get_random_const(void)
+{
+ unsigned int i;
+ unsigned HOST_WIDE_INT ret = 0;
+
+ for (i = 0; i < 8 * sizeof(ret); i++) {
+ ret = (ret << 1) | (seed & 1);
+ seed >>= 1;
+ if (ret & 1)
+ seed ^= 0xD800000000000000ULL;
+ }
+
+ return ret;
+}
+
+static tree tree_get_random_const(tree type)
+{
+ unsigned long long mask;
+
+ mask = 1ULL << (TREE_INT_CST_LOW(TYPE_SIZE(type)) - 1);
+ mask = 2 * (mask - 1) + 1;
+
+ if (TYPE_UNSIGNED(type))
+ return build_int_cstu(type, mask & get_random_const());
+ return build_int_cst(type, mask & get_random_const());
+}
+
+static tree handle_latent_entropy_attribute(tree *node, tree name,
+ tree args __unused,
+ int flags __unused,
+ bool *no_add_attrs)
+{
+ tree type;
+#if BUILDING_GCC_VERSION <= 4007
+ VEC(constructor_elt, gc) *vals;
+#else
+ vec<constructor_elt, va_gc> *vals;
+#endif
+
+ switch (TREE_CODE(*node)) {
+ default:
+ *no_add_attrs = true;
+ error("%qE attribute only applies to functions and variables",
+ name);
+ break;
+
+ case VAR_DECL:
+ if (DECL_INITIAL(*node)) {
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must not be initialized",
+ *node, name);
+ break;
+ }
+
+ if (!TREE_STATIC(*node)) {
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must not be local",
+ *node, name);
+ break;
+ }
+
+ type = TREE_TYPE(*node);
+ switch (TREE_CODE(type)) {
+ default:
+ *no_add_attrs = true;
+ error("variable %qD with %qE attribute must be an integer or a fixed length integer array type or a fixed sized structure with integer fields",
+ *node, name);
+ break;
+
+ case RECORD_TYPE: {
+ tree fld, lst = TYPE_FIELDS(type);
+ unsigned int nelt = 0;
+
+ for (fld = lst; fld; nelt++, fld = TREE_CHAIN(fld)) {
+ tree fieldtype;
+
+ fieldtype = TREE_TYPE(fld);
+ if (TREE_CODE(fieldtype) == INTEGER_TYPE)
+ continue;
+
+ *no_add_attrs = true;
+ error("structure variable %qD with %qE attribute has a non-integer field %qE",
+ *node, name, fld);
+ break;
+ }
+
+ if (fld)
+ break;
+
+#if BUILDING_GCC_VERSION <= 4007
+ vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+ vec_alloc(vals, nelt);
+#endif
+
+ for (fld = lst; fld; fld = TREE_CHAIN(fld)) {
+ tree random_const, fld_t = TREE_TYPE(fld);
+
+ random_const = tree_get_random_const(fld_t);
+ CONSTRUCTOR_APPEND_ELT(vals, fld, random_const);
+ }
+
+ /* Initialize the fields with random constants */
+ DECL_INITIAL(*node) = build_constructor(type, vals);
+ break;
+ }
+
+ /* Initialize the variable with a random constant */
+ case INTEGER_TYPE:
+ DECL_INITIAL(*node) = tree_get_random_const(type);
+ break;
+
+ case ARRAY_TYPE: {
+ tree elt_type, array_size, elt_size;
+ unsigned int i, nelt;
+ HOST_WIDE_INT array_size_int, elt_size_int;
+
+ elt_type = TREE_TYPE(type);
+ elt_size = TYPE_SIZE_UNIT(TREE_TYPE(type));
+ array_size = TYPE_SIZE_UNIT(type);
+
+ if (TREE_CODE(elt_type) != INTEGER_TYPE || !array_size
+ || TREE_CODE(array_size) != INTEGER_CST) {
+ *no_add_attrs = true;
+ error("array variable %qD with %qE attribute must be a fixed length integer array type",
+ *node, name);
+ break;
+ }
+
+ array_size_int = TREE_INT_CST_LOW(array_size);
+ elt_size_int = TREE_INT_CST_LOW(elt_size);
+ nelt = array_size_int / elt_size_int;
+
+#if BUILDING_GCC_VERSION <= 4007
+ vals = VEC_alloc(constructor_elt, gc, nelt);
+#else
+ vec_alloc(vals, nelt);
+#endif
+
+ for (i = 0; i < nelt; i++) {
+ tree cst = size_int(i);
+ tree rand_cst = tree_get_random_const(elt_type);
+
+ CONSTRUCTOR_APPEND_ELT(vals, cst, rand_cst);
+ }
+
+ /*
+ * Initialize the elements of the array with random
+ * constants
+ */
+ DECL_INITIAL(*node) = build_constructor(type, vals);
+ break;
+ }
+ }
+ break;
+
+ case FUNCTION_DECL:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+static struct attribute_spec latent_entropy_attr = {
+ .name = "latent_entropy",
+ .min_length = 0,
+ .max_length = 0,
+ .decl_required = true,
+ .type_required = false,
+ .function_type_required = false,
+ .handler = handle_latent_entropy_attribute,
+#if BUILDING_GCC_VERSION >= 4007
+ .affects_type_identity = false
+#endif
+};
+
+static void register_attributes(void *event_data __unused, void *data __unused)
+{
+ register_attribute(&latent_entropy_attr);
+}
+
+static bool latent_entropy_gate(void)
+{
+ tree list;
+
+ /* don't bother with noreturn functions for now */
+ if (TREE_THIS_VOLATILE(current_function_decl))
+ return false;
+
+ /* gcc-4.5 doesn't discover some trivial noreturn functions */
+ if (EDGE_COUNT(EXIT_BLOCK_PTR_FOR_FN(cfun)->preds) == 0)
+ return false;
+
+ list = DECL_ATTRIBUTES(current_function_decl);
+ return lookup_attribute("latent_entropy", list) != NULL_TREE;
+}
+
+static tree create_var(tree type, const char *name)
+{
+ tree var;
+
+ var = create_tmp_var(type, name);
+ add_referenced_var(var);
+ mark_sym_for_renaming(var);
+ return var;
+}
+
+/*
+ * Set up the next operation and its constant operand to use in the latent
+ * entropy PRNG. When RHS is specified, the request is for perturbing the
+ * local latent entropy variable, otherwise it is for perturbing the global
+ * latent entropy variable where the two operands are already given by the
+ * local and global latent entropy variables themselves.
+ *
+ * The operation is one of add/xor/rol when instrumenting the local entropy
+ * variable and one of add/xor when perturbing the global entropy variable.
+ * Rotation is not used for the latter case because it would transmit less
+ * entropy to the global variable than the other two operations.
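+ *
+ * Successive calls cycle the operation: xor, then add, then rol when a
+ * constant rhs is requested (xor again when it is not), so repeated
+ * instrumentation sites do not keep reusing the same operation.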
+ */
+static enum tree_code get_op(tree *rhs)
+{
+ static enum tree_code op;
+ unsigned HOST_WIDE_INT random_const;
+
+ random_const = get_random_const();
+
+ switch (op) {
+ case BIT_XOR_EXPR:
+ op = PLUS_EXPR;
+ break;
+
+ case PLUS_EXPR:
+ if (rhs) {
+ op = LROTATE_EXPR;
+ /*
+ * This code limits the value of random_const to
+ * the size of a wide int for the rotation
+ */
+ random_const &= HOST_BITS_PER_WIDE_INT - 1;
+ break;
+ }
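+		/* fall through when rhs is NULL */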
+
+ case LROTATE_EXPR:
+ default:
+ op = BIT_XOR_EXPR;
+ break;
+ }
+ if (rhs)
+ *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
+ return op;
+}
+
+static gimple create_assign(enum tree_code code, tree lhs, tree op1,
+ tree op2)
+{
+ return gimple_build_assign_with_ops(code, lhs, op1, op2);
+}
+
+static void perturb_local_entropy(basic_block bb, tree local_entropy)
+{
+ gimple_stmt_iterator gsi;
+ gimple assign;
+ tree rhs;
+ enum tree_code op;
+
+ op = get_op(&rhs);
+ assign = create_assign(op, local_entropy, local_entropy, rhs);
+ gsi = gsi_after_labels(bb);
+ gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static void __perturb_latent_entropy(gimple_stmt_iterator *gsi,
+ tree local_entropy)
+{
+ gimple assign;
+ tree temp;
+ enum tree_code op;
+
+ /* 1. create temporary copy of latent_entropy */
+ temp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+ /* 2. read... */
+ add_referenced_var(latent_entropy_decl);
+ mark_sym_for_renaming(latent_entropy_decl);
+ assign = gimple_build_assign(temp, latent_entropy_decl);
+ gsi_insert_before(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 3. ...modify... */
+ op = get_op(NULL);
+ assign = create_assign(op, temp, temp, local_entropy);
+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 4. ...write latent_entropy */
+ assign = gimple_build_assign(latent_entropy_decl, temp);
+ gsi_insert_after(gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static bool handle_tail_calls(basic_block bb, tree local_entropy)
+{
+ gimple_stmt_iterator gsi;
+
+ for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+ gcall *call;
+ gimple stmt = gsi_stmt(gsi);
+
+ if (!is_gimple_call(stmt))
+ continue;
+
+ call = as_a_gcall(stmt);
+ if (!gimple_call_tail_p(call))
+ continue;
+
+ __perturb_latent_entropy(&gsi, local_entropy);
+ return true;
+ }
+
+ return false;
+}
+
+static void perturb_latent_entropy(tree local_entropy)
+{
+ edge_iterator ei;
+ edge e, last_bb_e;
+ basic_block last_bb;
+
+ gcc_assert(single_pred_p(EXIT_BLOCK_PTR_FOR_FN(cfun)));
+ last_bb_e = single_pred_edge(EXIT_BLOCK_PTR_FOR_FN(cfun));
+
+ FOR_EACH_EDGE(e, ei, last_bb_e->src->preds) {
+ if (ENTRY_BLOCK_PTR_FOR_FN(cfun) == e->src)
+ continue;
+ if (EXIT_BLOCK_PTR_FOR_FN(cfun) == e->src)
+ continue;
+
+ handle_tail_calls(e->src, local_entropy);
+ }
+
+ last_bb = single_pred(EXIT_BLOCK_PTR_FOR_FN(cfun));
+ if (!handle_tail_calls(last_bb, local_entropy)) {
+ gimple_stmt_iterator gsi = gsi_last_bb(last_bb);
+
+ __perturb_latent_entropy(&gsi, local_entropy);
+ }
+}
+
+static void init_local_entropy(basic_block bb, tree local_entropy)
+{
+ gimple assign, call;
+ tree frame_addr, rand_const, tmp, fndecl, udi_frame_addr;
+ enum tree_code op;
+ unsigned HOST_WIDE_INT rand_cst;
+ gimple_stmt_iterator gsi = gsi_after_labels(bb);
+
+ /* 1. create local_entropy_frameaddr */
+ frame_addr = create_var(ptr_type_node, "local_entropy_frameaddr");
+
+ /* 2. local_entropy_frameaddr = __builtin_frame_address() */
+ fndecl = builtin_decl_implicit(BUILT_IN_FRAME_ADDRESS);
+ call = gimple_build_call(fndecl, 1, integer_zero_node);
+ gimple_call_set_lhs(call, frame_addr);
+ gsi_insert_before(&gsi, call, GSI_NEW_STMT);
+ update_stmt(call);
+
+ udi_frame_addr = fold_convert(unsigned_intDI_type_node, frame_addr);
+ assign = gimple_build_assign(local_entropy, udi_frame_addr);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 3. create temporary copy of latent_entropy */
+ tmp = create_var(unsigned_intDI_type_node, "tmp_latent_entropy");
+
+ /* 4. read the global entropy variable into local entropy */
+ add_referenced_var(latent_entropy_decl);
+ mark_sym_for_renaming(latent_entropy_decl);
+ assign = gimple_build_assign(tmp, latent_entropy_decl);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ /* 5. mix local_entropy_frameaddr into local entropy */
+ assign = create_assign(BIT_XOR_EXPR, local_entropy, local_entropy, tmp);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+
+ rand_cst = get_random_const();
+ rand_const = build_int_cstu(unsigned_intDI_type_node, rand_cst);
+ op = get_op(NULL);
+ assign = create_assign(op, local_entropy, local_entropy, rand_const);
+ gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
+ update_stmt(assign);
+}
+
+static bool create_latent_entropy_decl(void)
+{
+ varpool_node_ptr node;
+
+ if (latent_entropy_decl != NULL_TREE)
+ return true;
+
+ FOR_EACH_VARIABLE(node) {
+ tree name, var = NODE_DECL(node);
+
+ if (DECL_NAME_LENGTH(var) < sizeof("latent_entropy") - 1)
+ continue;
+
+ name = DECL_NAME(var);
+ if (strcmp(IDENTIFIER_POINTER(name), "latent_entropy"))
+ continue;
+
+ latent_entropy_decl = var;
+ break;
+ }
+
+ return latent_entropy_decl != NULL_TREE;
+}
+
+static unsigned int latent_entropy_execute(void)
+{
+ basic_block bb;
+ tree local_entropy;
+
+ if (!create_latent_entropy_decl())
+ return 0;
+
+ /* prepare for step 2 below */
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ if (!single_pred_p(bb)) {
+ split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+ bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+ }
+
+ /* 1. create the local entropy variable */
+ local_entropy = create_var(unsigned_intDI_type_node, "local_entropy");
+
+ /* 2. initialize the local entropy variable */
+ init_local_entropy(bb, local_entropy);
+
+ bb = bb->next_bb;
+
+ /*
+ * 3. instrument each BB with an operation on the
+ * local entropy variable
+ */
+ while (bb != EXIT_BLOCK_PTR_FOR_FN(cfun)) {
+ perturb_local_entropy(bb, local_entropy);
+ bb = bb->next_bb;
+	}
+
+ /* 4. mix local entropy into the global entropy variable */
+ perturb_latent_entropy(local_entropy);
+ return 0;
+}
+
+static void latent_entropy_start_unit(void *gcc_data __unused,
+ void *user_data __unused)
+{
+ tree type, id;
+ int quals;
+
+ seed = get_random_seed(false);
+
+ if (in_lto_p)
+ return;
+
+ /* extern volatile u64 latent_entropy */
+ gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
+ quals = TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE;
+ type = build_qualified_type(long_long_unsigned_type_node, quals);
+ id = get_identifier("latent_entropy");
+ latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, id, type);
+
+ TREE_STATIC(latent_entropy_decl) = 1;
+ TREE_PUBLIC(latent_entropy_decl) = 1;
+ TREE_USED(latent_entropy_decl) = 1;
+ DECL_PRESERVE_P(latent_entropy_decl) = 1;
+ TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
+ DECL_EXTERNAL(latent_entropy_decl) = 1;
+ DECL_ARTIFICIAL(latent_entropy_decl) = 1;
+ lang_hooks.decls.pushdecl(latent_entropy_decl);
+}
+
+#define PASS_NAME latent_entropy
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+ | TODO_update_ssa
+#include "gcc-generate-gimple-pass.h"
+
+int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
+{
+ bool enabled = true;
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument * const argv = plugin_info->argv;
+ int i;
+
+ struct register_pass_info latent_entropy_pass_info;
+
+ latent_entropy_pass_info.pass = make_latent_entropy_pass();
+ latent_entropy_pass_info.reference_pass_name = "optimized";
+ latent_entropy_pass_info.ref_pass_instance_number = 1;
+ latent_entropy_pass_info.pos_op = PASS_POS_INSERT_BEFORE;
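+	/* keep latent_entropy_decl from being collected by gcc's garbage collector */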
+ static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
+ {
+ .base = &latent_entropy_decl,
+ .nelt = 1,
+ .stride = sizeof(latent_entropy_decl),
+ .cb = &gt_ggc_mx_tree_node,
+ .pchw = &gt_pch_nx_tree_node
+ },
+ LAST_GGC_ROOT_TAB
+ };
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ for (i = 0; i < argc; ++i) {
+ if (!(strcmp(argv[i].key, "disable"))) {
+ enabled = false;
+ continue;
+ }
+		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
+ }
+
+ register_callback(plugin_name, PLUGIN_INFO, NULL,
+ &latent_entropy_plugin_info);
+ if (enabled) {
+ register_callback(plugin_name, PLUGIN_START_UNIT,
+ &latent_entropy_start_unit, NULL);
+ register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS,
+ NULL, (void *)&gt_ggc_r_gt_latent_entropy);
+ register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+ &latent_entropy_pass_info);
+ }
+ register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes,
+ NULL);
+
+ return 0;
+}
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 17fa901418ae..0055b07b03b6 100755
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -97,7 +97,10 @@ print_mtime() {
}
list_parse() {
- [ ! -L "$1" ] && echo "$1 \\" || :
+ if [ -L "$1" ]; then
+ return
+ fi
+ echo "$1" | sed 's/:/\\:/g; s/$/ \\/'
}
# for each file print a line in following format
diff --git a/scripts/genksyms/lex.l b/scripts/genksyms/lex.l
index e583565f2011..5235aa507ba5 100644
--- a/scripts/genksyms/lex.l
+++ b/scripts/genksyms/lex.l
@@ -289,6 +289,23 @@ repeat:
}
break;
+ case ST_TYPEOF_1:
+ if (token == IDENT)
+ {
+ if (is_reserved_word(yytext, yyleng)
+ || find_symbol(yytext, SYM_TYPEDEF, 1))
+ {
+ yyless(0);
+ unput('(');
+ lexstate = ST_NORMAL;
+ token = TYPEOF_KEYW;
+ break;
+ }
+ _APP("(", 1);
+ }
+ lexstate = ST_TYPEOF;
+ /* FALLTHRU */
+
case ST_TYPEOF:
switch (token)
{
@@ -313,24 +330,6 @@ repeat:
}
break;
- case ST_TYPEOF_1:
- if (token == IDENT)
- {
- if (is_reserved_word(yytext, yyleng)
- || find_symbol(yytext, SYM_TYPEDEF, 1))
- {
- yyless(0);
- unput('(');
- lexstate = ST_NORMAL;
- token = TYPEOF_KEYW;
- break;
- }
- _APP("(", 1);
- }
- APP;
- lexstate = ST_TYPEOF;
- goto repeat;
-
case ST_BRACKET:
APP;
switch (token)
diff --git a/scripts/genksyms/lex.lex.c_shipped b/scripts/genksyms/lex.lex.c_shipped
index f82740a69b85..985c5541aae4 100644
--- a/scripts/genksyms/lex.lex.c_shipped
+++ b/scripts/genksyms/lex.lex.c_shipped
@@ -2098,6 +2098,23 @@ repeat:
}
break;
+ case ST_TYPEOF_1:
+ if (token == IDENT)
+ {
+ if (is_reserved_word(yytext, yyleng)
+ || find_symbol(yytext, SYM_TYPEDEF, 1))
+ {
+ yyless(0);
+ unput('(');
+ lexstate = ST_NORMAL;
+ token = TYPEOF_KEYW;
+ break;
+ }
+ _APP("(", 1);
+ }
+ lexstate = ST_TYPEOF;
+ /* FALLTHRU */
+
case ST_TYPEOF:
switch (token)
{
@@ -2122,24 +2139,6 @@ repeat:
}
break;
- case ST_TYPEOF_1:
- if (token == IDENT)
- {
- if (is_reserved_word(yytext, yyleng)
- || find_symbol(yytext, SYM_TYPEDEF, 1))
- {
- yyless(0);
- unput('(');
- lexstate = ST_NORMAL;
- token = TYPEOF_KEYW;
- break;
- }
- _APP("(", 1);
- }
- APP;
- lexstate = ST_TYPEOF;
- goto repeat;
-
case ST_BRACKET:
APP;
switch (token)
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 4f727eb5ec43..f742c65108b9 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -37,12 +37,40 @@ info()
fi
}
+# A thin archive build makes a final archive with a symbol table
+# and indexes from the vmlinux objects, which can be used as input
+# to the linker.
+#
+# The traditional incremental style of link does not require this step.
+#
+# built-in.o output file
+#
+archive_builtin()
+{
+ if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+ info AR built-in.o
+ rm -f built-in.o;
+ ${AR} rcsT${KBUILD_ARFLAGS} built-in.o \
+ ${KBUILD_VMLINUX_INIT} \
+ ${KBUILD_VMLINUX_MAIN}
+ fi
+}
+
# Link of vmlinux.o used for section mismatch analysis
# ${1} output file
modpost_link()
{
- ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \
- --start-group ${KBUILD_VMLINUX_MAIN} --end-group
+ local objects
+
+ if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+ objects="--whole-archive built-in.o"
+ else
+ objects="${KBUILD_VMLINUX_INIT} \
+ --start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ --end-group"
+ fi
+ ${LD} ${LDFLAGS} -r -o ${1} ${objects}
}
# Link of vmlinux
@@ -51,18 +79,36 @@ modpost_link()
vmlinux_link()
{
local lds="${objtree}/${KBUILD_LDS}"
+ local objects
if [ "${SRCARCH}" != "um" ]; then
- ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
- -T ${lds} ${KBUILD_VMLINUX_INIT} \
- --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
+ if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+ objects="--whole-archive built-in.o ${1}"
+ else
+ objects="${KBUILD_VMLINUX_INIT} \
+ --start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ --end-group \
+ ${1}"
+ fi
+
+ ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
+ -T ${lds} ${objects}
else
- ${CC} ${CFLAGS_vmlinux} -o ${2} \
- -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \
- -Wl,--start-group \
- ${KBUILD_VMLINUX_MAIN} \
- -Wl,--end-group \
- -lutil -lrt -lpthread ${1}
+ if [ -n "${CONFIG_THIN_ARCHIVES}" ]; then
+ objects="-Wl,--whole-archive built-in.o ${1}"
+ else
+ objects="${KBUILD_VMLINUX_INIT} \
+ -Wl,--start-group \
+ ${KBUILD_VMLINUX_MAIN} \
+ -Wl,--end-group \
+ ${1}"
+ fi
+
+ ${CC} ${CFLAGS_vmlinux} -o ${2} \
+ -Wl,-T,${lds} \
+ ${objects} \
+ -lutil -lrt -lpthread
rm -f linux
fi
}
@@ -119,6 +165,7 @@ cleanup()
rm -f .tmp_kallsyms*
rm -f .tmp_version
rm -f .tmp_vmlinux*
+ rm -f built-in.o
rm -f System.map
rm -f vmlinux
rm -f vmlinux.o
@@ -162,6 +209,8 @@ case "${KCONFIG_CONFIG}" in
. "./${KCONFIG_CONFIG}"
esac
+archive_builtin
+
#link vmlinux.o
info LD vmlinux.o
modpost_link vmlinux.o
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index f826e8739023..d942c7c2bc0a 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -41,7 +41,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
- select CRYPTO
+ depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
select CRYPTO_AES
select CRYPTO_ECB
select CRYPTO_RNG
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index c0b3030b5634..835c1ab30d01 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -9,6 +9,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "big_key: "fmt
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
@@ -341,44 +342,48 @@ error:
*/
static int __init big_key_init(void)
{
- return register_key_type(&key_type_big_key);
-}
-
-/*
- * Initialize big_key crypto and RNG algorithms
- */
-static int __init big_key_crypto_init(void)
-{
- int ret = -EINVAL;
+ struct crypto_skcipher *cipher;
+ struct crypto_rng *rng;
+ int ret;
- /* init RNG */
- big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
- if (IS_ERR(big_key_rng)) {
- big_key_rng = NULL;
- return -EFAULT;
+ rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
+ if (IS_ERR(rng)) {
+ pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
+ return PTR_ERR(rng);
}
+ big_key_rng = rng;
+
/* seed RNG */
- ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
- if (ret)
- goto error;
+ ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
+ if (ret) {
+ pr_err("Can't reset rng: %d\n", ret);
+ goto error_rng;
+ }
/* init block cipher */
- big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
- 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(big_key_skcipher)) {
- big_key_skcipher = NULL;
- ret = -EFAULT;
- goto error;
+ cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(cipher)) {
+ ret = PTR_ERR(cipher);
+ pr_err("Can't alloc crypto: %d\n", ret);
+ goto error_rng;
+ }
+
+ big_key_skcipher = cipher;
+
+ ret = register_key_type(&key_type_big_key);
+ if (ret < 0) {
+ pr_err("Can't register type: %d\n", ret);
+ goto error_cipher;
}
return 0;
-error:
+error_cipher:
+ crypto_free_skcipher(big_key_skcipher);
+error_rng:
crypto_free_rng(big_key_rng);
- big_key_rng = NULL;
return ret;
}
-device_initcall(big_key_init);
-late_initcall(big_key_crypto_init);
+late_initcall(big_key_init);
diff --git a/security/keys/proc.c b/security/keys/proc.c
index f0611a6368cd..b9f531c9e4fa 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct timespec now;
unsigned long timo;
key_ref_t key_ref, skey_ref;
- char xbuf[12];
+ char xbuf[16];
int rc;
struct keyring_search_context ctx = {
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 085057936287..09fd6108e421 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3557,7 +3557,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
} else if (!vma->vm_file &&
((vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) ||
- vma_is_stack_for_task(vma, current))) {
+ vma_is_stack_for_current(vma))) {
rc = current_has_perm(current, PROCESS__EXECSTACK);
} else if (vma->vm_file && vma->anon_vma) {
/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ade7c6cad172..682b73af7766 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* the execve().
*/
if (get_user_pages_remote(current, bprm->mm, pos, 1,
- 0, 1, &page, NULL) <= 0)
+ FOLL_FORCE, &page, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];
diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
index fce5697e4261..8c3507216676 100644
--- a/sound/core/seq/seq_compat.c
+++ b/sound/core/seq/seq_compat.c
@@ -58,7 +58,7 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
goto error;
data->kernel = NULL;
- err = snd_seq_kernel_client_ctl(client->number, cmd, &data);
+ err = snd_seq_kernel_client_ctl(client->number, cmd, data);
if (err < 0)
goto error;
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index dcc102813aef..37d9cfbc29f9 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
ktime_get_ts64(&tm);
tm = timespec64_sub(tm, tmr->last_update);
- cur_time.tv_nsec = tm.tv_nsec;
- cur_time.tv_sec = tm.tv_sec;
+ cur_time.tv_nsec += tm.tv_nsec;
+ cur_time.tv_sec += tm.tv_sec;
snd_seq_sanity_real_time(&cur_time);
}
spin_unlock_irqrestore(&tmr->lock, flags);
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7e3aa50b21f9 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -111,7 +111,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return -EINVAL;
hm = kmalloc(sizeof(*hm), GFP_KERNEL);
- hr = kmalloc(sizeof(*hr), GFP_KERNEL);
+ hr = kzalloc(sizeof(*hr), GFP_KERNEL);
if (!hm || !hr) {
err = -ENOMEM;
goto out;
diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
index 9c22f95838ef..19d41da79f93 100644
--- a/sound/pci/hda/dell_wmi_helper.c
+++ b/sound/pci/hda/dell_wmi_helper.c
@@ -49,7 +49,7 @@ static void alc_fixup_dell_wmi(struct hda_codec *codec,
removefunc = true;
if (dell_led_set_func(DELL_LED_MICMUTE, false) >= 0) {
dell_led_value = 0;
- if (spec->gen.num_adc_nids > 1)
+ if (spec->gen.num_adc_nids > 1 && !spec->gen.dyn_adc_switch)
codec_dbg(codec, "Skipping micmute LED control due to several ADCs");
else {
dell_old_cap_hook = spec->gen.cap_sync_hook;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c3469f756ec2..c64d986009a9 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -341,8 +341,7 @@ enum {
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
- (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
- AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
AZX_DCAPS_SNOOP_TYPE(NVIDIA))
#define AZX_DCAPS_PRESET_CTHDA \
@@ -1716,6 +1715,10 @@ static int azx_first_init(struct azx *chip)
}
}
+ /* NVidia hardware normally only supports up to 40 bits of DMA */
+ if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+ dma_bits = 40;
+
/* disable 64bit DMA address on some devices */
if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
dev_dbg(card->dev, "Disabling 64bit DMA\n");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b58e8c76346a..2f909dd8b7b8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5811,8 +5811,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
#define ALC295_STANDARD_PINS \
{0x12, 0xb7a60130}, \
{0x14, 0x90170110}, \
- {0x17, 0x21014020}, \
- {0x18, 0x21a19030}, \
{0x21, 0x04211020}
#define ALC298_STANDARD_PINS \
@@ -5859,11 +5857,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1b, 0x02011020},
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
{0x1b, 0x01014020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170130},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221103f}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
{0x1b, 0x02011020},
{0x21, 0x0221103f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -6039,7 +6045,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
- ALC295_STANDARD_PINS),
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014020},
+ {0x18, 0x21a19030}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014040},
+ {0x18, 0x21a19050}),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
@@ -6613,6 +6625,7 @@ enum {
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
+ ALC892_FIXUP_ASROCK_MOBO,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -6889,6 +6902,16 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
}
},
+ [ALC892_FIXUP_ASROCK_MOBO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x15, 0x40f000f0 }, /* disabled */
+ { 0x16, 0x40f000f0 }, /* disabled */
+ { 0x18, 0x01014011 }, /* LO */
+ { 0x1a, 0x01014012 }, /* LO */
+ { }
+ }
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6926,6 +6949,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index f0955fd7a2e7..6a23302297c9 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -62,7 +62,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
- if (spec->num_adc_nids > 1)
+ if (spec->num_adc_nids > 1 && !spec->dyn_adc_switch)
codec_dbg(codec,
"Skipping micmute LED control due to several ADCs");
else {
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 14e587e70655..90009c0b3a92 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -604,8 +604,8 @@ line6_hwdep_write(struct snd_hwdep *hwdep, const char __user *data, long count,
}
data_copy = memdup_user(data, count);
- if (IS_ERR(ERR_PTR))
- return -ENOMEM;
+ if (IS_ERR(data_copy))
+ return PTR_ERR(data_copy);
rv = line6_send_raw_message(line6, data_copy, count);
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 9352a44ae6e4..49cd4a65e390 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -317,7 +317,8 @@ static int podhd_init(struct usb_line6 *line6,
if (pod->line6.properties->capabilities & LINE6_CAP_PCM) {
/* initialize PCM subsystem: */
err = line6_init_pcm(line6,
- (id->driver_info == LINE6_PODX3) ? &podx3_pcm_properties :
+ (id->driver_info == LINE6_PODX3 ||
+ id->driver_info == LINE6_PODX3LIVE) ? &podx3_pcm_properties :
&podhd_pcm_properties);
if (err < 0)
return err;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index c60a776e815d..8a59d4782a0f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+/* Syntek STK1160 */
+{
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .idVendor = 0x05e1,
+ .idProduct = 0x0408,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Syntek",
+ .product_name = "STK1160",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER
+ }
+},
+
/* Digidesign Mbox */
{
/* Thanks to Clemens Ladisch <clemens@ladisch.de> */
diff --git a/Documentation/accounting/.gitignore b/tools/accounting/.gitignore
index 86485203c4ae..86485203c4ae 100644
--- a/Documentation/accounting/.gitignore
+++ b/tools/accounting/.gitignore
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
new file mode 100644
index 000000000000..647c94a219bf
--- /dev/null
+++ b/tools/accounting/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := getdelays
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
diff --git a/Documentation/accounting/getdelays.c b/tools/accounting/getdelays.c
index b5ca536e56a8..b5ca536e56a8 100644
--- a/Documentation/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/Documentation/laptops/.gitignore b/tools/laptop/dslm/.gitignore
index 9fc984e64386..9fc984e64386 100644
--- a/Documentation/laptops/.gitignore
+++ b/tools/laptop/dslm/.gitignore
diff --git a/tools/laptop/dslm/Makefile b/tools/laptop/dslm/Makefile
new file mode 100644
index 000000000000..ff613b31730b
--- /dev/null
+++ b/tools/laptop/dslm/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := dslm
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
diff --git a/Documentation/laptops/dslm.c b/tools/laptop/dslm/dslm.c
index d5dd2d4b04d8..d5dd2d4b04d8 100644
--- a/Documentation/laptops/dslm.c
+++ b/tools/laptop/dslm/dslm.c
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index c0c0b265e88e..b63a31be1218 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -98,6 +98,15 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
*type = INSN_FP_SETUP;
break;
+ case 0x8d:
+ if (insn.rex_prefix.bytes &&
+ insn.rex_prefix.bytes[0] == 0x48 &&
+ insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+ insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+ /* lea %(rsp), %rbp */
+ *type = INSN_FP_SETUP;
+ break;
+
case 0x90:
*type = INSN_NOP;
break;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 143b6cdd7f06..e8a1f699058a 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -97,6 +97,19 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
return next;
}
+static bool gcov_enabled(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *sym;
+
+ list_for_each_entry(sec, &file->elf->sections, list)
+ list_for_each_entry(sym, &sec->symbol_list, list)
+ if (!strncmp(sym->name, "__gcov_.", 8))
+ return true;
+
+ return false;
+}
+
#define for_each_insn(file, insn) \
list_for_each_entry(insn, &file->insn_list, list)
@@ -713,6 +726,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
struct instruction *insn)
{
struct rela *text_rela, *rodata_rela;
+ struct instruction *orig_insn = insn;
text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
if (text_rela && text_rela->sym == file->rodata->sym) {
@@ -733,10 +747,16 @@ static struct rela *find_switch_table(struct objtool_file *file,
/* case 3 */
func_for_each_insn_continue_reverse(file, func, insn) {
- if (insn->type == INSN_JUMP_UNCONDITIONAL ||
- insn->type == INSN_JUMP_DYNAMIC)
+ if (insn->type == INSN_JUMP_DYNAMIC)
break;
+ /* allow small jumps within the range */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest &&
+ (insn->jump_dest->offset <= insn->offset ||
+ insn->jump_dest->offset > orig_insn->offset))
+ break;
+
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
insn->len);
if (text_rela && text_rela->sym == file->rodata->sym)
@@ -1034,34 +1054,6 @@ static int validate_branch(struct objtool_file *file,
return 0;
}
-static bool is_gcov_insn(struct instruction *insn)
-{
- struct rela *rela;
- struct section *sec;
- struct symbol *sym;
- unsigned long offset;
-
- rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
- if (!rela)
- return false;
-
- if (rela->sym->type != STT_SECTION)
- return false;
-
- sec = rela->sym->sec;
- offset = rela->addend + insn->offset + insn->len - rela->offset;
-
- list_for_each_entry(sym, &sec->symbol_list, list) {
- if (sym->type != STT_OBJECT)
- continue;
-
- if (offset >= sym->offset && offset < sym->offset + sym->len)
- return (!memcmp(sym->name, "__gcov0.", 8));
- }
-
- return false;
-}
-
static bool is_kasan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
@@ -1083,9 +1075,6 @@ static bool ignore_unreachable_insn(struct symbol *func,
if (insn->type == INSN_NOP)
return true;
- if (is_gcov_insn(insn))
- return true;
-
/*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
@@ -1146,6 +1135,19 @@ static int validate_functions(struct objtool_file *file)
ignore_unreachable_insn(func, insn))
continue;
+ /*
+ * gcov produces a lot of unreachable
+ * instructions. If we get an unreachable
+ * warning and the file has gcov enabled, just
+ * ignore it, and all other such warnings for
+ * the file.
+ */
+ if (!file->ignore_unreachables &&
+ gcov_enabled(file)) {
+ file->ignore_unreachables = true;
+ continue;
+ }
+
WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
warnings++;
}
diff --git a/Documentation/pcmcia/.gitignore b/tools/pcmcia/.gitignore
index 53d081336757..53d081336757 100644
--- a/Documentation/pcmcia/.gitignore
+++ b/tools/pcmcia/.gitignore
diff --git a/tools/pcmcia/Makefile b/tools/pcmcia/Makefile
new file mode 100644
index 000000000000..81a7498c5cd9
--- /dev/null
+++ b/tools/pcmcia/Makefile
@@ -0,0 +1,9 @@
+CC := $(CROSS_COMPILE)gcc
+CFLAGS := -I../../usr/include
+
+PROGS := crc32hash
+
+all: $(PROGS)
+
+clean:
+ rm -fr $(PROGS)
diff --git a/Documentation/pcmcia/crc32hash.c b/tools/pcmcia/crc32hash.c
index 44f8beea7260..44f8beea7260 100644
--- a/Documentation/pcmcia/crc32hash.c
+++ b/tools/pcmcia/crc32hash.c
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
index 5ce61a1bda9c..df14e6b67b63 100644
--- a/tools/perf/jvmti/Makefile
+++ b/tools/perf/jvmti/Makefile
@@ -36,7 +36,7 @@ SOLIBEXT=so
# The following works at least on fedora 23, you may need the next
# line for other distros.
ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fb8e42c7507a..4ffff7be9299 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -601,7 +601,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
u64 nr_entries;
hbt->timer(hbt->arg);
- if (hist_browser__has_filter(browser))
+ if (hist_browser__has_filter(browser) ||
+ symbol_conf.report_hierarchy)
hist_browser__update_nr_entries(browser);
nr_entries = hist_browser__nr_entries(browser);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 85dd0db0a127..2f3eded54b0c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1895,7 +1895,6 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
if (ph->needs_swap)
nr = bswap_32(nr);
- ph->env.nr_numa_nodes = nr;
nodes = zalloc(sizeof(*nodes) * nr);
if (!nodes)
return -ENOMEM;
@@ -1932,6 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
free(str);
}
+ ph->env.nr_numa_nodes = nr;
ph->env.numa_nodes = nodes;
return 0;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 9f43fda2570f..660fca05bc93 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -136,8 +136,8 @@ do { \
group [^,{}/]*[{][^}]*[}][^,{}/]*
event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
event [^,{}/]+
-bpf_object .*\.(o|bpf)
-bpf_source .*\.c
+bpf_object [^,{}]+\.(o|bpf)
+bpf_source [^,{}]+\.c
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
diff --git a/Documentation/filesystems/.gitignore b/tools/testing/selftests/filesystems/.gitignore
index 31d6e426b6d4..31d6e426b6d4 100644
--- a/Documentation/filesystems/.gitignore
+++ b/tools/testing/selftests/filesystems/.gitignore
diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile
new file mode 100644
index 000000000000..0ab11307b414
--- /dev/null
+++ b/tools/testing/selftests/filesystems/Makefile
@@ -0,0 +1,7 @@
+TEST_PROGS := dnotify_test
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/filesystems/dnotify_test.c b/tools/testing/selftests/filesystems/dnotify_test.c
index 8b37b4a1e18d..8b37b4a1e18d 100644
--- a/Documentation/filesystems/dnotify_test.c
+++ b/tools/testing/selftests/filesystems/dnotify_test.c
diff --git a/tools/testing/selftests/futex/functional/run.sh b/tools/testing/selftests/futex/functional/run.sh
index e87dbe2a0b0d..7ff002eed624 100755
--- a/tools/testing/selftests/futex/functional/run.sh
+++ b/tools/testing/selftests/futex/functional/run.sh
@@ -24,7 +24,7 @@
# Test for a color capable console
if [ -z "$USE_COLOR" ]; then
- tput setf 7
+ tput setf 7 || tput setaf 7
if [ $? -eq 0 ]; then
USE_COLOR=1
tput sgr0
diff --git a/tools/testing/selftests/futex/run.sh b/tools/testing/selftests/futex/run.sh
index 4126312ad64e..88bcb1767362 100755
--- a/tools/testing/selftests/futex/run.sh
+++ b/tools/testing/selftests/futex/run.sh
@@ -23,7 +23,7 @@
# Test for a color capable shell and pass the result to the subdir scripts
USE_COLOR=0
-tput setf 7
+tput setf 7 || tput setaf 7
if [ $? -eq 0 ]; then
USE_COLOR=1
tput sgr0
diff --git a/Documentation/ia64/.gitignore b/tools/testing/selftests/ia64/.gitignore
index ab806edc8732..ab806edc8732 100644
--- a/Documentation/ia64/.gitignore
+++ b/tools/testing/selftests/ia64/.gitignore
diff --git a/tools/testing/selftests/ia64/Makefile b/tools/testing/selftests/ia64/Makefile
new file mode 100644
index 000000000000..2b3de2d3e945
--- /dev/null
+++ b/tools/testing/selftests/ia64/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := aliasing-test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/ia64/aliasing-test.c b/tools/testing/selftests/ia64/aliasing-test.c
index 62a190d45f38..62a190d45f38 100644
--- a/Documentation/ia64/aliasing-test.c
+++ b/tools/testing/selftests/ia64/aliasing-test.c
diff --git a/Documentation/networking/timestamping/.gitignore b/tools/testing/selftests/networking/timestamping/.gitignore
index 9e69e982fb38..9e69e982fb38 100644
--- a/Documentation/networking/timestamping/.gitignore
+++ b/tools/testing/selftests/networking/timestamping/.gitignore
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
new file mode 100644
index 000000000000..ccbb9edbbbb9
--- /dev/null
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := hwtstamp_config timestamping txtimestamp
+
+all: $(TEST_PROGS)
+
+include ../../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/networking/timestamping/hwtstamp_config.c b/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
index e8b685a7f15f..e8b685a7f15f 100644
--- a/Documentation/networking/timestamping/hwtstamp_config.c
+++ b/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
diff --git a/Documentation/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c
index 5cdfd743447b..5cdfd743447b 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/tools/testing/selftests/networking/timestamping/timestamping.c
diff --git a/Documentation/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 5df07047ca86..5df07047ca86 100644
--- a/Documentation/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
diff --git a/tools/testing/selftests/powerpc/copyloops/asm/export.h b/tools/testing/selftests/powerpc/copyloops/asm/export.h
new file mode 100644
index 000000000000..2d14a9b4248c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/copyloops/asm/export.h
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore
index 4fe13a439fd7..50ded63e25b7 100644
--- a/tools/testing/selftests/powerpc/math/.gitignore
+++ b/tools/testing/selftests/powerpc/math/.gitignore
@@ -4,3 +4,4 @@ fpu_preempt
vmx_preempt
fpu_signal
vmx_signal
+vsx_preempt
diff --git a/tools/testing/selftests/powerpc/signal/.gitignore b/tools/testing/selftests/powerpc/signal/.gitignore
new file mode 100644
index 000000000000..1b89224a8aab
--- /dev/null
+++ b/tools/testing/selftests/powerpc/signal/.gitignore
@@ -0,0 +1,2 @@
+signal
+signal_tm
diff --git a/tools/testing/selftests/powerpc/stringloops/asm/export.h b/tools/testing/selftests/powerpc/stringloops/asm/export.h
new file mode 100644
index 000000000000..2d14a9b4248c
--- /dev/null
+++ b/tools/testing/selftests/powerpc/stringloops/asm/export.h
@@ -0,0 +1 @@
+#define EXPORT_SYMBOL(x)
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index 82c0a9ce6e74..427621792229 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -7,3 +7,7 @@ tm-fork
tm-tar
tm-tmspr
tm-exec
+tm-signal-context-chk-fpu
+tm-signal-context-chk-gpr
+tm-signal-context-chk-vmx
+tm-signal-context-chk-vsx
diff --git a/Documentation/prctl/.gitignore b/tools/testing/selftests/prctl/.gitignore
index 0b5c27447bf6..0b5c27447bf6 100644
--- a/Documentation/prctl/.gitignore
+++ b/tools/testing/selftests/prctl/.gitignore
diff --git a/tools/testing/selftests/prctl/Makefile b/tools/testing/selftests/prctl/Makefile
new file mode 100644
index 000000000000..35aa1c8f2df2
--- /dev/null
+++ b/tools/testing/selftests/prctl/Makefile
@@ -0,0 +1,15 @@
+ifndef CROSS_COMPILE
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+
+ifeq ($(ARCH),x86)
+TEST_PROGS := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test \
+ disable-tsc-test
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
+endif
+endif
diff --git a/Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c b/tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c
index f7499d1c0415..f7499d1c0415 100644
--- a/Documentation/prctl/disable-tsc-ctxt-sw-stress-test.c
+++ b/tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c
diff --git a/Documentation/prctl/disable-tsc-on-off-stress-test.c b/tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c
index a06f027e9d16..a06f027e9d16 100644
--- a/Documentation/prctl/disable-tsc-on-off-stress-test.c
+++ b/tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c
diff --git a/Documentation/prctl/disable-tsc-test.c b/tools/testing/selftests/prctl/disable-tsc-test.c
index 8d494f7bebdb..8d494f7bebdb 100644
--- a/Documentation/prctl/disable-tsc-test.c
+++ b/tools/testing/selftests/prctl/disable-tsc-test.c
diff --git a/Documentation/ptp/.gitignore b/tools/testing/selftests/ptp/.gitignore
index f562e49d6917..f562e49d6917 100644
--- a/Documentation/ptp/.gitignore
+++ b/tools/testing/selftests/ptp/.gitignore
diff --git a/tools/testing/selftests/ptp/Makefile b/tools/testing/selftests/ptp/Makefile
new file mode 100644
index 000000000000..83dd42b2129e
--- /dev/null
+++ b/tools/testing/selftests/ptp/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := testptp
+LDLIBS += -lrt
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index 5d2eae16f7ee..5d2eae16f7ee 100644
--- a/Documentation/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
diff --git a/Documentation/ptp/testptp.mk b/tools/testing/selftests/ptp/testptp.mk
index 4ef2d9755421..4ef2d9755421 100644
--- a/Documentation/ptp/testptp.mk
+++ b/tools/testing/selftests/ptp/testptp.mk
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 5a246a02dff3..15cf56d32155 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -122,7 +122,7 @@ static int check_itimer(int which)
else if (which == ITIMER_REAL)
idle_loop();
- gettimeofday(&end, NULL);
+ err = gettimeofday(&end, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
@@ -175,7 +175,7 @@ static int check_timer_create(int which)
user_loop();
- gettimeofday(&end, NULL);
+ err = gettimeofday(&end, NULL);
if (err < 0) {
perror("Can't call gettimeofday()\n");
return -1;
diff --git a/Documentation/vDSO/.gitignore b/tools/testing/selftests/vDSO/.gitignore
index 133bf9ee986c..133bf9ee986c 100644
--- a/Documentation/vDSO/.gitignore
+++ b/tools/testing/selftests/vDSO/.gitignore
diff --git a/tools/testing/selftests/vDSO/Makefile b/tools/testing/selftests/vDSO/Makefile
new file mode 100644
index 000000000000..706b68b1c372
--- /dev/null
+++ b/tools/testing/selftests/vDSO/Makefile
@@ -0,0 +1,20 @@
+ifndef CROSS_COMPILE
+CFLAGS := -std=gnu99
+CFLAGS_vdso_standalone_test_x86 := -nostdlib -fno-asynchronous-unwind-tables -fno-stack-protector
+ifeq ($(CONFIG_X86_32),y)
+LDLIBS += -lgcc_s
+endif
+
+TEST_PROGS := vdso_test vdso_standalone_test_x86
+
+all: $(TEST_PROGS)
+vdso_test: parse_vdso.c vdso_test.c
+vdso_standalone_test_x86: vdso_standalone_test_x86.c parse_vdso.c
+ $(CC) $(CFLAGS) $(CFLAGS_vdso_standalone_test_x86) \
+ vdso_standalone_test_x86.c parse_vdso.c \
+ -o vdso_standalone_test_x86
+
+include ../lib.mk
+clean:
+ rm -fr $(TEST_PROGS)
+endif
diff --git a/Documentation/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 1dbb4b87268f..1dbb4b87268f 100644
--- a/Documentation/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
diff --git a/Documentation/vDSO/vdso_standalone_test_x86.c b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
index 93b0ebf8cc38..93b0ebf8cc38 100644
--- a/Documentation/vDSO/vdso_standalone_test_x86.c
+++ b/tools/testing/selftests/vDSO/vdso_standalone_test_x86.c
diff --git a/Documentation/vDSO/vdso_test.c b/tools/testing/selftests/vDSO/vdso_test.c
index 8daeb7d7032c..8daeb7d7032c 100644
--- a/Documentation/vDSO/vdso_test.c
+++ b/tools/testing/selftests/vDSO/vdso_test.c
diff --git a/tools/testing/selftests/watchdog/.gitignore b/tools/testing/selftests/watchdog/.gitignore
new file mode 100644
index 000000000000..5aac51575c7e
--- /dev/null
+++ b/tools/testing/selftests/watchdog/.gitignore
@@ -0,0 +1 @@
+watchdog-test
diff --git a/tools/testing/selftests/watchdog/Makefile b/tools/testing/selftests/watchdog/Makefile
new file mode 100644
index 000000000000..f863c664e3d1
--- /dev/null
+++ b/tools/testing/selftests/watchdog/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := watchdog-test
+
+all: $(TEST_PROGS)
+
+include ../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/watchdog/src/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c
index 6983d05097e2..6983d05097e2 100644
--- a/Documentation/watchdog/src/watchdog-test.c
+++ b/tools/testing/selftests/watchdog/watchdog-test.c
diff --git a/tools/testing/selftests/zram/README b/tools/testing/selftests/zram/README
index eb17917c8a3a..7972cc512408 100644
--- a/tools/testing/selftests/zram/README
+++ b/tools/testing/selftests/zram/README
@@ -13,7 +13,7 @@ Statistics for individual zram devices are exported through sysfs nodes at
Kconfig required:
CONFIG_ZRAM=y
-CONFIG_ZRAM_LZ4_COMPRESS=y
+CONFIG_CRYPTO_LZ4=y
CONFIG_ZPOOL=y
CONFIG_ZSMALLOC=y
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..8035cc1eb955 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* use FOLL_REMOTE.
*/
- __get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+ __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+ FOLL_WRITE | FOLL_REMOTE);
kvm_async_page_present_sync(vcpu, apf);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73d3df3..2907b7b78654 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1346,21 +1346,19 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
static int get_user_page_nowait(unsigned long start, int write,
struct page **page)
{
- int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
+ int flags = FOLL_NOWAIT | FOLL_HWPOISON;
if (write)
flags |= FOLL_WRITE;
- return __get_user_pages(current, current->mm, start, 1, flags, page,
- NULL, NULL);
+ return get_user_pages(start, 1, flags, page, NULL);
}
static inline int check_user_page_hwpoison(unsigned long addr)
{
- int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
+ int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
- rc = __get_user_pages(current, current->mm, addr, 1,
- flags, NULL, NULL, NULL);
+ rc = get_user_pages(addr, 1, flags, NULL, NULL);
return rc == -EHWPOISON;
}
@@ -1416,10 +1414,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
down_read(&current->mm->mmap_sem);
npages = get_user_page_nowait(addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else
+ } else {
+ unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+ if (write_fault)
+ flags |= FOLL_WRITE;
+
npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
- write_fault, 0, page,
- FOLL_TOUCH|FOLL_HWPOISON);
+ page, flags);
+ }
if (npages != 1)
return npages;