Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 10
-rw-r--r--  drivers/acpi/Makefile | 3
-rw-r--r--  drivers/acpi/acpi_processor.c | 7
-rw-r--r--  drivers/acpi/bus.c | 3
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/gsi.c | 105
-rw-r--r--  drivers/acpi/internal.h | 4
-rw-r--r--  drivers/acpi/osl.c | 6
-rw-r--r--  drivers/acpi/processor_core.c | 60
-rw-r--r--  drivers/acpi/scan.c | 39
-rw-r--r--  drivers/acpi/tables.c | 52
-rw-r--r--  drivers/base/devtmpfs.c | 32
-rw-r--r--  drivers/block/drbd/drbd_debugfs.c | 8
-rw-r--r--  drivers/block/rbd.c | 26
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 132
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 14
-rw-r--r--  drivers/crypto/Kconfig | 3
-rw-r--r--  drivers/dma-buf/dma-buf.c | 47
-rw-r--r--  drivers/dma/Kconfig | 29
-rw-r--r--  drivers/dma/Makefile | 3
-rw-r--r--  drivers/dma/amba-pl08x.c | 11
-rw-r--r--  drivers/dma/at_hdmac.c | 175
-rw-r--r--  drivers/dma/at_xdmac.c | 4
-rw-r--r--  drivers/dma/bestcomm/bestcomm.c | 4
-rw-r--r--  drivers/dma/dma-jz4740.c | 12
-rw-r--r--  drivers/dma/dma-jz4780.c | 877
-rw-r--r--  drivers/dma/dmaengine.c | 18
-rw-r--r--  drivers/dma/dw/Kconfig | 2
-rw-r--r--  drivers/dma/dw/core.c | 18
-rw-r--r--  drivers/dma/edma.c | 2
-rw-r--r--  drivers/dma/fsl_raid.c | 904
-rw-r--r--  drivers/dma/fsl_raid.h | 306
-rw-r--r--  drivers/dma/img-mdc-dma.c | 6
-rw-r--r--  drivers/dma/imx-sdma.c | 4
-rw-r--r--  drivers/dma/ioat/dca.c | 4
-rw-r--r--  drivers/dma/ioat/dma.c | 4
-rw-r--r--  drivers/dma/ioat/dma.h | 4
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 4
-rw-r--r--  drivers/dma/ioat/dma_v2.h | 4
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 4
-rw-r--r--  drivers/dma/ioat/hw.h | 4
-rw-r--r--  drivers/dma/ioat/pci.c | 4
-rw-r--r--  drivers/dma/ioat/registers.h | 4
-rw-r--r--  drivers/dma/iop-adma.c | 4
-rw-r--r--  drivers/dma/k3dma.c | 8
-rw-r--r--  drivers/dma/mmp_pdma.c | 2
-rw-r--r--  drivers/dma/mmp_tdma.c | 2
-rw-r--r--  drivers/dma/mpc512x_dma.c | 6
-rw-r--r--  drivers/dma/mv_xor.c | 6
-rw-r--r--  drivers/dma/mv_xor.h | 4
-rw-r--r--  drivers/dma/pch_dma.c | 1
-rw-r--r--  drivers/dma/pl330.c | 26
-rw-r--r--  drivers/dma/ppc4xx/adma.c | 4
-rw-r--r--  drivers/dma/qcom_bam_dma.c | 44
-rw-r--r--  drivers/dma/s3c24xx-dma.c | 13
-rw-r--r--  drivers/dma/sa11x0-dma.c | 12
-rw-r--r--  drivers/dma/sh/Kconfig | 15
-rw-r--r--  drivers/dma/sh/Makefile | 2
-rw-r--r--  drivers/dma/sh/rcar-audmapp.c | 376
-rw-r--r--  drivers/dma/sh/shdma-base.c | 73
-rw-r--r--  drivers/dma/sh/shdmac.c | 4
-rw-r--r--  drivers/dma/sh/usb-dmac.c | 910
-rw-r--r--  drivers/dma/sirf-dma.c | 2
-rw-r--r--  drivers/dma/ste_dma40.c | 6
-rw-r--r--  drivers/dma/sun6i-dma.c | 8
-rwxr-xr-x  drivers/dma/xgene-dma.c | 2089
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 10
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 35
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 9
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 9
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 9
-rw-r--r--  drivers/i2c/busses/i2c-cros-ec-tunnel.c | 51
-rw-r--r--  drivers/i2c/busses/i2c-digicolor.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-mxs.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pca-platform.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-st.c | 13
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/i2c/i2c-mux.c | 8
-rw-r--r--  drivers/infiniband/core/umem.c | 7
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 22
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c | 457
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 26
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 14
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c | 44
-rw-r--r--  drivers/infiniband/hw/qib/qib_fs.c | 20
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h | 31
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 18
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 195
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 73
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 520
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 44
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h | 66
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 66
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 523
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 220
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 691
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.h | 37
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 9
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 233
-rw-r--r--  drivers/input/keyboard/cros_ec_keyb.c | 13
-rw-r--r--  drivers/iommu/intel-iommu.c | 142
-rw-r--r--  drivers/iommu/intel_irq_remapping.c | 5
-rw-r--r--  drivers/irqchip/irq-gic.c | 104
-rw-r--r--  drivers/irqchip/irqchip.c | 3
-rw-r--r--  drivers/lguest/hypercalls.c | 5
-rw-r--r--  drivers/lguest/interrupts_and_traps.c | 105
-rw-r--r--  drivers/lguest/lg.h | 2
-rw-r--r--  drivers/lguest/lguest_user.c | 8
-rw-r--r--  drivers/md/Kconfig | 16
-rw-r--r--  drivers/md/Makefile | 1
-rw-r--r--  drivers/md/bitmap.c | 189
-rw-r--r--  drivers/md/bitmap.h | 10
-rw-r--r--  drivers/md/md-cluster.c | 965
-rw-r--r--  drivers/md/md-cluster.h | 29
-rw-r--r--  drivers/md/md.c | 382
-rw-r--r--  drivers/md/md.h | 26
-rw-r--r--  drivers/md/raid0.c | 48
-rw-r--r--  drivers/md/raid1.c | 29
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 826
-rw-r--r--  drivers/md/raid5.h | 59
-rw-r--r--  drivers/media/platform/xilinx/xilinx-dma.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-contig.c | 8
-rw-r--r--  drivers/media/v4l2-core/videobuf2-dma-sg.c | 8
-rw-r--r--  drivers/media/v4l2-core/videobuf2-vmalloc.c | 8
-rw-r--r--  drivers/mfd/cros_ec.c | 19
-rw-r--r--  drivers/mmc/host/sh_mmcif.c | 13
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c | 26
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 4
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c | 10
-rw-r--r--  drivers/mtd/Kconfig | 13
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 30
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 58
-rw-r--r--  drivers/mtd/devices/docg3.c | 11
-rw-r--r--  drivers/mtd/devices/m25p80.c | 21
-rw-r--r--  drivers/mtd/maps/Kconfig | 2
-rw-r--r--  drivers/mtd/maps/sa1100-flash.c | 4
-rw-r--r--  drivers/mtd/maps/ts5500_flash.c | 2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 3
-rw-r--r--  drivers/mtd/mtdcore.c | 52
-rw-r--r--  drivers/mtd/mtdpart.c | 68
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 26
-rw-r--r--  drivers/mtd/nand/atmel_nand_ecc.h | 3
-rw-r--r--  drivers/mtd/nand/atmel_nand_nfc.h | 1
-rw-r--r--  drivers/mtd/nand/denali.c | 6
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 4
-rw-r--r--  drivers/mtd/nand/fsmc_nand.c | 7
-rw-r--r--  drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 16
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 146
-rw-r--r--  drivers/mtd/nand/nand_base.c | 41
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 48
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 3
-rw-r--r--  drivers/mtd/nand/sh_flctl.c | 2
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 12
-rw-r--r--  drivers/mtd/spi-nor/fsl-quadspi.c | 11
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 61
-rw-r--r--  drivers/mtd/tests/mtd_nandecctest.c | 6
-rw-r--r--  drivers/mtd/tests/mtd_test.h | 12
-rw-r--r--  drivers/mtd/tests/nandbiterrs.c | 4
-rw-r--r--  drivers/mtd/tests/oobtest.c | 90
-rw-r--r--  drivers/mtd/tests/pagetest.c | 10
-rw-r--r--  drivers/mtd/tests/readtest.c | 5
-rw-r--r--  drivers/mtd/tests/speedtest.c | 38
-rw-r--r--  drivers/mtd/tests/stresstest.c | 9
-rw-r--r--  drivers/mtd/tests/subpagetest.c | 29
-rw-r--r--  drivers/mtd/tests/torturetest.c | 23
-rw-r--r--  drivers/mtd/ubi/build.c | 6
-rw-r--r--  drivers/mtd/ubi/kapi.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 1
-rw-r--r--  drivers/net/bonding/bonding_priv.h | 25
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 2
-rw-r--r--  drivers/net/ethernet/8390/etherh.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 5
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 37
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 10
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 8
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 14
-rw-r--r--  drivers/net/phy/mdio-mux-gpio.c | 60
-rw-r--r--  drivers/net/ppp/ppp_mppe.c | 36
-rw-r--r--  drivers/of/Kconfig | 2
-rw-r--r--  drivers/of/base.c | 28
-rw-r--r--  drivers/of/fdt.c | 24
-rw-r--r--  drivers/of/unittest.c | 62
-rw-r--r--  drivers/oprofile/oprofilefs.c | 16
-rw-r--r--  drivers/platform/chrome/Kconfig | 26
-rw-r--r--  drivers/platform/chrome/Makefile | 3
-rw-r--r--  drivers/platform/chrome/chromeos_laptop.c | 35
-rw-r--r--  drivers/platform/chrome/cros_ec_dev.c | 274
-rw-r--r--  drivers/platform/chrome/cros_ec_dev.h | 53
-rw-r--r--  drivers/platform/chrome/cros_ec_lightbar.c | 367
-rw-r--r--  drivers/platform/chrome/cros_ec_lpc.c | 319
-rw-r--r--  drivers/platform/chrome/cros_ec_sysfs.c | 271
-rw-r--r--  drivers/platform/x86/Kconfig | 1
-rw-r--r--  drivers/platform/x86/apple-gmux.c | 48
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 1089
-rw-r--r--  drivers/platform/x86/intel_oaktrail.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 320
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 256
-rw-r--r--  drivers/platform/x86/toshiba_bluetooth.c | 133
-rw-r--r--  drivers/platform/x86/wmi.c | 5
-rw-r--r--  drivers/powercap/intel_rapl.c | 1
-rw-r--r--  drivers/pwm/core.c | 2
-rw-r--r--  drivers/pwm/pwm-atmel-hlcdc.c | 4
-rw-r--r--  drivers/pwm/pwm-mxs.c | 8
-rw-r--r--  drivers/pwm/pwm-pca9685.c | 2
-rw-r--r--  drivers/pwm/pwm-samsung.c | 32
-rw-r--r--  drivers/s390/kvm/virtio_ccw.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 174
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 1
-rw-r--r--  drivers/spi/spi-rspi.c | 1
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 1
-rw-r--r--  drivers/staging/android/ion/ion.c | 9
-rw-r--r--  drivers/staging/lustre/lustre/llite/dcache.c | 14
-rw-r--r--  drivers/staging/lustre/lustre/llite/file.c | 16
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_internal.h | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_lib.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_nfs.c | 6
-rw-r--r--  drivers/staging/lustre/lustre/llite/namei.c | 20
-rw-r--r--  drivers/staging/lustre/lustre/llite/statahead.c | 28
-rw-r--r--  drivers/staging/lustre/lustre/llite/symlink.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/xattr.c | 8
-rw-r--r--  drivers/target/Kconfig | 5
-rw-r--r--  drivers/target/Makefile | 2
-rw-r--r--  drivers/target/iscsi/Makefile | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 131
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.c | 208
-rw-r--r--  drivers/target/iscsi/iscsi_target_configfs.h | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 14
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 60
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.c | 25
-rw-r--r--  drivers/target/iscsi/iscsi_target_tpg.h | 1
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.c | 495
-rw-r--r--  drivers/target/iscsi/iscsi_target_tq.h | 84
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 1
-rw-r--r--  drivers/target/loopback/tcm_loop.c | 242
-rw-r--r--  drivers/target/loopback/tcm_loop.h | 1
-rw-r--r--  drivers/target/sbp/sbp_target.c | 68
-rw-r--r--  drivers/target/target_core_configfs.c | 192
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 38
-rw-r--r--  drivers/target/target_core_file.c | 261
-rw-r--r--  drivers/target/target_core_iblock.c | 4
-rw-r--r--  drivers/target/target_core_internal.h | 6
-rw-r--r--  drivers/target/target_core_pr.c | 48
-rw-r--r--  drivers/target/target_core_rd.c | 137
-rw-r--r--  drivers/target/target_core_sbc.c | 109
-rw-r--r--  drivers/target/target_core_spc.c | 16
-rw-r--r--  drivers/target/target_core_tmr.c | 4
-rw-r--r--  drivers/target/target_core_tpg.c | 2
-rw-r--r--  drivers/target/target_core_transport.c | 162
-rw-r--r--  drivers/target/target_core_user.c | 52
-rw-r--r--  drivers/target/target_core_xcopy.c | 46
-rw-r--r--  drivers/target/tcm_fc/tcm_fc.h | 1
-rw-r--r--  drivers/target/tcm_fc/tfc_conf.c | 89
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 20
-rw-r--r--  drivers/tty/serial/8250/8250_early.c | 5
-rw-r--r--  drivers/tty/serial/of_serial.c | 3
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/tcm_usb_gadget.c | 57
-rw-r--r--  drivers/vhost/scsi.c | 126
-rw-r--r--  drivers/virtio/Kconfig | 10
-rw-r--r--  drivers/virtio/Makefile | 1
-rw-r--r--  drivers/virtio/virtio.c | 6
-rw-r--r--  drivers/virtio/virtio_balloon.c | 21
-rw-r--r--  drivers/virtio/virtio_input.c | 384
-rw-r--r--  drivers/virtio/virtio_mmio.c | 8
-rw-r--r--  drivers/virtio/virtio_pci_modern.c | 123
-rw-r--r--  drivers/watchdog/Kconfig | 2
-rw-r--r--  drivers/watchdog/bcm_kona_wdt.c | 27
-rw-r--r--  drivers/watchdog/octeon-wdt-main.c | 201
-rw-r--r--  drivers/watchdog/pnx4008_wdt.c | 2
-rw-r--r--  drivers/watchdog/qcom-wdt.c | 21
-rw-r--r--  drivers/watchdog/stmp3xxx_rtc_wdt.c | 4
-rw-r--r--  drivers/xen/Kconfig | 4
-rw-r--r--  drivers/xen/Makefile | 2
-rw-r--r--  drivers/xen/xen-scsiback.c | 74
302 files changed, 16307 insertions(+), 5514 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index e6c3ddd92665..ab2cbb51c6aa 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,7 +5,7 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on !IA64_HP_SIM
- depends on IA64 || X86
+ depends on IA64 || X86 || (ARM64 && EXPERT)
depends on PCI
select PNP
default y
@@ -48,9 +48,16 @@ config ACPI_LEGACY_TABLES_LOOKUP
config ARCH_MIGHT_HAVE_ACPI_PDC
bool
+config ACPI_GENERIC_GSI
+ bool
+
+config ACPI_SYSTEM_POWER_STATES_SUPPORT
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
+ depends on ACPI_SYSTEM_POWER_STATES_SUPPORT
default y
config ACPI_PROCFS_POWER
@@ -163,6 +170,7 @@ config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
select CPU_IDLE
+ depends on X86 || IA64
default y
help
This driver installs ACPI as the idle handler for Linux and uses
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 623b117ad1a2..8a063e276530 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -23,7 +23,7 @@ acpi-y += nvs.o
# Power management related files
acpi-y += wakeup.o
-acpi-y += sleep.o
+acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
acpi-y += device_pm.o
acpi-$(CONFIG_ACPI_SLEEP) += proc.o
@@ -56,6 +56,7 @@ ifdef CONFIG_ACPI_VIDEO
acpi-y += video_detect.o
endif
acpi-y += acpi_lpat.o
+acpi-$(CONFIG_ACPI_GENERIC_GSI) += gsi.o
# These are (potentially) separate modules
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1020b1b53a17..58f335ca2e75 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->phys_id == -1)
+ if (pr->phys_id == PHYS_CPUID_INVALID)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -215,7 +215,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int phys_id, cpu_index, device_declaration = 0;
+ phys_cpuid_t phys_id;
+ int cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -263,7 +264,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
- if (phys_id < 0)
+ if (phys_id == PHYS_CPUID_INVALID)
acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
pr->phys_id = phys_id;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 8b67bd0f6bb5..c412fdb28d34 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -448,6 +448,9 @@ static int __init acpi_bus_init_irq(void)
case ACPI_IRQ_MODEL_IOSAPIC:
message = "IOSAPIC";
break;
+ case ACPI_IRQ_MODEL_GIC:
+ message = "GIC";
+ break;
case ACPI_IRQ_MODEL_PLATFORM:
message = "platform specific model";
break;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 220d6406c9e9..5e8fed448850 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -861,7 +861,7 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
}
}
mutex_unlock(&ec->mutex);
- list_for_each_entry(handler, &free_list, node)
+ list_for_each_entry_safe(handler, tmp, &free_list, node)
acpi_ec_put_query_handler(handler);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
diff --git a/drivers/acpi/gsi.c b/drivers/acpi/gsi.c
new file mode 100644
index 000000000000..38208f2d0e69
--- /dev/null
+++ b/drivers/acpi/gsi.c
@@ -0,0 +1,105 @@
+/*
+ * ACPI GSI IRQ layer
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+
+enum acpi_irq_model_id acpi_irq_model;
+
+static unsigned int acpi_gsi_get_irq_type(int trigger, int polarity)
+{
+ switch (polarity) {
+ case ACPI_ACTIVE_LOW:
+ return trigger == ACPI_EDGE_SENSITIVE ?
+ IRQ_TYPE_EDGE_FALLING :
+ IRQ_TYPE_LEVEL_LOW;
+ case ACPI_ACTIVE_HIGH:
+ return trigger == ACPI_EDGE_SENSITIVE ?
+ IRQ_TYPE_EDGE_RISING :
+ IRQ_TYPE_LEVEL_HIGH;
+ case ACPI_ACTIVE_BOTH:
+ if (trigger == ACPI_EDGE_SENSITIVE)
+ return IRQ_TYPE_EDGE_BOTH;
+ default:
+ return IRQ_TYPE_NONE;
+ }
+}
+
+/**
+ * acpi_gsi_to_irq() - Retrieve the linux irq number for a given GSI
+ * @gsi: GSI IRQ number to map
+ * @irq: pointer where linux IRQ number is stored
+ *
+ * irq location updated with irq value [>0 on success, 0 on failure]
+ *
+ * Returns: linux IRQ number on success (>0)
+ * -EINVAL on failure
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
+{
+ /*
+ * Only default domain is supported at present, always find
+ * the mapping corresponding to default domain by passing NULL
+ * as irq_domain parameter
+ */
+ *irq = irq_find_mapping(NULL, gsi);
+ /*
+ * *irq == 0 means no mapping, that should
+ * be reported as a failure
+ */
+ return (*irq > 0) ? *irq : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+/**
+ * acpi_register_gsi() - Map a GSI to a linux IRQ number
+ * @dev: device for which IRQ has to be mapped
+ * @gsi: GSI IRQ number
+ * @trigger: trigger type of the GSI number to be mapped
+ * @polarity: polarity of the GSI to be mapped
+ *
+ * Returns: a valid linux IRQ number on success
+ * -EINVAL on failure
+ */
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
+ int polarity)
+{
+ unsigned int irq;
+ unsigned int irq_type = acpi_gsi_get_irq_type(trigger, polarity);
+
+ /*
+ * There is no way at present to look-up the IRQ domain on ACPI,
+ * hence always create mapping referring to the default domain
+ * by passing NULL as irq_domain parameter
+ */
+ irq = irq_create_mapping(NULL, gsi);
+ if (!irq)
+ return -EINVAL;
+
+ /* Set irq type if specified and different than the current one */
+ if (irq_type != IRQ_TYPE_NONE &&
+ irq_type != irq_get_trigger_type(irq))
+ irq_set_irq_type(irq, irq_type);
+ return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+/**
+ * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
+ * @gsi: GSI IRQ number
+ */
+void acpi_unregister_gsi(u32 gsi)
+{
+ int irq = irq_find_mapping(NULL, gsi);
+
+ irq_dispose_mapping(irq);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 56b321aa2b1c..ba4a61e964be 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -161,7 +161,11 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
/*--------------------------------------------------------------------------
Suspend/Resume
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
extern int acpi_sleep_init(void);
+#else
+static inline int acpi_sleep_init(void) { return -ENXIO; }
+#endif
#ifdef CONFIG_ACPI_SLEEP
int acpi_sleep_proc_init(void);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f9eeae871593..39748bb3a543 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -336,11 +336,11 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
return NULL;
}
-#ifndef CONFIG_IA64
-#define should_use_kmap(pfn) page_is_ram(pfn)
-#else
+#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn) 0
+#else
+#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 7962651cdbd4..b1ec78b8a645 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,7 +32,7 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, int *apic_id)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
@@ -48,7 +48,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
@@ -65,7 +65,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, int *apic_id)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
@@ -83,10 +83,35 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
return 0;
}
-static int map_madt_entry(int type, u32 acpi_id)
+/*
+ * Retrieve the ARM CPU physical identifier (MPIDR)
+ */
+static int map_gicc_mpidr(struct acpi_subtable_header *entry,
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ container_of(entry, struct acpi_madt_generic_interrupt, header);
+
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return -ENODEV;
+
+ /* device_declaration means Device object in DSDT, in the
+ * GIC interrupt model, logical processors are required to
+ * have a Processor Device object in the DSDT, so we should
+ * check device_declaration here
+ */
+ if (device_declaration && (gicc->uid == acpi_id)) {
+ *mpidr = gicc->arm_mpidr;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static phys_cpuid_t map_madt_entry(int type, u32 acpi_id)
{
unsigned long madt_end, entry;
- int phys_id = -1; /* CPU hardware ID */
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
struct acpi_table_madt *madt;
madt = get_madt_table();
@@ -111,18 +136,21 @@ static int map_madt_entry(int type, u32 acpi_id)
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
+ } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
+ break;
}
entry += header->length;
}
return phys_id;
}
-static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int phys_id = -1;
+ phys_cpuid_t phys_id = PHYS_CPUID_INVALID;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -143,33 +171,35 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
map_x2apic_id(header, type, acpi_id, &phys_id);
+ else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = map_mat_entry(handle, type, acpi_id);
- if (phys_id == -1)
+ if (phys_id == PHYS_CPUID_INVALID)
phys_id = map_madt_entry(type, acpi_id);
return phys_id;
}
-int acpi_map_cpuid(int phys_id, u32 acpi_id)
+int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (phys_id == -1) {
+ if (phys_id == PHYS_CPUID_INVALID) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above phys_id is always set to -1.
+ * So above phys_id is always set to PHYS_CPUID_INVALID.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -190,7 +220,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return phys_id;
+ return -1;
}
#ifdef CONFIG_SMP
@@ -208,7 +238,7 @@ int acpi_map_cpuid(int phys_id, u32 acpi_id)
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int phys_id;
+ phys_cpuid_t phys_id;
phys_id = acpi_get_phys_id(handle, type, acpi_id);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 69bc0d888c01..03141aa4ea95 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -375,7 +375,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
struct acpi_device_physical_node *pn;
bool offline = true;
- mutex_lock(&adev->physical_node_lock);
+ /*
+ * acpi_container_offline() calls this for all of the container's
+ * children under the container's physical_node_lock lock.
+ */
+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
list_for_each_entry(pn, &adev->physical_node_list, node)
if (device_supports_offline(pn->dev) && !pn->dev->offline) {
@@ -2388,9 +2392,6 @@ static void acpi_default_enumeration(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
- if (!device->pnp.type.platform_id || device->handler)
- return;
-
/*
* Do not enemerate SPI/I2C slaves as they will be enuerated by their
* respective parents.
@@ -2403,6 +2404,29 @@ static void acpi_default_enumeration(struct acpi_device *device)
acpi_create_platform_device(device);
}
+static const struct acpi_device_id generic_device_ids[] = {
+ {"PRP0001", },
+ {"", },
+};
+
+static int acpi_generic_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+{
+ /*
+ * Since PRP0001 is the only ID handled here, the test below can be
+ * unconditional.
+ */
+ if (adev->data.of_compatible)
+ acpi_default_enumeration(adev);
+
+ return 1;
+}
+
+static struct acpi_scan_handler generic_device_handler = {
+ .ids = generic_device_ids,
+ .attach = acpi_generic_device_attach,
+};
+
static int acpi_scan_attach_handler(struct acpi_device *device)
{
struct acpi_hardware_id *hwid;
@@ -2428,8 +2452,6 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
break;
}
}
- if (!ret)
- acpi_default_enumeration(device);
return ret;
}
@@ -2471,6 +2493,9 @@ static void acpi_bus_attach(struct acpi_device *device)
ret = device_attach(&device->dev);
if (ret < 0)
return;
+
+ if (!ret && device->pnp.type.platform_id)
+ acpi_default_enumeration(device);
}
device->flags.visited = true;
@@ -2629,6 +2654,8 @@ int __init acpi_scan_init(void)
acpi_pnp_init();
acpi_int340x_thermal_init();
+ acpi_scan_add_handler(&generic_device_handler);
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 93b81523a2fe..2e19189da0ee 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -23,6 +23,8 @@
*
*/
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
@@ -61,9 +63,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_apic *p =
(struct acpi_madt_local_apic *)header;
- pr_info("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
- p->processor_id, p->id,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
+ p->processor_id, p->id,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -71,9 +73,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_x2apic *p =
(struct acpi_madt_local_x2apic *)header;
- pr_info("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
- p->local_apic_id, p->uid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
+ p->local_apic_id, p->uid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -81,8 +83,8 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_apic *p =
(struct acpi_madt_io_apic *)header;
- pr_info("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
- p->id, p->address, p->global_irq_base);
+ pr_debug("IOAPIC (id[0x%02x] address[0x%08x] gsi_base[%d])\n",
+ p->id, p->address, p->global_irq_base);
}
break;
@@ -155,9 +157,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_io_sapic *p =
(struct acpi_madt_io_sapic *)header;
- pr_info("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
- p->id, (void *)(unsigned long)p->address,
- p->global_irq_base);
+ pr_debug("IOSAPIC (id[0x%x] address[%p] gsi_base[%d])\n",
+ p->id, (void *)(unsigned long)p->address,
+ p->global_irq_base);
}
break;
@@ -165,9 +167,9 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
struct acpi_madt_local_sapic *p =
(struct acpi_madt_local_sapic *)header;
- pr_info("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
- p->processor_id, p->id, p->eid,
- (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+ pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
+ p->processor_id, p->id, p->eid,
+ (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
}
break;
@@ -183,6 +185,28 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
break;
+ case ACPI_MADT_TYPE_GENERIC_INTERRUPT:
+ {
+ struct acpi_madt_generic_interrupt *p =
+ (struct acpi_madt_generic_interrupt *)header;
+ pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
+ p->uid, p->base_address,
+ p->arm_mpidr,
+ (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
+
+ }
+ break;
+
+ case ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR:
+ {
+ struct acpi_madt_generic_distributor *p =
+ (struct acpi_madt_generic_distributor *)header;
+ pr_debug("GIC Distributor (gic_id[0x%04x] address[%llx] gsi_base[%d])\n",
+ p->gic_id, p->base_address,
+ p->global_irq_base);
+ }
+ break;
+
default:
pr_warn("Found unsupported MADT entry (type = 0x%x)\n",
header->type);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 25798db14553..68f03141e432 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -157,10 +157,10 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
+ err = vfs_mkdir(d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
- dentry->d_inode->i_private = &thread;
+ d_inode(dentry)->i_private = &thread;
done_path_create(&path, dentry);
return err;
}
@@ -207,7 +207,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, dev->devt);
+ err = vfs_mknod(d_inode(path.dentry), dentry, mode, dev->devt);
if (!err) {
struct iattr newattrs;
@@ -215,12 +215,12 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_uid = uid;
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
/* mark as kernel-created inode */
- dentry->d_inode->i_private = &thread;
+ d_inode(dentry)->i_private = &thread;
}
done_path_create(&path, dentry);
return err;
@@ -235,16 +235,16 @@ static int dev_rmdir(const char *name)
dentry = kern_path_locked(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &thread)
- err = vfs_rmdir(parent.dentry->d_inode, dentry);
+ if (d_really_is_positive(dentry)) {
+ if (d_inode(dentry)->i_private == &thread)
+ err = vfs_rmdir(d_inode(parent.dentry), dentry);
else
err = -EPERM;
} else {
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent.dentry)->i_mutex);
path_put(&parent);
return err;
}
@@ -306,11 +306,11 @@ static int handle_remove(const char *nodename, struct device *dev)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- if (dentry->d_inode) {
+ if (d_really_is_positive(dentry)) {
struct kstat stat;
struct path p = {.mnt = parent.mnt, .dentry = dentry};
err = vfs_getattr(&p, &stat);
- if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
struct iattr newattrs;
/*
* before unlinking this node, reset permissions
@@ -321,10 +321,10 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
+ mutex_lock(&d_inode(dentry)->i_mutex);
notify_change(dentry, &newattrs, NULL);
- mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
+ mutex_unlock(&d_inode(dentry)->i_mutex);
+ err = vfs_unlink(d_inode(parent.dentry), dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
}
@@ -332,7 +332,7 @@ static int handle_remove(const char *nodename, struct device *dev)
err = -ENOENT;
}
dput(dentry);
- mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent.dentry)->i_mutex);
path_put(&parent);
if (deleted && strchr(nodename, '/'))
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index 9a950022ff88..a6ee3d750c30 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -424,7 +424,7 @@ static int in_flight_summary_show(struct seq_file *m, void *pos)
* So we have our own inline version of it above. :-( */
static inline int debugfs_positive(struct dentry *dentry)
{
- return dentry->d_inode && !d_unhashed(dentry);
+ return d_really_is_positive(dentry) && !d_unhashed(dentry);
}
/* make sure at *open* time that the respective object won't go away. */
@@ -439,15 +439,15 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo
* or has debugfs_remove() already been called? */
parent = file->f_path.dentry->d_parent;
/* not sure if this can happen: */
- if (!parent || !parent->d_inode)
+ if (!parent || d_really_is_negative(parent))
goto out;
/* serialize with d_delete() */
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
/* Make sure the object is still alive */
if (debugfs_positive(file->f_path.dentry)
&& kref_get_unless_zero(kref))
ret = 0;
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
if (!ret) {
ret = single_open(file, show, data);
if (ret)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b40af3203089..812523330a78 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3762,8 +3762,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
goto out_tag_set;
}
- /* We use the default size, but let's be explicit about it. */
- blk_queue_physical_block_size(q, SECTOR_SIZE);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
@@ -5301,8 +5301,13 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
if (mapping) {
ret = rbd_dev_header_watch_sync(rbd_dev);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT)
+ pr_info("image %s/%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->image_name);
goto out_header_name;
+ }
}
ret = rbd_dev_header_info(rbd_dev);
@@ -5319,8 +5324,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
ret = rbd_spec_fill_snap_id(rbd_dev);
else
ret = rbd_spec_fill_names(rbd_dev);
- if (ret)
+ if (ret) {
+ if (ret == -ENOENT)
+ pr_info("snap %s/%s@%s does not exist\n",
+ rbd_dev->spec->pool_name,
+ rbd_dev->spec->image_name,
+ rbd_dev->spec->snap_name);
goto err_out_probe;
+ }
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
@@ -5390,8 +5401,11 @@ static ssize_t do_rbd_add(struct bus_type *bus,
/* pick the pool */
rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENOENT)
+ pr_info("pool %s does not exist\n", spec->pool_name);
goto err_out_client;
+ }
spec->pool_id = (u64)rc;
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5673,7 +5687,7 @@ static int __init rbd_init(void)
/*
* The number of active work items is limited by the number of
- * rbd devices, so leave @max_active at default.
+ * rbd devices * queue depth, so leave @max_active at default.
*/
rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
if (!rbd_wq) {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 266469691e58..0aa135ddbf80 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
+#include <linux/acpi.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
@@ -371,8 +372,12 @@ arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
if (arch_timer_rate)
return;
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
+ /*
+ * Try to determine the frequency from the device tree or CNTFRQ,
+ * if ACPI is enabled, get the frequency from CNTFRQ ONLY.
+ */
+ if (!acpi_disabled ||
+ of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
if (cntbase)
arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
else
@@ -691,28 +696,8 @@ static void __init arch_timer_common_init(void)
arch_timer_arch_init();
}
-static void __init arch_timer_init(struct device_node *np)
+static void __init arch_timer_init(void)
{
- int i;
-
- if (arch_timers_present & ARCH_CP15_TIMER) {
- pr_warn("arch_timer: multiple nodes in dt, skipping\n");
- return;
- }
-
- arch_timers_present |= ARCH_CP15_TIMER;
- for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
- arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
- arch_timer_detect_rate(NULL, np);
-
- /*
- * If we cannot rely on firmware initializing the timer registers then
- * we should use the physical timers instead.
- */
- if (IS_ENABLED(CONFIG_ARM) &&
- of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
-
/*
* If HYP mode is available, we know that the physical timer
* has been configured to be accessible from PL1. Use it, so
@@ -731,13 +716,39 @@ static void __init arch_timer_init(struct device_node *np)
}
}
- arch_timer_c3stop = !of_property_read_bool(np, "always-on");
-
arch_timer_register();
arch_timer_common_init();
}
-CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
-CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
+
+static void __init arch_timer_of_init(struct device_node *np)
+{
+ int i;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: multiple nodes in dt, skipping\n");
+ return;
+ }
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+ for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
+ arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+
+ arch_timer_detect_rate(NULL, np);
+
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
+ /*
+ * If we cannot rely on firmware initializing the timer registers then
+ * we should use the physical timers instead.
+ */
+ if (IS_ENABLED(CONFIG_ARM) &&
+ of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
+ arch_timer_use_virtual = false;
+
+ arch_timer_init();
+}
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
+CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
static void __init arch_timer_mem_init(struct device_node *np)
{
@@ -804,3 +815,70 @@ static void __init arch_timer_mem_init(struct device_node *np)
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
arch_timer_mem_init);
+
+#ifdef CONFIG_ACPI
+static int __init map_generic_timer_interrupt(u32 interrupt, u32 flags)
+{
+ int trigger, polarity;
+
+ if (!interrupt)
+ return 0;
+
+ trigger = (flags & ACPI_GTDT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE
+ : ACPI_LEVEL_SENSITIVE;
+
+ polarity = (flags & ACPI_GTDT_INTERRUPT_POLARITY) ? ACPI_ACTIVE_LOW
+ : ACPI_ACTIVE_HIGH;
+
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+}
+
+/* Initialize per-processor generic timer */
+static int __init arch_timer_acpi_init(struct acpi_table_header *table)
+{
+ struct acpi_table_gtdt *gtdt;
+
+ if (arch_timers_present & ARCH_CP15_TIMER) {
+ pr_warn("arch_timer: already initialized, skipping\n");
+ return -EINVAL;
+ }
+
+ gtdt = container_of(table, struct acpi_table_gtdt, header);
+
+ arch_timers_present |= ARCH_CP15_TIMER;
+
+ arch_timer_ppi[PHYS_SECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->secure_el1_interrupt,
+ gtdt->secure_el1_flags);
+
+ arch_timer_ppi[PHYS_NONSECURE_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el1_interrupt,
+ gtdt->non_secure_el1_flags);
+
+ arch_timer_ppi[VIRT_PPI] =
+ map_generic_timer_interrupt(gtdt->virtual_timer_interrupt,
+ gtdt->virtual_timer_flags);
+
+ arch_timer_ppi[HYP_PPI] =
+ map_generic_timer_interrupt(gtdt->non_secure_el2_interrupt,
+ gtdt->non_secure_el2_flags);
+
+ /* Get the frequency from CNTFRQ */
+ arch_timer_detect_rate(NULL, NULL);
+
+ /* Always-on capability */
+ arch_timer_c3stop = !(gtdt->non_secure_el1_flags & ACPI_GTDT_ALWAYS_ON);
+
+ arch_timer_init();
+ return 0;
+}
+
+/* Initialize all the generic timers presented in GTDT */
+void __init acpi_generic_timer_init(void)
+{
+ if (acpi_disabled)
+ return;
+
+ acpi_table_parse(ACPI_SIG_GTDT, arch_timer_acpi_init);
+}
+#endif
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index c5b81beccc8e..6414661ac1c4 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -31,6 +31,7 @@
#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
+#include <asm/cpufeature.h>
#define BYT_RATIOS 0x66a
#define BYT_VIDS 0x66b
@@ -649,7 +650,7 @@ static struct cpu_defaults byt_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 97,
+ .setpoint = 60,
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
@@ -1200,8 +1201,7 @@ static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
- struct cpu_defaults *cpu_info;
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct cpu_defaults *cpu_def;
if (no_load)
return -ENODEV;
@@ -1217,10 +1217,10 @@ static int __init intel_pstate_init(void)
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
- cpu_info = (struct cpu_defaults *)id->driver_data;
+ cpu_def = (struct cpu_defaults *)id->driver_data;
- copy_pid_params(&cpu_info->pid_policy);
- copy_cpu_funcs(&cpu_info->funcs);
+ copy_pid_params(&cpu_def->pid_policy);
+ copy_cpu_funcs(&cpu_def->funcs);
if (intel_pstate_msrs_not_valid())
return -ENODEV;
@@ -1231,7 +1231,7 @@ static int __init intel_pstate_init(void)
if (!all_cpu_data)
return -ENOMEM;
- if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
+ if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
intel_pstate_hwp_enable();
if (!hwp_active && hwp_only)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 800bf41718e1..033c0c86f6ec 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -446,8 +446,9 @@ config CRYPTO_DEV_VMX
source "drivers/crypto/vmx/Kconfig"
config CRYPTO_DEV_IMGTEC_HASH
- depends on MIPS || COMPILE_TEST
tristate "Imagination Technologies hardware hash accelerator"
+ depends on MIPS || COMPILE_TEST
+ depends on HAS_DMA
select CRYPTO_ALGAPI
select CRYPTO_MD5
select CRYPTO_SHA1
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5be225c2ba98..c5a9138a6a8d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -265,43 +265,40 @@ static inline int is_dma_buf_file(struct file *file)
}
/**
- * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
* with this buffer, so it can be exported.
* Also connect the allocator specific data and ops to the buffer.
* Additionally, provide a name string for exporter; useful in debugging.
*
- * @priv: [in] Attach private data of allocator to this buffer
- * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
- * @size: [in] Size of the buffer
- * @flags: [in] mode flags for the file.
- * @exp_name: [in] name of the exporting module - useful for debugging.
- * @resv: [in] reservation-object, NULL to allocate default one.
+ * @exp_info: [in] holds all the export related information provided
+ * by the exporter. see struct dma_buf_export_info
+ * for further details.
*
* Returns, on success, a newly created dma_buf object, which wraps the
* supplied private data and operations for dma_buf_ops. On either missing
* ops, or error in allocating struct dma_buf, will return negative error.
*
*/
-struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
- size_t size, int flags, const char *exp_name,
- struct reservation_object *resv)
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
struct dma_buf *dmabuf;
+ struct reservation_object *resv = exp_info->resv;
struct file *file;
size_t alloc_size = sizeof(struct dma_buf);
- if (!resv)
+ if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
- if (WARN_ON(!priv || !ops
- || !ops->map_dma_buf
- || !ops->unmap_dma_buf
- || !ops->release
- || !ops->kmap_atomic
- || !ops->kmap
- || !ops->mmap)) {
+ if (WARN_ON(!exp_info->priv
+ || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release
+ || !exp_info->ops->kmap_atomic
+ || !exp_info->ops->kmap
+ || !exp_info->ops->mmap)) {
return ERR_PTR(-EINVAL);
}
@@ -309,10 +306,10 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
if (dmabuf == NULL)
return ERR_PTR(-ENOMEM);
- dmabuf->priv = priv;
- dmabuf->ops = ops;
- dmabuf->size = size;
- dmabuf->exp_name = exp_name;
+ dmabuf->priv = exp_info->priv;
+ dmabuf->ops = exp_info->ops;
+ dmabuf->size = exp_info->size;
+ dmabuf->exp_name = exp_info->exp_name;
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
@@ -323,7 +320,8 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
}
dmabuf->resv = resv;
- file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+ file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
+ exp_info->flags);
if (IS_ERR(file)) {
kfree(dmabuf);
return ERR_CAST(file);
@@ -341,8 +339,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
return dmabuf;
}
-EXPORT_SYMBOL_GPL(dma_buf_export_named);
-
+EXPORT_SYMBOL_GPL(dma_buf_export);
/**
* dma_buf_fd - returns a file descriptor for the given dma_buf
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 91eced044321..fd7ac13f2574 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -112,6 +112,17 @@ config FSL_DMA
EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
some Txxx and Bxxx parts.
+config FSL_RAID
+ tristate "Freescale RAID engine Support"
+ depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ ---help---
+ Enable support for Freescale RAID Engine. RAID Engine is
+ available on some QorIQ SoCs (like P5020/P5040). It has
+ the capability to offload memcpy, xor and pq computation
+ for raid5/6.
+
source "drivers/dma/hsu/Kconfig"
config MPC512X_DMA
@@ -347,6 +358,16 @@ config DMA_JZ4740
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config DMA_JZ4780
+ tristate "JZ4780 DMA support"
+ depends on MACH_JZ4780
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+ If you have a board based on such a SoC and wish to use DMA for
+ devices which can use the DMA controller, say Y or M here.
+
config K3_DMA
tristate "Hisilicon K3 DMA support"
depends on ARCH_HI3xxx
@@ -414,6 +435,14 @@ config IMG_MDC_DMA
help
Enable support for the IMG multi-threaded DMA controller (MDC).
+config XGENE_DMA
+ tristate "APM X-Gene DMA support"
+ select DMA_ENGINE
+ select DMA_ENGINE_RAID
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Enable support for the APM X-Gene SoC DMA engine.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7e8301cb489d..69f77d5ba53b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,9 +41,11 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-y += xilinx/
@@ -51,3 +53,4 @@ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 83aa55d6fa5d..49d396ec06e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -15,10 +15,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is in this distribution in the file
* called COPYING.
*
@@ -1195,11 +1191,6 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
/*
* The DMA ENGINE API
*/
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -2066,7 +2057,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
- pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
@@ -2085,7 +2075,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
pl08x->slave.dev = &adev->dev;
- pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
pl08x->slave.device_tx_status = pl08x_dma_tx_status;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 0b4fc6fb48ce..57b2141ddddc 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -65,6 +65,21 @@ static void atc_issue_pending(struct dma_chan *chan);
/*----------------------------------------------------------------------*/
+static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
+ size_t len)
+{
+ unsigned int width;
+
+ if (!((src | dst | len) & 3))
+ width = 2;
+ else if (!((src | dst | len) & 1))
+ width = 1;
+ else
+ width = 0;
+
+ return width;
+}
+
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
return list_first_entry(&atchan->active_list,
@@ -659,16 +674,10 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* We can be a lot more clever here, but this should take care
* of the most common optimization.
*/
- if (!((src | dest | len) & 3)) {
- ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
- src_width = dst_width = 2;
- } else if (!((src | dest | len) & 1)) {
- ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
- src_width = dst_width = 1;
- } else {
- ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
- src_width = dst_width = 0;
- }
+ src_width = dst_width = atc_get_xfer_width(src, dest, len);
+
+ ctrla = ATC_SRC_WIDTH(src_width) |
+ ATC_DST_WIDTH(dst_width);
for (offset = 0; offset < len; offset += xfer_count << src_width) {
xfer_count = min_t(size_t, (len - offset) >> src_width,
@@ -862,6 +871,144 @@ err:
}
/**
+ * atc_prep_dma_sg - prepare memory to memory scather-gather operation
+ * @chan: the channel to prepare operation on
+ * @dst_sg: destination scatterlist
+ * @dst_nents: number of destination scatterlist entries
+ * @src_sg: source scatterlist
+ * @src_nents: number of source scatterlist entries
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_desc *desc = NULL;
+ struct at_desc *first = NULL;
+ struct at_desc *prev = NULL;
+ unsigned int src_width;
+ unsigned int dst_width;
+ size_t xfer_count;
+ u32 ctrla;
+ u32 ctrlb;
+ size_t dst_len = 0, src_len = 0;
+ dma_addr_t dst = 0, src = 0;
+ size_t len = 0, total_len = 0;
+
+ if (unlikely(dst_nents == 0 || src_nents == 0))
+ return NULL;
+
+ if (unlikely(dst_sg == NULL || src_sg == NULL))
+ return NULL;
+
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+ | ATC_SRC_ADDR_MODE_INCR
+ | ATC_DST_ADDR_MODE_INCR
+ | ATC_FC_MEM2MEM;
+
+ /*
+ * loop until there is either no more source or no more destination
+ * scatterlist entry
+ */
+ while (true) {
+
+ /* prepare the next transfer */
+ if (dst_len == 0) {
+
+ /* no more destination scatterlist entries */
+ if (!dst_sg || !dst_nents)
+ break;
+
+ dst = sg_dma_address(dst_sg);
+ dst_len = sg_dma_len(dst_sg);
+
+ dst_sg = sg_next(dst_sg);
+ dst_nents--;
+ }
+
+ if (src_len == 0) {
+
+ /* no more source scatterlist entries */
+ if (!src_sg || !src_nents)
+ break;
+
+ src = sg_dma_address(src_sg);
+ src_len = sg_dma_len(src_sg);
+
+ src_sg = sg_next(src_sg);
+ src_nents--;
+ }
+
+ len = min_t(size_t, src_len, dst_len);
+ if (len == 0)
+ continue;
+
+ /* take care for the alignment */
+ src_width = dst_width = atc_get_xfer_width(src, dst, len);
+
+ ctrla = ATC_SRC_WIDTH(src_width) |
+ ATC_DST_WIDTH(dst_width);
+
+ /*
+ * The number of transfers to set up refer to the source width
+ * that depends on the alignment.
+ */
+ xfer_count = len >> src_width;
+ if (xfer_count > ATC_BTSIZE_MAX) {
+ xfer_count = ATC_BTSIZE_MAX;
+ len = ATC_BTSIZE_MAX << src_width;
+ }
+
+ /* create the transfer */
+ desc = atc_desc_get(atchan);
+ if (!desc)
+ goto err_desc_get;
+
+ desc->lli.saddr = src;
+ desc->lli.daddr = dst;
+ desc->lli.ctrla = ctrla | xfer_count;
+ desc->lli.ctrlb = ctrlb;
+
+ desc->txd.cookie = 0;
+ desc->len = len;
+
+ /*
+ * Although we only need the transfer width for the first and
+ * the last descriptor, its easier to set it to all descriptors.
+ */
+ desc->tx_width = src_width;
+
+ atc_desc_chain(&first, &prev, desc);
+
+ /* update the lengths and addresses for the next loop cycle */
+ dst_len -= len;
+ src_len -= len;
+ dst += len;
+ src += len;
+
+ total_len += len;
+ }
+
+	/* First descriptor of the chain embeds additional information */
+ first->txd.cookie = -EBUSY;
+ first->total_len = total_len;
+
+	/* set end-of-link to the last link descriptor of the list */
+ set_desc_eol(desc);
+
+ first->txd.flags = flags; /* client is in control of this ack */
+
+ return &first->txd;
+
+err_desc_get:
+ atc_desc_put(atchan, first);
+ return NULL;
+}
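+
+/*
+ * Chunking example (sizes are hypothetical): with source entries of 6 KiB
+ * and 2 KiB against destination entries of 4 KiB and 4 KiB, the loop above
+ * emits three descriptors of 4 KiB, 2 KiB and 2 KiB, each chunk sized to
+ * the smaller of the two remaining entries and further capped at
+ * ATC_BTSIZE_MAX transfer units.
+ */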
+
+/**
* atc_dma_cyclic_check_values
* Check for too big/unaligned periods and unaligned DMA buffer
*/
@@ -1461,8 +1608,10 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* setup platform data for each SoC */
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
/* get DMA parameters from controller type */
plat_dat = at_dma_get_driver_data(pdev);
@@ -1582,11 +1731,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
}
+ if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
+ atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
+
dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
+ dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
+ dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d9891d3461f6..933e4b338459 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1154,8 +1154,10 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
dev_dbg(chan2dev(chan), "%s\n", __func__);
spin_lock_bh(&atchan->lock);
- if (!at_xdmac_chan_is_paused(atchan))
+ if (!at_xdmac_chan_is_paused(atchan)) {
+ spin_unlock_bh(&atchan->lock);
return 0;
+ }
at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index fa378d88f6c8..180fedb418cc 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -30,7 +30,7 @@
#define DRIVER_NAME "bestcomm-core"
/* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_sram_ids[] = {
+static const struct of_device_id mpc52xx_sram_ids[] = {
{ .compatible = "fsl,mpc5200-sram", },
{ .compatible = "mpc5200-sram", },
{}
@@ -481,7 +481,7 @@ static int mpc52xx_bcom_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc52xx_bcom_of_match[] = {
+static const struct of_device_id mpc52xx_bcom_of_match[] = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{},
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 84884418fd30..7638b24ce8d0 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -7,10 +7,6 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#include <linux/dmaengine.h>
@@ -343,7 +339,7 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
{
spin_lock(&chan->vchan.lock);
if (chan->desc) {
- if (chan->desc && chan->desc->cyclic) {
+ if (chan->desc->cyclic) {
vchan_cyclic_callback(&chan->desc->vdesc);
} else {
if (chan->next_sg == chan->desc->num_sgs) {
@@ -496,11 +492,6 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
return status;
}
-static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
-{
- return 0;
-}
-
static void jz4740_dma_free_chan_resources(struct dma_chan *c)
{
vchan_free_chan_resources(to_virt_chan(c));
@@ -543,7 +534,6 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, dd->cap_mask);
dma_cap_set(DMA_CYCLIC, dd->cap_mask);
- dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
dd->device_tx_status = jz4740_dma_tx_status;
dd->device_issue_pending = jz4740_dma_issue_pending;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
new file mode 100644
index 000000000000..26d2f0e09ea3
--- /dev/null
+++ b/drivers/dma/dma-jz4780.c
@@ -0,0 +1,877 @@
+/*
+ * Ingenic JZ4780 DMA controller
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define JZ_DMA_NR_CHANNELS 32
+
+/* Global registers. */
+#define JZ_DMA_REG_DMAC 0x1000
+#define JZ_DMA_REG_DIRQP 0x1004
+#define JZ_DMA_REG_DDR 0x1008
+#define JZ_DMA_REG_DDRS 0x100c
+#define JZ_DMA_REG_DMACP 0x101c
+#define JZ_DMA_REG_DSIRQP 0x1020
+#define JZ_DMA_REG_DSIRQM 0x1024
+#define JZ_DMA_REG_DCIRQP 0x1028
+#define JZ_DMA_REG_DCIRQM 0x102c
+
+/* Per-channel registers. */
+#define JZ_DMA_REG_CHAN(n) (n * 0x20)
+#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
+
+#define JZ_DMA_DMAC_DMAE BIT(0)
+#define JZ_DMA_DMAC_AR BIT(2)
+#define JZ_DMA_DMAC_HLT BIT(3)
+#define JZ_DMA_DMAC_FMSC BIT(31)
+
+#define JZ_DMA_DRT_AUTO 0x8
+
+#define JZ_DMA_DCS_CTE BIT(0)
+#define JZ_DMA_DCS_HLT BIT(2)
+#define JZ_DMA_DCS_TT BIT(3)
+#define JZ_DMA_DCS_AR BIT(4)
+#define JZ_DMA_DCS_DES8 BIT(30)
+
+#define JZ_DMA_DCM_LINK BIT(0)
+#define JZ_DMA_DCM_TIE BIT(1)
+#define JZ_DMA_DCM_STDE BIT(2)
+#define JZ_DMA_DCM_TSZ_SHIFT 8
+#define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
+#define JZ_DMA_DCM_DP_SHIFT 12
+#define JZ_DMA_DCM_SP_SHIFT 14
+#define JZ_DMA_DCM_DAI BIT(22)
+#define JZ_DMA_DCM_SAI BIT(23)
+
+#define JZ_DMA_SIZE_4_BYTE 0x0
+#define JZ_DMA_SIZE_1_BYTE 0x1
+#define JZ_DMA_SIZE_2_BYTE 0x2
+#define JZ_DMA_SIZE_16_BYTE 0x3
+#define JZ_DMA_SIZE_32_BYTE 0x4
+#define JZ_DMA_SIZE_64_BYTE 0x5
+#define JZ_DMA_SIZE_128_BYTE 0x6
+
+#define JZ_DMA_WIDTH_32_BIT 0x0
+#define JZ_DMA_WIDTH_8_BIT 0x1
+#define JZ_DMA_WIDTH_16_BIT 0x2
+
+#define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+/**
+ * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
+ * @dcm: value for the DCM (channel command) register
+ * @dsa: source address
+ * @dta: target address
+ * @dtc: transfer count (number of blocks of the transfer size specified in DCM
+ * to transfer) in the low 24 bits, offset of the next descriptor from the
+ * descriptor base address in the upper 8 bits.
+ * @sd: target/source stride difference (in stride transfer mode).
+ * @drt: request type
+ */
+struct jz4780_dma_hwdesc {
+ uint32_t dcm;
+ uint32_t dsa;
+ uint32_t dta;
+ uint32_t dtc;
+ uint32_t sd;
+ uint32_t drt;
+ uint32_t reserved[2];
+};
+
+/* Size of allocations for hardware descriptor blocks. */
+#define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
+#define JZ_DMA_MAX_DESC \
+ (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
+
+struct jz4780_dma_desc {
+ struct virt_dma_desc vdesc;
+
+ struct jz4780_dma_hwdesc *desc;
+ dma_addr_t desc_phys;
+ unsigned int count;
+ enum dma_transaction_type type;
+ uint32_t status;
+};
+
+struct jz4780_dma_chan {
+ struct virt_dma_chan vchan;
+ unsigned int id;
+ struct dma_pool *desc_pool;
+
+ uint32_t transfer_type;
+ uint32_t transfer_shift;
+ struct dma_slave_config config;
+
+ struct jz4780_dma_desc *desc;
+ unsigned int curr_hwdesc;
+};
+
+struct jz4780_dma_dev {
+ struct dma_device dma_device;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int irq;
+
+ uint32_t chan_reserved;
+ struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+};
+
+struct jz4780_dma_data {
+ uint32_t transfer_type;
+ int channel;
+};
+
+static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct jz4780_dma_chan, vchan.chan);
+}
+
+static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
+ struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct jz4780_dma_desc, vdesc);
+}
+
+static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
+ struct jz4780_dma_chan *jzchan)
+{
+ return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
+ dma_device);
+}
+
+static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+ unsigned int reg)
+{
+ return readl(jzdma->base + reg);
+}
+
+static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+ unsigned int reg, uint32_t val)
+{
+ writel(val, jzdma->base + reg);
+}
+
+static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
+ struct jz4780_dma_chan *jzchan, unsigned int count,
+ enum dma_transaction_type type)
+{
+ struct jz4780_dma_desc *desc;
+
+ if (count > JZ_DMA_MAX_DESC)
+ return NULL;
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
+ &desc->desc_phys);
+ if (!desc->desc) {
+ kfree(desc);
+ return NULL;
+ }
+
+ desc->count = count;
+ desc->type = type;
+ return desc;
+}
+
+static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+ struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);
+
+ dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
+ kfree(desc);
+}
+
+static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+{
+ *ord = ffs(val) - 1;
+
+ switch (*ord) {
+ case 0:
+ return JZ_DMA_SIZE_1_BYTE;
+ case 1:
+ return JZ_DMA_SIZE_2_BYTE;
+ case 2:
+ return JZ_DMA_SIZE_4_BYTE;
+ case 4:
+ return JZ_DMA_SIZE_16_BYTE;
+ case 5:
+ return JZ_DMA_SIZE_32_BYTE;
+ case 6:
+ return JZ_DMA_SIZE_64_BYTE;
+ case 7:
+ return JZ_DMA_SIZE_128_BYTE;
+ default:
+ return -EINVAL;
+ }
+}
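+
+/*
+ * Worked example (values are hypothetical): for val = 0x20001010 | 0x30 |
+ * 0x10 = 0x20001030, the lowest set bit is bit 4, so *ord = 4 and the
+ * largest unit that divides the address, length and burst is 2^4 = 16
+ * bytes, i.e. JZ_DMA_SIZE_16_BYTE.
+ */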
+
+static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+ struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
+ enum dma_transfer_direction direction)
+{
+ struct dma_slave_config *config = &jzchan->config;
+ uint32_t width, maxburst, tsz;
+ int ord;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ desc->dcm = JZ_DMA_DCM_SAI;
+ desc->dsa = addr;
+ desc->dta = config->dst_addr;
+ desc->drt = jzchan->transfer_type;
+
+ width = config->dst_addr_width;
+ maxburst = config->dst_maxburst;
+ } else {
+ desc->dcm = JZ_DMA_DCM_DAI;
+ desc->dsa = config->src_addr;
+ desc->dta = addr;
+ desc->drt = jzchan->transfer_type;
+
+ width = config->src_addr_width;
+ maxburst = config->src_maxburst;
+ }
+
+ /*
+ * This calculates the maximum transfer size that can be used with the
+ * given address, length, width and maximum burst size. The address
+ * must be aligned to the transfer size, the total length must be
+ * divisible by the transfer size, and we must not use more than the
+ * maximum burst specified by the user.
+ */
+ tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
+ jzchan->transfer_shift = ord;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ width = JZ_DMA_WIDTH_32_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
+ desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
+ desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
+
+	desc->dtc = len >> ord;
+
+	return 0;
+}
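+
+/*
+ * A minimal sketch of the slave configuration a client driver might set up
+ * before preparing a transfer (fifo_phys_addr is a placeholder):
+ *
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.dst_addr = fifo_phys_addr,
+ *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ *		.dst_maxburst = 16,
+ *	};
+ *	dmaengine_slave_config(chan, &cfg);
+ */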
+
+static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ unsigned int i;
+ int err;
+
+ desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < sg_len; i++) {
+ err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
+ sg_dma_address(&sgl[i]),
+ sg_dma_len(&sgl[i]),
+ direction);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
+
+ if (i != (sg_len - 1)) {
+			/* Automatically proceed to the next descriptor. */
+ desc->desc[i].dcm |= JZ_DMA_DCM_LINK;
+
+ /*
+ * The upper 8 bits of the DTC field in the descriptor
+ * must be set to (offset from descriptor base of next
+ * descriptor >> 4).
+ */
+ desc->desc[i].dtc |=
+ (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
+ }
+ }
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
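+
+/*
+ * Link-offset example: each hardware descriptor is 32 bytes, so descriptor
+ * i + 1 sits at byte offset (i + 1) * 32 from the descriptor block base and
+ * DTC[31:24] is written with ((i + 1) * 32) >> 4, i.e. 2 for the second
+ * entry, 4 for the third, and so on.
+ */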
+
+static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ unsigned int periods, i;
+ int err;
+
+ if (buf_len % period_len)
+ return NULL;
+
+ periods = buf_len / period_len;
+
+ desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+ if (!desc)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
+ period_len, direction);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ buf_addr += period_len;
+
+ /*
+ * Set the link bit to indicate that the controller should
+ * automatically proceed to the next descriptor. In
+ * jz4780_dma_begin(), this will be cleared if we need to issue
+ * an interrupt after each period.
+ */
+ desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;
+
+ /*
+ * The upper 8 bits of the DTC field in the descriptor must be
+ * set to (offset from descriptor base of next descriptor >> 4).
+ * If this is the last descriptor, link it back to the first,
+ * i.e. leave offset set to 0, otherwise point to the next one.
+ */
+ if (i != (periods - 1)) {
+ desc->desc[i].dtc |=
+ (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
+ }
+ }
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
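+
+/*
+ * A client would typically drive this through the generic dmaengine
+ * helpers, e.g. for a ring buffer split into equal periods (names are
+ * illustrative):
+ *
+ *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
+ *					 period_len, DMA_MEM_TO_DEV,
+ *					 DMA_PREP_INTERRUPT);
+ *	desc->callback = period_done;
+ *	dmaengine_submit(desc);
+ *	dma_async_issue_pending(chan);
+ */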
+
+struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_desc *desc;
+ uint32_t tsz;
+ int ord;
+
+ desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+ if (!desc)
+ return NULL;
+
+ tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
+ if (tsz < 0)
+ return ERR_PTR(tsz);
+
+ desc->desc[0].dsa = src;
+ desc->desc[0].dta = dest;
+ desc->desc[0].drt = JZ_DMA_DRT_AUTO;
+ desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
+ tsz << JZ_DMA_DCM_TSZ_SHIFT |
+ JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
+ JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
+ desc->desc[0].dtc = len >> ord;
+
+ return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
+}
+
+static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ struct virt_dma_desc *vdesc;
+ unsigned int i;
+ dma_addr_t desc_phys;
+
+ if (!jzchan->desc) {
+ vdesc = vchan_next_desc(&jzchan->vchan);
+ if (!vdesc)
+ return;
+
+ list_del(&vdesc->node);
+
+ jzchan->desc = to_jz4780_dma_desc(vdesc);
+ jzchan->curr_hwdesc = 0;
+
+ if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
+ /*
+ * The DMA controller doesn't support triggering an
+ * interrupt after processing each descriptor, only
+ * after processing an entire terminated list of
+ * descriptors. For a cyclic DMA setup the list of
+ * descriptors is not terminated so we can never get an
+ * interrupt.
+ *
+ * If the user requested a callback for a cyclic DMA
+			 * setup then we work around this hardware limitation
+ * here by degrading to a set of unlinked descriptors
+ * which we will submit in sequence in response to the
+ * completion of processing the previous descriptor.
+ */
+ for (i = 0; i < jzchan->desc->count; i++)
+ jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
+ }
+ } else {
+ /*
+ * There is an existing transfer, therefore this must be one
+ * for which we unlinked the descriptors above. Advance to the
+ * next one in the list.
+ */
+ jzchan->curr_hwdesc =
+ (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
+ }
+
+ /* Use 8-word descriptors. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+
+ /* Write descriptor address and initiate descriptor fetch. */
+ desc_phys = jzchan->desc->desc_phys +
+ (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+
+ /* Enable the channel. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
+ JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+}
+
+static void jz4780_dma_issue_pending(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
+ jz4780_dma_begin(jzchan);
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+}
+
+static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ /* Clear the DMA status and stop the transfer. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ if (jzchan->desc) {
+ jz4780_dma_desc_free(&jzchan->desc->vdesc);
+ jzchan->desc = NULL;
+ }
+
+ vchan_get_all_descriptors(&jzchan->vchan, &head);
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&jzchan->vchan, &head);
+ return 0;
+}
+
+static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
+ const struct dma_slave_config *config)
+{
+ if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
+ return -EINVAL;
+
+	/* Copy the rest of the slave configuration; it is used later. */
+ memcpy(&jzchan->config, config, sizeof(jzchan->config));
+
+ return 0;
+}
+
+static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
+ struct jz4780_dma_desc *desc, unsigned int next_sg)
+{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ unsigned int residue, count;
+ unsigned int i;
+
+ residue = 0;
+
+ for (i = next_sg; i < desc->count; i++)
+ residue += desc->desc[i].dtc << jzchan->transfer_shift;
+
+ if (next_sg != 0) {
+ count = jz4780_dma_readl(jzdma,
+ JZ_DMA_REG_DTC(jzchan->id));
+ residue += count << jzchan->transfer_shift;
+ }
+
+ return residue;
+}
+
+static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if ((status == DMA_COMPLETE) || (txstate == NULL))
+ return status;
+
+ spin_lock_irqsave(&jzchan->vchan.lock, flags);
+
+ vdesc = vchan_find_desc(&jzchan->vchan, cookie);
+ if (vdesc) {
+ /* On the issued list, so hasn't been processed yet */
+ txstate->residue = jz4780_dma_desc_residue(jzchan,
+ to_jz4780_dma_desc(vdesc), 0);
+ } else if (cookie == jzchan->desc->vdesc.tx.cookie) {
+ txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+ (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
+ } else
+ txstate->residue = 0;
+
+ if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
+ && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ status = DMA_ERROR;
+
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+ return status;
+}
+
+static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ struct jz4780_dma_chan *jzchan)
+{
+ uint32_t dcs;
+
+ spin_lock(&jzchan->vchan.lock);
+
+ dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+
+ if (dcs & JZ_DMA_DCS_AR) {
+ dev_warn(&jzchan->vchan.chan.dev->device,
+ "address error (DCS=0x%x)\n", dcs);
+ }
+
+ if (dcs & JZ_DMA_DCS_HLT) {
+ dev_warn(&jzchan->vchan.chan.dev->device,
+ "channel halt (DCS=0x%x)\n", dcs);
+ }
+
+ if (jzchan->desc) {
+ jzchan->desc->status = dcs;
+
+ if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
+ if (jzchan->desc->type == DMA_CYCLIC) {
+ vchan_cyclic_callback(&jzchan->desc->vdesc);
+ } else {
+ vchan_cookie_complete(&jzchan->desc->vdesc);
+ jzchan->desc = NULL;
+ }
+
+ jz4780_dma_begin(jzchan);
+ }
+ } else {
+ dev_err(&jzchan->vchan.chan.dev->device,
+ "channel IRQ with no active transfer\n");
+ }
+
+ spin_unlock(&jzchan->vchan.lock);
+}
+
+static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
+{
+ struct jz4780_dma_dev *jzdma = data;
+ uint32_t pending, dmac;
+ int i;
+
+ pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+
+ for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ if (!(pending & (1<<i)))
+ continue;
+
+ jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+ }
+
+ /* Clear halt and address error status of all channels. */
+ dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+ dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+
+ /* Clear interrupt pending status. */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+
+ return IRQ_HANDLED;
+}
+
+static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
+ chan->device->dev,
+ JZ_DMA_DESC_BLOCK_SIZE,
+ PAGE_SIZE, 0);
+ if (!jzchan->desc_pool) {
+ dev_err(&chan->dev->device,
+ "failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+ vchan_free_chan_resources(&jzchan->vchan);
+ dma_pool_destroy(jzchan->desc_pool);
+ jzchan->desc_pool = NULL;
+}
+
+static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
+ struct jz4780_dma_data *data = param;
+
+ if (data->channel > -1) {
+ if (data->channel != jzchan->id)
+ return false;
+ } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
+ return false;
+ }
+
+ jzchan->transfer_type = data->transfer_type;
+
+ return true;
+}
+
+static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
+ dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
+ struct jz4780_dma_data data;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ data.transfer_type = dma_spec->args[0];
+ data.channel = dma_spec->args[1];
+
+ if (data.channel > -1) {
+ if (data.channel >= JZ_DMA_NR_CHANNELS) {
+ dev_err(jzdma->dma_device.dev,
+ "device requested non-existent channel %u\n",
+ data.channel);
+ return NULL;
+ }
+
+ /* Can only select a channel marked as reserved. */
+ if (!(jzdma->chan_reserved & BIT(data.channel))) {
+ dev_err(jzdma->dma_device.dev,
+ "device requested unreserved channel %u\n",
+ data.channel);
+ return NULL;
+ }
+ }
+
+ return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+}
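+
+/*
+ * A consumer node references the controller with two cells, the request
+ * type and a channel number (values below are illustrative):
+ *
+ *	dmas = <&dma 0x14 0xffffffff>, <&dma 0x15 0xffffffff>;
+ *	dma-names = "tx", "rx";
+ *
+ * where 0xffffffff (-1) lets the driver pick any unreserved channel and a
+ * non-negative value requests that specific reserved channel.
+ */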
+
+static int jz4780_dma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct jz4780_dma_dev *jzdma;
+ struct jz4780_dma_chan *jzchan;
+ struct dma_device *dd;
+ struct resource *res;
+ int i, ret;
+
+ jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ if (!jzdma)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, jzdma);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
+
+ jzdma->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->base))
+ return PTR_ERR(jzdma->base);
+
+ jzdma->irq = platform_get_irq(pdev, 0);
+ if (jzdma->irq < 0) {
+ dev_err(dev, "failed to get IRQ: %d\n", ret);
+ return jzdma->irq;
+ }
+
+ ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
+ dev_name(dev), jzdma);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+ return -EINVAL;
+ }
+
+ jzdma->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(jzdma->clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(jzdma->clk);
+ }
+
+ clk_prepare_enable(jzdma->clk);
+
+	/* Property is optional; if it doesn't exist, the value will remain 0. */
+ of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
+ 0, &jzdma->chan_reserved);
+
+ dd = &jzdma->dma_device;
+
+ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+ dma_cap_set(DMA_SLAVE, dd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+
+ dd->dev = dev;
+ dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+ dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
+ dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
+ dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
+ dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
+ dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
+ dd->device_config = jz4780_dma_slave_config;
+ dd->device_terminate_all = jz4780_dma_terminate_all;
+ dd->device_tx_status = jz4780_dma_tx_status;
+ dd->device_issue_pending = jz4780_dma_issue_pending;
+ dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
+ dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
+ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ /*
+ * Enable DMA controller, mark all channels as not programmable.
+ * Also set the FMSC bit - it increases MSC performance, so it makes
+ * little sense not to enable it.
+ */
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
+ JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
+ jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+
+ INIT_LIST_HEAD(&dd->channels);
+
+ for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ jzchan = &jzdma->chan[i];
+ jzchan->id = i;
+
+ vchan_init(&jzchan->vchan, dd);
+ jzchan->vchan.desc_free = jz4780_dma_desc_free;
+ }
+
+ ret = dma_async_device_register(dd);
+ if (ret) {
+ dev_err(dev, "failed to register device\n");
+ goto err_disable_clk;
+ }
+
+ /* Register with OF DMA helpers. */
+ ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
+ jzdma);
+ if (ret) {
+ dev_err(dev, "failed to register OF DMA controller\n");
+ goto err_unregister_dev;
+ }
+
+ dev_info(dev, "JZ4780 DMA controller initialised\n");
+ return 0;
+
+err_unregister_dev:
+ dma_async_device_unregister(dd);
+
+err_disable_clk:
+ clk_disable_unprepare(jzdma->clk);
+ return ret;
+}
+
+static int jz4780_dma_remove(struct platform_device *pdev)
+{
+ struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+ dma_async_device_unregister(&jzdma->dma_device);
+ return 0;
+}
+
+static const struct of_device_id jz4780_dma_dt_match[] = {
+ { .compatible = "ingenic,jz4780-dma", .data = NULL },
+ {},
+};
+MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
+
+static struct platform_driver jz4780_dma_driver = {
+ .probe = jz4780_dma_probe,
+ .remove = jz4780_dma_remove,
+ .driver = {
+ .name = "jz4780-dma",
+ .of_match_table = of_match_ptr(jz4780_dma_dt_match),
+ },
+};
+
+static int __init jz4780_dma_init(void)
+{
+ return platform_driver_register(&jz4780_dma_driver);
+}
+subsys_initcall(jz4780_dma_init);
+
+static void __exit jz4780_dma_exit(void)
+{
+ platform_driver_unregister(&jz4780_dma_driver);
+}
+module_exit(jz4780_dma_exit);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ac336a961dea..0e035a8cf401 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -355,20 +351,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
}
EXPORT_SYMBOL(dma_find_channel);
-/*
- * net_dma_find_channel - find a channel for net_dma
- * net_dma has alignment requirements
- */
-struct dma_chan *net_dma_find_channel(void)
-{
- struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
- if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
- return NULL;
-
- return chan;
-}
-EXPORT_SYMBOL(net_dma_find_channel);
-
/**
* dma_issue_pending_all - flush all pending operations across all channels
*/
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index dcfe964cc8dc..36e02f0f645e 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -3,7 +3,7 @@
#
config DW_DMAC_CORE
- tristate "Synopsys DesignWare AHB DMA support"
+ tristate
select DMA_ENGINE
config DW_DMAC
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index a8ad05291b27..1022c2e1a2b0 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -230,7 +230,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start non-idle channel\n");
+ "%s: BUG: Attempted to start non-idle channel\n",
+ __func__);
dwc_dump_chan_regs(dwc);
/* The tasklet will hopefully advance the queue... */
@@ -814,11 +815,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
+ if (!desc)
goto err_desc_get;
- }
desc->lli.sar = mem;
desc->lli.dar = reg;
@@ -874,11 +872,8 @@ slave_sg_todev_fill_desc:
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
- if (!desc) {
- dev_err(chan2dev(chan),
- "not enough descriptors available\n");
+ if (!desc)
goto err_desc_get;
- }
desc->lli.sar = reg;
desc->lli.dar = mem;
@@ -922,6 +917,8 @@ slave_sg_fromdev_fill_desc:
return &first->txd;
err_desc_get:
+ dev_err(chan2dev(chan),
+ "not enough descriptors available. Direction %d\n", direction);
dwc_desc_put(dwc, first);
return NULL;
}
@@ -1261,7 +1258,8 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
/* Assert channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
- "BUG: Attempted to start non-idle channel\n");
+ "%s: BUG: Attempted to start non-idle channel\n",
+ __func__);
dwc_dump_chan_regs(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 53dbd3b3384c..bf09db7ca9ee 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -812,7 +812,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
LIST_HEAD(descs);
a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
- chan, EVENTQ_DEFAULT);
+ echan, EVENTQ_DEFAULT);
if (a_ch_num < 0) {
ret = -ENODEV;
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
new file mode 100644
index 000000000000..4d9470f16552
--- /dev/null
+++ b/drivers/dma/fsl_raid.c
@@ -0,0 +1,904 @@
+/*
+ * drivers/dma/fsl_raid.c
+ *
+ * Freescale RAID Engine device driver
+ *
+ * Author:
+ * Harninder Rai <harninder.rai@freescale.com>
+ * Naveen Burmi <naveenburmi@freescale.com>
+ *
+ * Rewrite:
+ * Xuelin Shi <xuelin.shi@freescale.com>
+ *
+ * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Theory of operation:
+ *
+ * General capabilities:
+ * The RAID Engine (RE) block is capable of offloading the XOR, memcpy and
+ * P/Q calculations required in RAID5 and RAID6 operations. The RE driver
+ * registers with Linux's ASYNC layer as a dma driver. The RE hardware
+ * maintains strict ordering of requests through chained
+ * command queueing.
+ *
+ * Data flow:
+ * The software RAID layer of Linux (the MD layer) maintains RAID partitions,
+ * strips, stripes etc. It sends requests to the underlying ASYNC layer,
+ * which in turn passes them to the RE driver. The ASYNC layer decides which
+ * request goes to which job ring of the RE hardware. For every request
+ * processed by the RAID Engine, the driver gets an interrupt unless
+ * coalescing is set. The per job ring interrupt handler checks the status
+ * register for errors, clears the interrupt and leaves the post-interrupt
+ * processing to the irq thread.
+ */
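+
+/*
+ * The MD layer reaches this hardware indirectly through the async_tx API,
+ * e.g. (pages and submit context are whatever the caller already has):
+ *
+ *	tx = async_xor(dest_page, src_pages, 0, src_cnt, PAGE_SIZE, &submit);
+ *
+ * which ends up in fsl_re_prep_dma_xor() below when this channel is chosen.
+ */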
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+#include "dmaengine.h"
+#include "fsl_raid.h"
+
+#define FSL_RE_MAX_XOR_SRCS 16
+#define FSL_RE_MAX_PQ_SRCS 16
+#define FSL_RE_MIN_DESCS 256
+#define FSL_RE_MAX_DESCS (4 * FSL_RE_MIN_DESCS)
+#define FSL_RE_FRAME_FORMAT 0x1
+#define FSL_RE_MAX_DATA_LEN (1024*1024)
+
+#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
+
+/* Add descriptors into per chan software queue - submit_q */
+static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct fsl_re_desc *desc;
+ struct fsl_re_chan *re_chan;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ desc = to_fsl_re_dma_desc(tx);
+ re_chan = container_of(tx->chan, struct fsl_re_chan, chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &re_chan->submit_q);
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+
+ return cookie;
+}
+
+/* Copy descriptor from per chan software queue into hardware job ring */
+static void fsl_re_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ int avail;
+ struct fsl_re_desc *desc, *_desc;
+ unsigned long flags;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ avail = FSL_RE_SLOT_AVAIL(
+ in_be32(&re_chan->jrregs->inbring_slot_avail));
+
+ list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
+ if (!avail)
+ break;
+
+ list_move_tail(&desc->node, &re_chan->active_q);
+
+ memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
+ &desc->hwdesc, sizeof(struct fsl_re_hw_desc));
+
+ re_chan->inb_count = (re_chan->inb_count + 1) &
+ FSL_RE_RING_SIZE_MASK;
+ out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
+ avail--;
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+}
+
+static void fsl_re_desc_done(struct fsl_re_desc *desc)
+{
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ dma_cookie_complete(&desc->async_tx);
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+ if (callback)
+ callback(callback_param);
+
+ dma_descriptor_unmap(&desc->async_tx);
+}
+
+static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
+{
+ struct fsl_re_desc *desc, *_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
+ if (async_tx_test_ack(&desc->async_tx))
+ list_move_tail(&desc->node, &re_chan->free_q);
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+
+ fsl_re_issue_pending(&re_chan->chan);
+}
+
+static void fsl_re_dequeue(unsigned long data)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc, *_desc;
+ struct fsl_re_hw_desc *hwdesc;
+ unsigned long flags;
+ unsigned int count, oub_count;
+ int found;
+
+ re_chan = dev_get_drvdata((struct device *)data);
+
+ fsl_re_cleanup_descs(re_chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, flags);
+ count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
+ while (count--) {
+ found = 0;
+ hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
+ list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
+ node) {
+			/* compare the hw dma addr to find the completed descriptor */
+ if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
+ desc->hwdesc.addr_low == hwdesc->addr_low) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ fsl_re_desc_done(desc);
+ list_move_tail(&desc->node, &re_chan->ack_q);
+ } else {
+ dev_err(re_chan->dev,
+ "found hwdesc not in sw queue, discard it\n");
+ }
+
+ oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
+ re_chan->oub_count = oub_count;
+
+ out_be32(&re_chan->jrregs->oubring_job_rmvd,
+ FSL_RE_RMVD_JOB(1));
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, flags);
+}
+
+/* Per Job Ring interrupt handler */
+static irqreturn_t fsl_re_isr(int irq, void *data)
+{
+ struct fsl_re_chan *re_chan;
+ u32 irqstate, status;
+
+ re_chan = dev_get_drvdata((struct device *)data);
+
+ irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
+ if (!irqstate)
+ return IRQ_NONE;
+
+ /*
+	 * There is no way in the upper layer (read: the MD layer) to recover
+	 * from error conditions except restarting everything. In the long
+	 * term we need to do something more than just crashing.
+ */
+ if (irqstate & FSL_RE_ERROR) {
+ status = in_be32(&re_chan->jrregs->jr_status);
+ dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
+ irqstate, status);
+ }
+
+ /* Clear interrupt */
+ out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);
+
+ tasklet_schedule(&re_chan->irqtask);
+
+ return IRQ_HANDLED;
+}
+
+static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
+ size_t length, dma_addr_t addr, bool final)
+{
+ u32 efrl = length & FSL_RE_CF_LENGTH_MASK;
+
+ efrl |= final << FSL_RE_CF_FINAL_SHIFT;
+ cf[index].efrl32 = efrl;
+ cf[index].addr_high = upper_32_bits(addr);
+ cf[index].addr_low = lower_32_bits(addr);
+}
+
+static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
+ struct fsl_re_desc *desc,
+ void *cf, dma_addr_t paddr)
+{
+ desc->re_chan = re_chan;
+ desc->async_tx.tx_submit = fsl_re_tx_submit;
+ dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
+ INIT_LIST_HEAD(&desc->node);
+
+ desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
+ desc->hwdesc.lbea32 = upper_32_bits(paddr);
+ desc->hwdesc.addr_low = lower_32_bits(paddr);
+ desc->cf_addr = cf;
+ desc->cf_paddr = paddr;
+
+ desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
+ desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;
+
+ return desc;
+}
+
+static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
+ unsigned long flags)
+{
+ struct fsl_re_desc *desc = NULL;
+ void *cf;
+ dma_addr_t paddr;
+ unsigned long lock_flag;
+
+ fsl_re_cleanup_descs(re_chan);
+
+ spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
+ if (!list_empty(&re_chan->free_q)) {
+ /* take one desc from free_q */
+ desc = list_first_entry(&re_chan->free_q,
+ struct fsl_re_desc, node);
+ list_del(&desc->node);
+
+ desc->async_tx.flags = flags;
+ }
+ spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
+
+ if (!desc) {
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
+ &paddr);
+ if (!cf) {
+ kfree(desc);
+ return NULL;
+ }
+
+ desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
+ desc->async_tx.flags = flags;
+
+ spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
+ re_chan->alloc_count++;
+ spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
+ }
+
+ return desc;
+}
+
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ struct fsl_re_xor_cdb *xor;
+ struct fsl_re_cmpnd_frame *cf;
+ u32 cdb;
+ unsigned int i, j;
+ unsigned int save_src_cnt = src_cnt;
+ int cont_q = 0;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "genq tx length %lu, max length %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+	if (!desc)
+ return NULL;
+
+ if (scf && (flags & DMA_PREP_CONTINUE)) {
+ cont_q = 1;
+ src_cnt += 1;
+ }
+
+ /* Filling xor CDB */
+ cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+ xor = desc->cdb_addr;
+ xor->cdb32 = cdb;
+
+ if (scf) {
+		/* compute q = src0*coef0^src1*coef1^..., * is GF(2^8) mult */
+ for (i = 0; i < save_src_cnt; i++)
+ xor->gfm[i] = scf[i];
+ if (cont_q)
+ xor->gfm[i++] = 1;
+ } else {
+ /* compute P, that is XOR all srcs */
+ for (i = 0; i < src_cnt; i++)
+ xor->gfm[i] = 1;
+ }
+
+ /* Filling frame 0 of compound frame descriptor with CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);
+
+ /* Fill CFD's 1st frame with dest buffer */
+ fill_cfd_frame(cf, 1, len, dest, 0);
+
+ /* Fill CFD's rest of the frames with source buffers */
+ for (i = 2, j = 0; j < save_src_cnt; i++, j++)
+ fill_cfd_frame(cf, i, len, src[j], 0);
+
+ if (cont_q)
+ fill_cfd_frame(cf, i++, len, dest, 0);
+
+ /* Setting the final bit in the last source buffer frame in CFD */
+ cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
+
+ return &desc->async_tx;
+}
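+
+/*
+ * Compound frame layout example for a three-source XOR: frame 0 carries
+ * the GenQ CDB, frame 1 the destination buffer, frames 2-4 the three
+ * source buffers, and the final bit is set on frame 4, the last source
+ * frame.
+ */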
+
+/*
+ * Prep function for P parity calculation. In RAID Engine terminology,
+ * XOR calculation is called GenQ calculation and is done through the
+ * GenQ command.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags)
+{
+ /* NULL let genq take all coef as 1 */
+ return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
+}
+
+/*
+ * Prep function for P/Q parity calculation. In RAID Engine terminology,
+ * P/Q calculation is called GenQQ and is done through the GenQQ command.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
+ struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ struct fsl_re_pq_cdb *pq;
+ struct fsl_re_cmpnd_frame *cf;
+ u32 cdb;
+ u8 *p;
+ int gfmq_len, i, j;
+ unsigned int save_src_cnt = src_cnt;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ /*
+	 * The RE requires at least 2 sources; if given only one source, we pass
+	 * the second source the same as the first one.
+	 * With only one source, generating P is meaningless, so only generate Q.
+ */
+ if (src_cnt == 1) {
+ struct dma_async_tx_descriptor *tx;
+ dma_addr_t dma_src[2];
+ unsigned char coef[2];
+
+ dma_src[0] = *src;
+ coef[0] = *scf;
+ dma_src[1] = *src;
+ coef[1] = 0;
+ tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
+ flags);
+ if (tx)
+ desc = to_fsl_re_dma_desc(tx);
+
+ return tx;
+ }
+
+ /*
+ * During RAID6 array creation, Linux's MD layer gets P and Q
+ * calculated separately in two steps. But our RAID Engine has
+	 * the capability to calculate both P and Q with a single command.
+	 * Hence, to merge well with the MD layer, we need to provide a hook
+	 * here and call the re_jq_prep_dma_genq() function.
+ */
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
+ scf, len, flags);
+
+ if (flags & DMA_PREP_CONTINUE)
+ src_cnt += 3;
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+	if (!desc)
+ return NULL;
+
+ /* Filling GenQQ CDB */
+ cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+
+ pq = desc->cdb_addr;
+ pq->cdb32 = cdb;
+
+ p = pq->gfm_q1;
+ /* Init gfm_q1[] */
+ for (i = 0; i < src_cnt; i++)
+ p[i] = 1;
+
+ /* Align gfm[] to 32bit */
+ gfmq_len = ALIGN(src_cnt, 4);
+
+ /* Init gfm_q2[] */
+ p += gfmq_len;
+ for (i = 0; i < src_cnt; i++)
+ p[i] = scf[i];
+
+ /* Filling frame 0 of compound frame descriptor with CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);
+
+ /* Fill CFD's 1st & 2nd frame with dest buffers */
+ for (i = 1, j = 0; i < 3; i++, j++)
+ fill_cfd_frame(cf, i, len, dest[j], 0);
+
+ /* Fill CFD's rest of the frames with source buffers */
+ for (i = 3, j = 0; j < save_src_cnt; i++, j++)
+ fill_cfd_frame(cf, i, len, src[j], 0);
+
+ /* PQ computation continuation */
+ if (flags & DMA_PREP_CONTINUE) {
+ if (src_cnt - save_src_cnt == 3) {
+ p[save_src_cnt] = 0;
+ p[save_src_cnt + 1] = 0;
+ p[save_src_cnt + 2] = 1;
+ fill_cfd_frame(cf, i++, len, dest[0], 0);
+ fill_cfd_frame(cf, i++, len, dest[1], 0);
+ fill_cfd_frame(cf, i++, len, dest[1], 0);
+ } else {
+ dev_err(re_chan->dev, "PQ tx continuation error!\n");
+ return NULL;
+ }
+ }
+
+ /* Setting the final bit in the last source buffer frame in CFD */
+ cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;
+
+ return &desc->async_tx;
+}
+
+/*
+ * Prep function for memcpy. In the RAID Engine, memcpy is done through the
+ * MOVE command. The logic of this function will need to be modified once
+ * multipage support is added in Linux's MD/ASYNC layer.
+ */
+static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ size_t length;
+ struct fsl_re_cmpnd_frame *cf;
+ struct fsl_re_move_cdb *move;
+ u32 cdb;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+
+ if (len > FSL_RE_MAX_DATA_LEN) {
+ dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n",
+ len, FSL_RE_MAX_DATA_LEN);
+ return NULL;
+ }
+
+ desc = fsl_re_chan_alloc_desc(re_chan, flags);
+	if (!desc)
+ return NULL;
+
+ /* Filling move CDB */
+ cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
+ cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
+ cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
+ cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
+
+ move = desc->cdb_addr;
+ move->cdb32 = cdb;
+
+ /* Filling frame 0 of CFD with move CDB */
+ cf = desc->cf_addr;
+ fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);
+
+ length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);
+
+ /* Fill CFD's 1st frame with dest buffer */
+ fill_cfd_frame(cf, 1, length, dest, 0);
+
+ /* Fill CFD's 2nd frame with src buffer */
+ fill_cfd_frame(cf, 2, length, src, 1);
+
+ return &desc->async_tx;
+}
+
+static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+ void *cf;
+ dma_addr_t paddr;
+ int i;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ break;
+
+ cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL,
+ &paddr);
+ if (!cf) {
+ kfree(desc);
+ break;
+ }
+
+ INIT_LIST_HEAD(&desc->node);
+ fsl_re_init_desc(re_chan, desc, cf, paddr);
+
+ list_add_tail(&desc->node, &re_chan->free_q);
+ re_chan->alloc_count++;
+ }
+ return re_chan->alloc_count;
+}
+
+static void fsl_re_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_re_chan *re_chan;
+ struct fsl_re_desc *desc;
+
+ re_chan = container_of(chan, struct fsl_re_chan, chan);
+ while (re_chan->alloc_count--) {
+ desc = list_first_entry(&re_chan->free_q,
+ struct fsl_re_desc,
+ node);
+
+ list_del(&desc->node);
+ dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
+ desc->cf_paddr);
+ kfree(desc);
+ }
+
+ if (!list_empty(&re_chan->free_q))
+ dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
+}
+
+static int fsl_re_chan_probe(struct platform_device *ofdev,
+ struct device_node *np, u8 q, u32 off)
+{
+ struct device *dev, *chandev;
+ struct fsl_re_drv_private *re_priv;
+ struct fsl_re_chan *chan;
+ struct dma_device *dma_dev;
+ u32 ptr;
+ u32 status;
+ int ret = 0, rc;
+ struct platform_device *chan_ofdev;
+
+ dev = &ofdev->dev;
+ re_priv = dev_get_drvdata(dev);
+ dma_dev = &re_priv->dma_dev;
+
+ chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ /* create platform device for chan node */
+ chan_ofdev = of_platform_device_create(np, NULL, dev);
+ if (!chan_ofdev) {
+ dev_err(dev, "Not able to create ofdev for jr %d\n", q);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ /* read reg property from dts */
+ rc = of_property_read_u32(np, "reg", &ptr);
+ if (rc) {
+ dev_err(dev, "Reg property not found in jr %d\n", q);
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
+ off + ptr);
+
+ /* read irq property from dts */
+ chan->irq = irq_of_parse_and_map(np, 0);
+ if (chan->irq == NO_IRQ) {
+ dev_err(dev, "No IRQ defined for JR %d\n", q);
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
+
+ chandev = &chan_ofdev->dev;
+ tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev);
+
+ ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
+ if (ret) {
+ dev_err(dev, "Unable to register interrupt for JR %d\n", q);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ re_priv->re_jrs[q] = chan;
+ chan->chan.device = dma_dev;
+ chan->chan.private = chan;
+ chan->dev = chandev;
+ chan->re_dev = re_priv;
+
+ spin_lock_init(&chan->desc_lock);
+ INIT_LIST_HEAD(&chan->ack_q);
+ INIT_LIST_HEAD(&chan->active_q);
+ INIT_LIST_HEAD(&chan->submit_q);
+ INIT_LIST_HEAD(&chan->free_q);
+
+ chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
+ GFP_KERNEL, &chan->inb_phys_addr);
+ if (!chan->inb_ring_virt_addr) {
+ dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
+ GFP_KERNEL, &chan->oub_phys_addr);
+ if (!chan->oub_ring_virt_addr) {
+ dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
+ ret = -ENOMEM;
+ goto err_free_1;
+ }
+
+ /* Program the Inbound/Outbound ring base addresses and size */
+ out_be32(&chan->jrregs->inbring_base_h,
+ chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
+ out_be32(&chan->jrregs->oubring_base_h,
+ chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
+ out_be32(&chan->jrregs->inbring_base_l,
+ chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
+ out_be32(&chan->jrregs->oubring_base_l,
+ chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
+ out_be32(&chan->jrregs->inbring_size,
+ FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
+ out_be32(&chan->jrregs->oubring_size,
+ FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
+
+ /* Read LIODN value from u-boot */
+ status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;
+
+ /* Program the CFG reg */
+ out_be32(&chan->jrregs->jr_config_1,
+ FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);
+
+ dev_set_drvdata(chandev, chan);
+
+ /* Enable RE/CHAN */
+ out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);
+
+ return 0;
+
+err_free_1:
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
+ chan->inb_phys_addr);
+err_free:
+ return ret;
+}
+
+/* Probe function for RAID Engine */
+static int fsl_re_probe(struct platform_device *ofdev)
+{
+ struct fsl_re_drv_private *re_priv;
+ struct device_node *np;
+ struct device_node *child;
+ u32 off;
+ u8 ridx = 0;
+ struct dma_device *dma_dev;
+ struct resource *res;
+ int rc;
+ struct device *dev = &ofdev->dev;
+
+ re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
+ if (!re_priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* IOMAP the entire RAID Engine region */
+ re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!re_priv->re_regs)
+ return -EBUSY;
+
+ /* Program the RE mode */
+ out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);
+
+ /* Program Galois Field polynomial */
+ out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);
+
+ dev_info(dev, "version %x, mode %x, gfp %x\n",
+ in_be32(&re_priv->re_regs->re_version_id),
+ in_be32(&re_priv->re_regs->global_config),
+ in_be32(&re_priv->re_regs->galois_field_config));
+
+ dma_dev = &re_priv->dma_dev;
+ dma_dev->dev = dev;
+ INIT_LIST_HEAD(&dma_dev->channels);
+ dma_set_mask(dev, DMA_BIT_MASK(40));
+
+ dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
+ dma_dev->device_tx_status = fsl_re_tx_status;
+ dma_dev->device_issue_pending = fsl_re_issue_pending;
+
+ dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
+ dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+
+ dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
+ dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+
+ dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+
+ dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;
+
+ re_priv->total_chans = 0;
+
+ re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
+ FSL_RE_CF_CDB_SIZE,
+ FSL_RE_CF_CDB_ALIGN, 0);
+
+ if (!re_priv->cf_desc_pool) {
+ dev_err(dev, "No memory for fsl re_cf desc pool\n");
+ return -ENOMEM;
+ }
+
+ re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
+ sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
+ FSL_RE_FRAME_ALIGN, 0);
+ if (!re_priv->hw_desc_pool) {
+ dev_err(dev, "No memory for fsl re_hw desc pool\n");
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, re_priv);
+
+	/* Parse the device tree to find the total number of JQs present */
+ for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
+ rc = of_property_read_u32(np, "reg", &off);
+ if (rc) {
+ dev_err(dev, "Reg property not found in JQ node\n");
+ return -ENODEV;
+ }
+ /* Find out the Job Rings present under each JQ */
+ for_each_child_of_node(np, child) {
+ rc = of_device_is_compatible(child,
+ "fsl,raideng-v1.0-job-ring");
+ if (rc) {
+ fsl_re_chan_probe(ofdev, child, ridx++, off);
+ re_priv->total_chans++;
+ }
+ }
+ }
+
+ dma_async_device_register(dma_dev);
+
+ return 0;
+}
+
+static void fsl_re_remove_chan(struct fsl_re_chan *chan)
+{
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
+ chan->inb_phys_addr);
+
+ dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
+ chan->oub_phys_addr);
+}
+
+static int fsl_re_remove(struct platform_device *ofdev)
+{
+ struct fsl_re_drv_private *re_priv;
+ struct device *dev;
+ int i;
+
+ dev = &ofdev->dev;
+ re_priv = dev_get_drvdata(dev);
+
+ /* Cleanup chan related memory areas */
+ for (i = 0; i < re_priv->total_chans; i++)
+ fsl_re_remove_chan(re_priv->re_jrs[i]);
+
+	/* Unregister the DMA engine device */
+ dma_async_device_unregister(&re_priv->dma_dev);
+
+ return 0;
+}
+
+static struct of_device_id fsl_re_ids[] = {
+ { .compatible = "fsl,raideng-v1.0", },
+ {}
+};
+
+static struct platform_driver fsl_re_driver = {
+ .driver = {
+ .name = "fsl-raideng",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_re_ids,
+ },
+ .probe = fsl_re_probe,
+ .remove = fsl_re_remove,
+};
+
+module_platform_driver(fsl_re_driver);
+
+MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");
diff --git a/drivers/dma/fsl_raid.h b/drivers/dma/fsl_raid.h
new file mode 100644
index 000000000000..69d743c04973
--- /dev/null
+++ b/drivers/dma/fsl_raid.h
@@ -0,0 +1,306 @@
+/*
+ * drivers/dma/fsl_raid.h
+ *
+ * Freescale RAID Engine device driver
+ *
+ * Author:
+ * Harninder Rai <harninder.rai@freescale.com>
+ * Naveen Burmi <naveenburmi@freescale.com>
+ *
+ * Rewrite:
+ * Xuelin Shi <xuelin.shi@freescale.com>
+ *
+ * Copyright (c) 2010-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define FSL_RE_MAX_CHANS 4
+#define FSL_RE_DPAA_MODE BIT(30)
+#define FSL_RE_NON_DPAA_MODE BIT(31)
+#define FSL_RE_GFM_POLY 0x1d000000
+#define FSL_RE_ADD_JOB(x) ((x) << 16)
+#define FSL_RE_RMVD_JOB(x) ((x) << 16)
+#define FSL_RE_CFG1_CBSI 0x08000000
+#define FSL_RE_CFG1_CBS0 0x00080000
+#define FSL_RE_SLOT_FULL_SHIFT 8
+#define FSL_RE_SLOT_FULL(x) ((x) >> FSL_RE_SLOT_FULL_SHIFT)
+#define FSL_RE_SLOT_AVAIL_SHIFT 8
+#define FSL_RE_SLOT_AVAIL(x) ((x) >> FSL_RE_SLOT_AVAIL_SHIFT)
+#define FSL_RE_PQ_OPCODE 0x1B
+#define FSL_RE_XOR_OPCODE 0x1A
+#define FSL_RE_MOVE_OPCODE 0x8
+#define FSL_RE_FRAME_ALIGN 16
+#define FSL_RE_BLOCK_SIZE 0x3 /* 4096 bytes */
+#define FSL_RE_CACHEABLE_IO 0x0
+#define FSL_RE_BUFFER_OUTPUT 0x0
+#define FSL_RE_INTR_ON_ERROR 0x1
+#define FSL_RE_DATA_DEP 0x1
+#define FSL_RE_ENABLE_DPI 0x0
+#define FSL_RE_RING_SIZE 0x400
+#define FSL_RE_RING_SIZE_MASK (FSL_RE_RING_SIZE - 1)
+#define FSL_RE_RING_SIZE_SHIFT 8
+#define FSL_RE_ADDR_BIT_SHIFT 4
+#define FSL_RE_ADDR_BIT_MASK (BIT(FSL_RE_ADDR_BIT_SHIFT) - 1)
+#define FSL_RE_ERROR 0x40000000
+#define FSL_RE_INTR 0x80000000
+#define FSL_RE_CLR_INTR 0x80000000
+#define FSL_RE_PAUSE 0x80000000
+#define FSL_RE_ENABLE 0x80000000
+#define FSL_RE_REG_LIODN_MASK 0x00000FFF
+
+#define FSL_RE_CDB_OPCODE_MASK 0xF8000000
+#define FSL_RE_CDB_OPCODE_SHIFT 27
+#define FSL_RE_CDB_EXCLEN_MASK 0x03000000
+#define FSL_RE_CDB_EXCLEN_SHIFT 24
+#define FSL_RE_CDB_EXCLQ1_MASK 0x00F00000
+#define FSL_RE_CDB_EXCLQ1_SHIFT 20
+#define FSL_RE_CDB_EXCLQ2_MASK 0x000F0000
+#define FSL_RE_CDB_EXCLQ2_SHIFT 16
+#define FSL_RE_CDB_BLKSIZE_MASK 0x0000C000
+#define FSL_RE_CDB_BLKSIZE_SHIFT 14
+#define FSL_RE_CDB_CACHE_MASK 0x00003000
+#define FSL_RE_CDB_CACHE_SHIFT 12
+#define FSL_RE_CDB_BUFFER_MASK 0x00000800
+#define FSL_RE_CDB_BUFFER_SHIFT 11
+#define FSL_RE_CDB_ERROR_MASK 0x00000400
+#define FSL_RE_CDB_ERROR_SHIFT 10
+#define FSL_RE_CDB_NRCS_MASK 0x0000003C
+#define FSL_RE_CDB_NRCS_SHIFT 6
+#define FSL_RE_CDB_DEPEND_MASK 0x00000008
+#define FSL_RE_CDB_DEPEND_SHIFT 3
+#define FSL_RE_CDB_DPI_MASK 0x00000004
+#define FSL_RE_CDB_DPI_SHIFT 2
+
+/*
+ * The largest CF block is 19 * sizeof(struct fsl_re_cmpnd_frame), i.e. 304
+ * bytes (19 = 1 CDB + 2 destination + 16 source frames); aligned to 64 bytes
+ * this becomes 320 bytes. The largest CDB block is struct fsl_re_pq_cdb,
+ * which is 180 bytes; added to the CF block, 320 + 180 = 500, aligned to
+ * 64 bytes, gives 512 bytes.
+ */
+#define FSL_RE_CF_DESC_SIZE 320
+#define FSL_RE_CF_CDB_SIZE 512
+#define FSL_RE_CF_CDB_ALIGN 64
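
The sizing arithmetic above can be checked mechanically. A hypothetical compile-time sketch (not part of the patch; it assumes struct fsl_re_cmpnd_frame and struct fsl_re_pq_cdb, which are defined further down in this header, plus the kernel's ALIGN()/BUILD_BUG_ON() helpers):

#include <linux/kernel.h>
#include <linux/bug.h>

static inline void fsl_re_check_pool_sizes(void)
{
	/* 1 CDB + 2 destination + 16 source compound frames, rounded to 64 bytes */
	BUILD_BUG_ON(ALIGN(19 * sizeof(struct fsl_re_cmpnd_frame), 64) !=
		     FSL_RE_CF_DESC_SIZE);
	/* CF block plus the largest CDB (GenQQ/PQ), again rounded to 64 bytes */
	BUILD_BUG_ON(ALIGN(FSL_RE_CF_DESC_SIZE + sizeof(struct fsl_re_pq_cdb), 64) !=
		     FSL_RE_CF_CDB_SIZE);
}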
+
+struct fsl_re_ctrl {
+ /* General Configuration Registers */
+ __be32 global_config; /* Global Configuration Register */
+ u8 rsvd1[4];
+ __be32 galois_field_config; /* Galois Field Configuration Register */
+ u8 rsvd2[4];
+ __be32 jq_wrr_config; /* WRR Configuration register */
+ u8 rsvd3[4];
+ __be32 crc_config; /* CRC Configuration register */
+ u8 rsvd4[228];
+ __be32 system_reset; /* System Reset Register */
+ u8 rsvd5[252];
+ __be32 global_status; /* Global Status Register */
+ u8 rsvd6[832];
+ __be32 re_liodn_base; /* LIODN Base Register */
+ u8 rsvd7[1712];
+ __be32 re_version_id; /* Version ID register of RE */
+ __be32 re_version_id_2; /* Version ID 2 register of RE */
+ u8 rsvd8[512];
+ __be32 host_config; /* Host I/F Configuration Register */
+};
+
+struct fsl_re_chan_cfg {
+ /* Registers for JR interface */
+ __be32 jr_config_0; /* Job Queue Configuration 0 Register */
+ __be32 jr_config_1; /* Job Queue Configuration 1 Register */
+ __be32 jr_interrupt_status; /* Job Queue Interrupt Status Register */
+ u8 rsvd1[4];
+ __be32 jr_command; /* Job Queue Command Register */
+ u8 rsvd2[4];
+ __be32 jr_status; /* Job Queue Status Register */
+ u8 rsvd3[228];
+
+ /* Input Ring */
+ __be32 inbring_base_h; /* Inbound Ring Base Address Register - High */
+ __be32 inbring_base_l; /* Inbound Ring Base Address Register - Low */
+ __be32 inbring_size; /* Inbound Ring Size Register */
+ u8 rsvd4[4];
+ __be32 inbring_slot_avail; /* Inbound Ring Slot Available Register */
+ u8 rsvd5[4];
+ __be32 inbring_add_job; /* Inbound Ring Add Job Register */
+ u8 rsvd6[4];
+ __be32 inbring_cnsmr_indx; /* Inbound Ring Consumer Index Register */
+ u8 rsvd7[220];
+
+ /* Output Ring */
+ __be32 oubring_base_h; /* Outbound Ring Base Address Register - High */
+ __be32 oubring_base_l; /* Outbound Ring Base Address Register - Low */
+ __be32 oubring_size; /* Outbound Ring Size Register */
+ u8 rsvd8[4];
+ __be32 oubring_job_rmvd; /* Outbound Ring Job Removed Register */
+ u8 rsvd9[4];
+ __be32 oubring_slot_full; /* Outbound Ring Slot Full Register */
+ u8 rsvd10[4];
+ __be32 oubring_prdcr_indx; /* Outbound Ring Producer Index */
+};
+
+/*
+ * Command Descriptor Block (CDB) for the unicast move command.
+ * In RAID Engine terms, memcpy is done through the move command.
+ */
+struct fsl_re_move_cdb {
+ __be32 cdb32;
+};
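
As a hedged illustration (not part of the patch; the helper name is invented and the in-driver code may pack additional fields), this is how a move-command CDB word could be assembled from the opcode and field definitions above before being stored big-endian in cdb32:

static inline u32 fsl_re_example_move_cdb32(void)
{
	u32 cdb = 0;

	cdb |= (FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT) &
	       FSL_RE_CDB_OPCODE_MASK;
	cdb |= (FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT) &
	       FSL_RE_CDB_BLKSIZE_MASK;
	cdb |= (FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT) &
	       FSL_RE_CDB_ERROR_MASK;

	return cdb;	/* a driver would typically store this as cpu_to_be32(cdb) */
}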
+
+/* Data protection/integrity related fields */
+#define FSL_RE_DPI_APPS_MASK 0xC0000000
+#define FSL_RE_DPI_APPS_SHIFT 30
+#define FSL_RE_DPI_REF_MASK 0x30000000
+#define FSL_RE_DPI_REF_SHIFT 28
+#define FSL_RE_DPI_GUARD_MASK 0x0C000000
+#define FSL_RE_DPI_GUARD_SHIFT 26
+#define FSL_RE_DPI_ATTR_MASK 0x03000000
+#define FSL_RE_DPI_ATTR_SHIFT 24
+#define FSL_RE_DPI_META_MASK 0x0000FFFF
+
+struct fsl_re_dpi {
+ __be32 dpi32;
+ __be32 ref;
+};
+
+/*
+ * CDB for GenQ command. In RAID Engine terminology, XOR is
+ * done through this command
+ */
+struct fsl_re_xor_cdb {
+ __be32 cdb32;
+ u8 gfm[16];
+ struct fsl_re_dpi dpi_dest_spec;
+ struct fsl_re_dpi dpi_src_spec[16];
+};
+
+/* CDB for no-op command */
+struct fsl_re_noop_cdb {
+ __be32 cdb32;
+};
+
+/*
+ * CDB for GenQQ command. In RAID Engine terminology, P/Q is
+ * done through this command
+ */
+struct fsl_re_pq_cdb {
+ __be32 cdb32;
+ u8 gfm_q1[16];
+ u8 gfm_q2[16];
+ struct fsl_re_dpi dpi_dest_spec[2];
+ struct fsl_re_dpi dpi_src_spec[16];
+};
+
+/* Compound frame */
+#define FSL_RE_CF_ADDR_HIGH_MASK 0x000000FF
+#define FSL_RE_CF_EXT_MASK 0x80000000
+#define FSL_RE_CF_EXT_SHIFT 31
+#define FSL_RE_CF_FINAL_MASK 0x40000000
+#define FSL_RE_CF_FINAL_SHIFT 30
+#define FSL_RE_CF_LENGTH_MASK 0x000FFFFF
+#define FSL_RE_CF_BPID_MASK 0x00FF0000
+#define FSL_RE_CF_BPID_SHIFT 16
+#define FSL_RE_CF_OFFSET_MASK 0x00001FFF
+
+struct fsl_re_cmpnd_frame {
+ __be32 addr_high;
+ __be32 addr_low;
+ __be32 efrl32;
+ __be32 rbro32;
+};
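
A minimal sketch (not part of the patch; the helper name is invented and the real driver may differ in detail) of filling one compound-frame entry using the ADDR_HIGH, FINAL and LENGTH fields defined above; upper_32_bits()/lower_32_bits() are the usual kernel helpers:

static void fsl_re_example_fill_cf(struct fsl_re_cmpnd_frame *cf,
				   dma_addr_t addr, u32 len, bool final)
{
	u32 efrl = len & FSL_RE_CF_LENGTH_MASK;

	if (final)
		efrl |= FSL_RE_CF_FINAL_MASK;

	cf->addr_high = cpu_to_be32(upper_32_bits(addr) & FSL_RE_CF_ADDR_HIGH_MASK);
	cf->addr_low  = cpu_to_be32(lower_32_bits(addr));
	cf->efrl32    = cpu_to_be32(efrl);
	cf->rbro32    = cpu_to_be32(0);
}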
+
+/* Frame descriptor */
+#define FSL_RE_HWDESC_LIODN_MASK 0x3F000000
+#define FSL_RE_HWDESC_LIODN_SHIFT 24
+#define FSL_RE_HWDESC_BPID_MASK 0x00FF0000
+#define FSL_RE_HWDESC_BPID_SHIFT 16
+#define FSL_RE_HWDESC_ELIODN_MASK 0x0000F000
+#define FSL_RE_HWDESC_ELIODN_SHIFT 12
+#define FSL_RE_HWDESC_FMT_SHIFT 29
+#define FSL_RE_HWDESC_FMT_MASK (0x3 << FSL_RE_HWDESC_FMT_SHIFT)
+
+struct fsl_re_hw_desc {
+ __be32 lbea32;
+ __be32 addr_low;
+ __be32 fmt32;
+ __be32 status;
+};
+
+/* RAID Engine device private data */
+struct fsl_re_drv_private {
+ u8 total_chans;
+ struct dma_device dma_dev;
+ struct fsl_re_ctrl *re_regs;
+ struct fsl_re_chan *re_jrs[FSL_RE_MAX_CHANS];
+ struct dma_pool *cf_desc_pool;
+ struct dma_pool *hw_desc_pool;
+};
+
+/* Per job ring data structure */
+struct fsl_re_chan {
+ char name[16];
+ spinlock_t desc_lock; /* queue lock */
+	struct list_head ack_q;		/* descriptors waiting to be acked */
+	struct list_head active_q;	/* issued to hw, not yet completed */
+	struct list_head submit_q;	/* submitted, waiting to be issued to hw */
+	struct list_head free_q;	/* free descriptors available for reuse */
+ struct device *dev;
+ struct fsl_re_drv_private *re_dev;
+ struct dma_chan chan;
+ struct fsl_re_chan_cfg *jrregs;
+ int irq;
+ struct tasklet_struct irqtask;
+ u32 alloc_count;
+
+	/* hw descriptor ring for inbound queue */
+ dma_addr_t inb_phys_addr;
+ struct fsl_re_hw_desc *inb_ring_virt_addr;
+ u32 inb_count;
+
+ /* hw descriptor ring for outbound queue */
+ dma_addr_t oub_phys_addr;
+ struct fsl_re_hw_desc *oub_ring_virt_addr;
+ u32 oub_count;
+};
+
+/* Async transaction descriptor */
+struct fsl_re_desc {
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head node;
+ struct fsl_re_hw_desc hwdesc;
+ struct fsl_re_chan *re_chan;
+
+ /* hwdesc will point to cf_addr */
+ void *cf_addr;
+ dma_addr_t cf_paddr;
+
+ void *cdb_addr;
+ dma_addr_t cdb_paddr;
+ int status;
+};
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index ed045a9ad634..9ca56830cc63 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -689,11 +689,6 @@ static int mdc_slave_config(struct dma_chan *chan,
return 0;
}
-static int mdc_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void mdc_free_chan_resources(struct dma_chan *chan)
{
struct mdc_chan *mchan = to_mdc_chan(chan);
@@ -910,7 +905,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg;
mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic;
mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy;
- mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources;
mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources;
mdma->dma_dev.device_tx_status = mdc_tx_status;
mdma->dma_dev.device_issue_pending = mdc_issue_pending;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 66a0efb9651d..62bbd79338e0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1260,6 +1260,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr)
@@ -1306,6 +1307,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
case 2:
sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
break;
+ case 3:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
+ break;
default:
dev_err(sdma->dev, "unknown firmware version\n");
goto err_firmware;
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 3b55bb8d969a..ea1e107ae884 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 940c1502a8b5..ee0aa9f4ccfa 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index d63f68b1aa35..30f5c7eede16 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 695483e6be32..69c7dfcad023 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 470292767e68..bf24ebe874b0 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 194ec20c9408..64790a45ef5d 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -15,10 +15,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 02177ecf09f8..a3e731edce57 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index 5501eb072d69..76f0dc688a19 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 2f1cfa0f1f47..909352f74c89 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 263d9f6a207e..998826854fdd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
*/
/*
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 6f7f43529ccb..647e362f01fd 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -313,11 +313,6 @@ static void k3_dma_tasklet(unsigned long arg)
}
}
-static int k3_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
struct k3_dma_chan *c = to_k3_chan(chan);
@@ -654,7 +649,7 @@ static void k3_dma_free_desc(struct virt_dma_desc *vd)
kfree(ds);
}
-static struct of_device_id k3_pdma_dt_ids[] = {
+static const struct of_device_id k3_pdma_dt_ids[] = {
{ .compatible = "hisilicon,k3-dma-1.0", },
{}
};
@@ -728,7 +723,6 @@ static int k3_dma_probe(struct platform_device *op)
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
d->slave.dev = &op->dev;
- d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources;
d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
d->slave.device_tx_status = k3_dma_tx_status;
d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index eb410044e1af..462a0229a743 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -973,7 +973,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
-static struct of_device_id mmp_pdma_dt_ids[] = {
+static const struct of_device_id mmp_pdma_dt_ids[] = {
{ .compatible = "marvell,pdma-1.0", },
{}
};
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b6f4e1fc9c78..449e785def17 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -613,7 +613,7 @@ struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
}
-static struct of_device_id mmp_tdma_dt_ids[] = {
+static const struct of_device_id mmp_tdma_dt_ids[] = {
{ .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
{ .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
{}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 57d2457545f3..e6281e7aa46e 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -21,10 +21,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
@@ -1072,7 +1068,7 @@ static int mpc_dma_remove(struct platform_device *op)
return 0;
}
-static struct of_device_id mpc_dma_match[] = {
+static const struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
{ .compatible = "fsl,mpc8308-dma", },
{},
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index b03e8137b918..1c56001df676 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -10,10 +10,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/init.h>
@@ -1249,7 +1245,7 @@ static int mv_xor_remove(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id mv_xor_dt_ids[] = {
+static const struct of_device_id mv_xor_dt_ids[] = {
{ .compatible = "marvell,orion-xor", },
{},
};
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 78edc7e44569..91958dba39a2 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -9,10 +9,6 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#ifndef MV_XOR_H
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 35c143cb88da..b859792dde95 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -949,6 +949,7 @@ err_free_res:
err_disable_pdev:
pci_disable_device(pdev);
err_free_mem:
+ kfree(pd);
return err;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 0e1f56772855..a7d9d3029b14 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -556,7 +556,7 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAADDH;
buf[0] |= (da << 1);
- *((u16 *)&buf[1]) = val;
+ *((__le16 *)&buf[1]) = cpu_to_le16(val);
PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
da == 1 ? "DA" : "SA", val);
@@ -710,7 +710,7 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
buf[0] = CMD_DMAMOV;
buf[1] = dst;
- *((u32 *)&buf[2]) = val;
+ *((__le32 *)&buf[2]) = cpu_to_le32(val);
PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
@@ -888,7 +888,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
buf[1] = chan & 0x7;
- *((u32 *)&buf[2]) = addr;
+ *((__le32 *)&buf[2]) = cpu_to_le32(addr);
return SZ_DMAGO;
}
@@ -928,7 +928,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
}
writel(val, regs + DBGINST0);
- val = *((u32 *)&insn[2]);
+ val = le32_to_cpu(*((__le32 *)&insn[2]));
writel(val, regs + DBGINST1);
/* If timed out due to halted state-machine */
@@ -2162,7 +2162,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
* DMA transfer again. This pause feature was implemented to
* allow safely read residue before channel termination.
*/
-int pl330_pause(struct dma_chan *chan)
+static int pl330_pause(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
struct pl330_dmac *pl330 = pch->dmac;
@@ -2203,8 +2203,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
-int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
- struct dma_pl330_desc *desc)
+static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+ struct dma_pl330_desc *desc)
{
struct pl330_thread *thrd = pch->thread;
struct pl330_dmac *pl330 = pch->dmac;
@@ -2259,7 +2259,17 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
transferred = 0;
residual += desc->bytes_requested - transferred;
if (desc->txd.cookie == cookie) {
- ret = desc->status;
+ switch (desc->status) {
+ case DONE:
+ ret = DMA_COMPLETE;
+ break;
+ case PREP:
+ case BUSY:
+ ret = DMA_IN_PROGRESS;
+ break;
+ default:
+ WARN_ON(1);
+ }
break;
}
if (desc->last)
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index fa764a39cd36..9217f893b0d1 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -16,10 +16,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 9c914d625906..5a250cdc8376 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -171,6 +171,35 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = {
[BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
};
+static const struct reg_offset_data bam_v1_7_reg_info[] = {
+ [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 },
+ [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 },
+ [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 },
+ [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 },
+ [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 },
+ [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 },
+ [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 },
+ [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 },
+ [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 },
+ [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 },
+ [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 },
+ [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 },
+ [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 },
+ [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 },
+ [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 },
+ [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
+ [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 },
+};
+
/* BAM CTRL */
#define BAM_SW_RST BIT(0)
#define BAM_EN BIT(1)
@@ -1051,6 +1080,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
static const struct of_device_id bam_of_match[] = {
{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
+ { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
{}
};
@@ -1113,7 +1143,7 @@ static int bam_dma_probe(struct platform_device *pdev)
if (!bdev->channels) {
ret = -ENOMEM;
- goto err_disable_clk;
+ goto err_tasklet_kill;
}
/* allocate and initialize channels */
@@ -1125,7 +1155,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
IRQF_TRIGGER_HIGH, "bam_dma", bdev);
if (ret)
- goto err_disable_clk;
+ goto err_bam_channel_exit;
/* set max dma segment size */
bdev->common.dev = bdev->dev;
@@ -1133,7 +1163,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
if (ret) {
dev_err(bdev->dev, "cannot set maximum segment size\n");
- goto err_disable_clk;
+ goto err_bam_channel_exit;
}
platform_set_drvdata(pdev, bdev);
@@ -1161,7 +1191,7 @@ static int bam_dma_probe(struct platform_device *pdev)
ret = dma_async_device_register(&bdev->common);
if (ret) {
dev_err(bdev->dev, "failed to register dma async device\n");
- goto err_disable_clk;
+ goto err_bam_channel_exit;
}
ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
@@ -1173,8 +1203,14 @@ static int bam_dma_probe(struct platform_device *pdev)
err_unregister_dma:
dma_async_device_unregister(&bdev->common);
+err_bam_channel_exit:
+ for (i = 0; i < bdev->num_channels; i++)
+ tasklet_kill(&bdev->channels[i].vc.task);
+err_tasklet_kill:
+ tasklet_kill(&bdev->task);
err_disable_clk:
clk_disable_unprepare(bdev->bamclk);
+
return ret;
}
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 2f91da3db836..01dcaf21b988 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -749,11 +749,6 @@ unlock:
return ret;
}
-static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
/* Ensure all queued descriptors are freed */
@@ -1238,7 +1233,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
if (!s3cdma->phy_chans)
return -ENOMEM;
- /* aquire irqs and clocks for all physical channels */
+ /* acquire irqs and clocks for all physical channels */
for (i = 0; i < pdata->num_phy_channels; i++) {
struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
char clk_name[6];
@@ -1266,7 +1261,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
sprintf(clk_name, "dma.%d", i);
phy->clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(phy->clk) && sdata->has_clocks) {
- dev_err(&pdev->dev, "unable to aquire clock for channel %d, error %lu",
+ dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
i, PTR_ERR(phy->clk));
continue;
}
@@ -1290,8 +1285,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
s3cdma->memcpy.dev = &pdev->dev;
- s3cdma->memcpy.device_alloc_chan_resources =
- s3c24xx_dma_alloc_chan_resources;
s3cdma->memcpy.device_free_chan_resources =
s3c24xx_dma_free_chan_resources;
s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
@@ -1305,8 +1298,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
s3cdma->slave.dev = &pdev->dev;
- s3cdma->slave.device_alloc_chan_resources =
- s3c24xx_dma_alloc_chan_resources;
s3cdma->slave.device_free_chan_resources =
s3c24xx_dma_free_chan_resources;
s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5adf5407a8cb..43db255050d2 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -389,11 +389,6 @@ static void sa11x0_dma_tasklet(unsigned long arg)
}
-static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
@@ -835,7 +830,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
INIT_LIST_HEAD(&dmadev->channels);
dmadev->dev = dev;
- dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
dmadev->device_config = sa11x0_dma_device_config;
dmadev->device_pause = sa11x0_dma_device_pause;
@@ -948,6 +942,12 @@ static int sa11x0_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
+ d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
+ d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
if (ret) {
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 8190ad225a1b..0f371524a4d9 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -51,12 +51,6 @@ config RCAR_HPB_DMAE
help
Enable support for the Renesas R-Car series DMA controllers.
-config RCAR_AUDMAC_PP
- tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
- depends on SH_DMAE_BASE
- help
- Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
-
config RCAR_DMAC
tristate "Renesas R-Car Gen2 DMA Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -64,3 +58,12 @@ config RCAR_DMAC
help
This driver supports the general purpose DMA controller found in the
Renesas R-Car second generation SoCs.
+
+config RENESAS_USB_DMAC
+ tristate "Renesas USB-DMA Controller"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select RENESAS_DMA
+ select DMA_VIRTUAL_CHANNELS
+ help
+	  This driver supports the USB-DMA controller found in Renesas
+	  SoCs.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 2852f9db61a4..b8a598066ce2 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -15,5 +15,5 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
-obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
deleted file mode 100644
index d95bbdd721f4..000000000000
--- a/drivers/dma/sh/rcar-audmapp.c
+++ /dev/null
@@ -1,376 +0,0 @@
-/*
- * This is for Renesas R-Car Audio-DMAC-peri-peri.
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * based on the drivers/dma/sh/shdma.c
- *
- * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
- * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- */
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-#include <linux/of_dma.h>
-#include <linux/platform_data/dma-rcar-audmapp.h>
-#include <linux/platform_device.h>
-#include <linux/shdma-base.h>
-
-/*
- * DMA register
- */
-#define PDMASAR 0x00
-#define PDMADAR 0x04
-#define PDMACHCR 0x0c
-
-/* PDMACHCR */
-#define PDMACHCR_DE (1 << 0)
-
-#define AUDMAPP_MAX_CHANNELS 29
-
-/* Default MEMCPY transfer size = 2^2 = 4 bytes */
-#define LOG2_DEFAULT_XFER_SIZE 2
-#define AUDMAPP_SLAVE_NUMBER 256
-#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
-
-struct audmapp_chan {
- struct shdma_chan shdma_chan;
- void __iomem *base;
- dma_addr_t slave_addr;
- u32 chcr;
-};
-
-struct audmapp_device {
- struct shdma_dev shdma_dev;
- struct audmapp_pdata *pdata;
- struct device *dev;
- void __iomem *chan_reg;
-};
-
-struct audmapp_desc {
- struct shdma_desc shdma_desc;
- dma_addr_t src;
- dma_addr_t dst;
-};
-
-#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
-
-#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
-#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc)
-#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
- struct audmapp_device, shdma_dev.dma_dev)
-
-static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
-{
- struct audmapp_device *audev = to_dev(auchan);
- struct device *dev = audev->dev;
-
- dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
-
- iowrite32(data, auchan->base + reg);
-}
-
-static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
-{
- return ioread32(auchan->base + reg);
-}
-
-static void audmapp_halt(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- int i;
-
- audmapp_write(auchan, 0, PDMACHCR);
-
- for (i = 0; i < 1024; i++) {
- if (0 == audmapp_read(auchan, PDMACHCR))
- return;
- udelay(1);
- }
-}
-
-static void audmapp_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_desc *desc = to_desc(sdesc);
- struct device *dev = audev->dev;
- u32 chcr = auchan->chcr | PDMACHCR_DE;
-
- dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n",
- &desc->src, &desc->dst, chcr);
-
- audmapp_write(auchan, desc->src, PDMASAR);
- audmapp_write(auchan, desc->dst, PDMADAR);
- audmapp_write(auchan, chcr, PDMACHCR);
-}
-
-static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
- u32 *chcr, dma_addr_t *dst)
-{
- struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_pdata *pdata = audev->pdata;
- struct audmapp_slave_config *cfg;
- int i;
-
- *chcr = 0;
- *dst = 0;
-
- if (!pdata) { /* DT */
- *chcr = ((u32)slave_id) << 16;
- auchan->shdma_chan.slave_id = (slave_id) >> 8;
- return 0;
- }
-
- /* non-DT */
-
- if (slave_id >= AUDMAPP_SLAVE_NUMBER)
- return -ENXIO;
-
- for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id) {
- *chcr = cfg->chcr;
- *dst = cfg->dst;
- return 0;
- }
-
- return -ENXIO;
-}
-
-static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr, bool try)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- u32 chcr;
- dma_addr_t dst;
- int ret;
-
- ret = audmapp_get_config(auchan, slave_id, &chcr, &dst);
- if (ret < 0)
- return ret;
-
- if (try)
- return 0;
-
- auchan->chcr = chcr;
- auchan->slave_addr = slave_addr ? : dst;
-
- return 0;
-}
-
-static int audmapp_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdesc,
- dma_addr_t src, dma_addr_t dst, size_t *len)
-{
- struct audmapp_desc *desc = to_desc(sdesc);
-
- if (*len > (size_t)AUDMAPP_LEN_MAX)
- *len = (size_t)AUDMAPP_LEN_MAX;
-
- desc->src = src;
- desc->dst = dst;
-
- return 0;
-}
-
-static void audmapp_setup_xfer(struct shdma_chan *schan,
- int slave_id)
-{
-}
-
-static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
-
- return auchan->slave_addr;
-}
-
-static bool audmapp_channel_busy(struct shdma_chan *schan)
-{
- struct audmapp_chan *auchan = to_chan(schan);
- u32 chcr = audmapp_read(auchan, PDMACHCR);
-
- return chcr & ~PDMACHCR_DE;
-}
-
-static bool audmapp_desc_completed(struct shdma_chan *schan,
- struct shdma_desc *sdesc)
-{
- return true;
-}
-
-static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
-{
- return &((struct audmapp_desc *)buf)[i].shdma_desc;
-}
-
-static const struct shdma_ops audmapp_shdma_ops = {
- .halt_channel = audmapp_halt,
- .desc_setup = audmapp_desc_setup,
- .set_slave = audmapp_set_slave,
- .start_xfer = audmapp_start_xfer,
- .embedded_desc = audmapp_embedded_desc,
- .setup_xfer = audmapp_setup_xfer,
- .slave_addr = audmapp_slave_addr,
- .channel_busy = audmapp_channel_busy,
- .desc_completed = audmapp_desc_completed,
-};
-
-static int audmapp_chan_probe(struct platform_device *pdev,
- struct audmapp_device *audev, int id)
-{
- struct shdma_dev *sdev = &audev->shdma_dev;
- struct audmapp_chan *auchan;
- struct shdma_chan *schan;
- struct device *dev = audev->dev;
-
- auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
- if (!auchan)
- return -ENOMEM;
-
- schan = &auchan->shdma_chan;
- schan->max_xfer_len = AUDMAPP_LEN_MAX;
-
- shdma_chan_probe(sdev, schan, id);
-
- auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
- dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
-
- return 0;
-}
-
-static void audmapp_chan_remove(struct audmapp_device *audev)
-{
- struct shdma_chan *schan;
- int i;
-
- shdma_for_each_chan(schan, &audev->shdma_dev, i) {
- BUG_ON(!schan);
- shdma_chan_remove(schan);
- }
-}
-
-static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- dma_cap_mask_t mask;
- struct dma_chan *chan;
- u32 chcr = dma_spec->args[0];
-
- if (dma_spec->args_count != 1)
- return NULL;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- chan = dma_request_channel(mask, shdma_chan_filter, NULL);
- if (chan)
- to_shdma_chan(chan)->hw_req = chcr;
-
- return chan;
-}
-
-static int audmapp_probe(struct platform_device *pdev)
-{
- struct audmapp_pdata *pdata = pdev->dev.platform_data;
- struct device_node *np = pdev->dev.of_node;
- struct audmapp_device *audev;
- struct shdma_dev *sdev;
- struct dma_device *dma_dev;
- struct resource *res;
- int err, i;
-
- if (np)
- of_dma_controller_register(np, audmapp_of_xlate, pdev);
- else if (!pdata)
- return -ENODEV;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
- if (!audev)
- return -ENOMEM;
-
- audev->dev = &pdev->dev;
- audev->pdata = pdata;
- audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(audev->chan_reg))
- return PTR_ERR(audev->chan_reg);
-
- sdev = &audev->shdma_dev;
- sdev->ops = &audmapp_shdma_ops;
- sdev->desc_size = sizeof(struct audmapp_desc);
-
- dma_dev = &sdev->dma_dev;
- dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
- dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
-
- err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
- if (err < 0)
- return err;
-
- platform_set_drvdata(pdev, audev);
-
- /* Create DMA Channel */
- for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
- err = audmapp_chan_probe(pdev, audev, i);
- if (err)
- goto chan_probe_err;
- }
-
- err = dma_async_device_register(dma_dev);
- if (err < 0)
- goto chan_probe_err;
-
- return err;
-
-chan_probe_err:
- audmapp_chan_remove(audev);
- shdma_cleanup(sdev);
-
- return err;
-}
-
-static int audmapp_remove(struct platform_device *pdev)
-{
- struct audmapp_device *audev = platform_get_drvdata(pdev);
- struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
-
- dma_async_device_unregister(dma_dev);
-
- audmapp_chan_remove(audev);
- shdma_cleanup(&audev->shdma_dev);
-
- return 0;
-}
-
-static const struct of_device_id audmapp_of_match[] = {
- { .compatible = "renesas,rcar-audmapp", },
- {},
-};
-
-static struct platform_driver audmapp_driver = {
- .probe = audmapp_probe,
- .remove = audmapp_remove,
- .driver = {
- .name = "rcar-audmapp-engine",
- .of_match_table = audmapp_of_match,
- },
-};
-module_platform_driver(audmapp_driver);
-
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 8ee383d339a5..10fcabad80f3 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -171,8 +171,7 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
return NULL;
}
-static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
- dma_addr_t slave_addr)
+static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
const struct shdma_ops *ops = sdev->ops;
@@ -183,25 +182,23 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
ret = ops->set_slave(schan, match, slave_addr, true);
if (ret < 0)
return ret;
-
- slave_id = schan->slave_id;
} else {
- match = slave_id;
+ match = schan->real_slave_id;
}
- if (slave_id < 0 || slave_id >= slave_num)
+ if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
return -EINVAL;
- if (test_and_set_bit(slave_id, shdma_slave_used))
+ if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
return -EBUSY;
ret = ops->set_slave(schan, match, slave_addr, false);
if (ret < 0) {
- clear_bit(slave_id, shdma_slave_used);
+ clear_bit(schan->real_slave_id, shdma_slave_used);
return ret;
}
- schan->slave_id = slave_id;
+ schan->slave_id = schan->real_slave_id;
return 0;
}
@@ -221,10 +218,12 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan)
*/
if (slave) {
/* Legacy mode: .private is set in filter */
- ret = shdma_setup_slave(schan, slave->slave_id, 0);
+ schan->real_slave_id = slave->slave_id;
+ ret = shdma_setup_slave(schan, 0);
if (ret < 0)
goto esetslave;
} else {
+ /* Normal mode: real_slave_id was set by filter */
schan->slave_id = -EINVAL;
}
@@ -258,11 +257,14 @@ esetslave:
/*
* This is the standard shdma filter function to be used as a replacement to the
- * "old" method, using the .private pointer. If for some reason you allocate a
- * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
+ * "old" method, using the .private pointer.
+ * You always have to pass a valid slave ID as the argument; old drivers that
+ * pass ERR_PTR(-EINVAL) as a filter parameter and set the slave ID up in
+ * dma_slave_config need to be updated, since we want to remove the slave_id
* parameter. If this filter is used, the slave driver, after calling
* dma_request_channel(), will also have to call dmaengine_slave_config() with
- * .slave_id, .direction, and either .src_addr or .dst_addr set.
+ * .direction, and either .src_addr or .dst_addr set.
+ *
* NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
* capability! If this becomes a requirement, hardware glue drivers, using this
* services would have to provide their own filters, which first would check
@@ -276,7 +278,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
struct shdma_chan *schan;
struct shdma_dev *sdev;
- int match = (long)arg;
+ int slave_id = (long)arg;
int ret;
/* Only support channels handled by this driver. */
@@ -284,19 +286,39 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
shdma_alloc_chan_resources)
return false;
- if (match < 0)
+ schan = to_shdma_chan(chan);
+ sdev = to_shdma_dev(chan->device);
+
+ /*
+ * For DT, the schan->slave_id field is generated by the
+ * set_slave function from the slave ID that is passed in
+ * from xlate. For the non-DT case, the slave ID is
+ * directly passed into the filter function by the driver
+ */
+ if (schan->dev->of_node) {
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
+ if (ret < 0)
+ return false;
+
+ schan->real_slave_id = schan->slave_id;
+ return true;
+ }
+
+ if (slave_id < 0) {
/* No slave requested - arbitrary channel */
+ dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
return true;
+ }
- schan = to_shdma_chan(chan);
- if (!schan->dev->of_node && match >= slave_num)
+ if (slave_id >= slave_num)
return false;
- sdev = to_shdma_dev(schan->dma_chan.device);
- ret = sdev->ops->set_slave(schan, match, 0, true);
+ ret = sdev->ops->set_slave(schan, slave_id, 0, true);
if (ret < 0)
return false;
+ schan->real_slave_id = slave_id;
+
return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
@@ -452,6 +474,8 @@ static void shdma_free_chan_resources(struct dma_chan *chan)
chan->private = NULL;
}
+ schan->real_slave_id = 0;
+
spin_lock_irq(&schan->chan_lock);
list_splice_init(&schan->ld_free, &list);
@@ -764,11 +788,20 @@ static int shdma_config(struct dma_chan *chan,
*/
if (!config)
return -EINVAL;
+
+ /*
+ * overriding the slave_id through dma_slave_config is deprecated,
+ * but possibly some out-of-tree drivers still do it.
+ */
+ if (WARN_ON_ONCE(config->slave_id &&
+ config->slave_id != schan->real_slave_id))
+ schan->real_slave_id = config->slave_id;
+
/*
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
- return shdma_setup_slave(schan, config->slave_id,
+ return shdma_setup_slave(schan,
config->direction == DMA_DEV_TO_MEM ?
config->src_addr : config->dst_addr);
}
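
A hedged usage sketch of shdma_chan_filter() as described by the comment above (not part of the patch; the slave ID value, FIFO address parameter, and function name are invented for illustration):

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static struct dma_chan *example_request_shdma_chan(dma_addr_t dev_fifo)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= dev_fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* slave ID 3 is an arbitrary example value */
	chan = dma_request_channel(mask, shdma_chan_filter, (void *)3L);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}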
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 9f1d4c7dbab8..11707df1a689 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -443,7 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
return ret;
}
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
struct sh_dmae_device *shdev = data;
@@ -689,7 +689,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
const struct sh_dmae_pdata *pdata;
unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
int chan_irq[SH_DMAE_MAX_CHANNELS];
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
unsigned long irqflags = 0;
int errirq;
#endif
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
new file mode 100644
index 000000000000..f705798ce3eb
--- /dev/null
+++ b/drivers/dma/sh/usb-dmac.c
@@ -0,0 +1,910 @@
+/*
+ * Renesas USB DMA Controller Driver
+ *
+ * Copyright (C) 2015 Renesas Electronics Corporation
+ *
+ * based on rcar-dmac.c
+ * Copyright (C) 2014 Renesas Electronics Inc.
+ * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/*
+ * struct usb_dmac_sg - Descriptor for a hardware transfer
+ * @mem_addr: memory address
+ * @size: transfer size in bytes
+ */
+struct usb_dmac_sg {
+ dma_addr_t mem_addr;
+ u32 size;
+};
+
+/*
+ * struct usb_dmac_desc - USB DMA Transfer Descriptor
+ * @vd: base virtual channel DMA transaction descriptor
+ * @direction: direction of the DMA transfer
+ * @sg_allocated_len: length of allocated sg
+ * @sg_len: length of sg
+ * @sg_index: index of sg
+ * @residue: residue after the DMAC completed a transfer
+ * @node: node for desc_got and desc_freed
+ * @done_cookie: cookie after the DMAC completed a transfer
+ * @sg: information for the transfer
+ */
+struct usb_dmac_desc {
+ struct virt_dma_desc vd;
+ enum dma_transfer_direction direction;
+ unsigned int sg_allocated_len;
+ unsigned int sg_len;
+ unsigned int sg_index;
+ u32 residue;
+ struct list_head node;
+ dma_cookie_t done_cookie;
+ struct usb_dmac_sg sg[0];
+};
+
+#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
+
+/*
+ * struct usb_dmac_chan - USB DMA Controller Channel
+ * @vc: base virtual DMA channel object
+ * @iomem: channel I/O memory base
+ * @index: index of this channel in the controller
+ * @irq: irq number of this channel
+ * @desc: the current descriptor
+ * @descs_allocated: number of descriptors allocated
+ * @desc_got: got descriptors
+ * @desc_freed: freed descriptors after the DMAC completed a transfer
+ */
+struct usb_dmac_chan {
+ struct virt_dma_chan vc;
+ void __iomem *iomem;
+ unsigned int index;
+ int irq;
+ struct usb_dmac_desc *desc;
+ int descs_allocated;
+ struct list_head desc_got;
+ struct list_head desc_freed;
+};
+
+#define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan)
+
+/*
+ * struct usb_dmac - USB DMA Controller
+ * @engine: base DMA engine object
+ * @dev: the hardware device
+ * @iomem: remapped I/O memory base
+ * @n_channels: number of available channels
+ * @channels: array of DMAC channels
+ */
+struct usb_dmac {
+ struct dma_device engine;
+ struct device *dev;
+ void __iomem *iomem;
+
+ unsigned int n_channels;
+ struct usb_dmac_chan *channels;
+};
+
+#define to_usb_dmac(d) container_of(d, struct usb_dmac, engine)
+
+/* -----------------------------------------------------------------------------
+ * Registers
+ */
+
+#define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i))
+
+#define USB_DMASWR 0x0008
+#define USB_DMASWR_SWR (1 << 0)
+#define USB_DMAOR 0x0060
+#define USB_DMAOR_AE (1 << 2)
+#define USB_DMAOR_DME (1 << 0)
+
+#define USB_DMASAR 0x0000
+#define USB_DMADAR 0x0004
+#define USB_DMATCR 0x0008
+#define USB_DMATCR_MASK 0x00ffffff
+#define USB_DMACHCR 0x0014
+#define USB_DMACHCR_FTE (1 << 24)
+#define USB_DMACHCR_NULLE (1 << 16)
+#define USB_DMACHCR_NULL (1 << 12)
+#define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6))
+#define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6))
+#define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6))
+#define USB_DMACHCR_IE (1 << 5)
+#define USB_DMACHCR_SP (1 << 2)
+#define USB_DMACHCR_TE (1 << 1)
+#define USB_DMACHCR_DE (1 << 0)
+#define USB_DMATEND 0x0018
+
+/* Hardcode the xfer_shift to 5 (32 bytes) */
+#define USB_DMAC_XFER_SHIFT 5
+#define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT)
+#define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B
+#define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES
+
+/* for descriptors */
+#define USB_DMAC_INITIAL_NR_DESC 16
+#define USB_DMAC_INITIAL_NR_SG 8
+
+/* -----------------------------------------------------------------------------
+ * Device access
+ */
+
+static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data)
+{
+ writel(data, dmac->iomem + reg);
+}
+
+static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg)
+{
+ return readl(dmac->iomem + reg);
+}
+
+static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg)
+{
+ return readl(chan->iomem + reg);
+}
+
+static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data)
+{
+ writel(data, chan->iomem + reg);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization and configuration
+ */
+
+static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE;
+}
+
+static u32 usb_dmac_calc_tend(u32 size)
+{
+ /*
+ * Please refer to the Figure "Example of Final Transaction Valid
+ * Data Transfer Enable (EDTEN) Setting" in the data sheet.
+ */
+ return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? :
+ USB_DMAC_XFER_SIZE));
+}
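
As a worked example (values chosen purely for illustration): with the 32-byte transfer size hardcoded above, a 100-byte transfer leaves a 4-byte final transaction, so usb_dmac_calc_tend(100) evaluates to 0xffffffff << 28 == 0xf0000000, presumably one enable bit per valid byte of the final transaction; for an exact multiple such as 96 bytes, the ?: fallback uses the full 32 and the result is 0xffffffff.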
+
+/* This function is called with vc.lock held */
+static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan,
+ unsigned int index)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+ struct usb_dmac_sg *sg = desc->sg + index;
+ dma_addr_t src_addr = 0, dst_addr = 0;
+
+ WARN_ON_ONCE(usb_dmac_chan_is_busy(chan));
+
+ if (desc->direction == DMA_DEV_TO_MEM)
+ dst_addr = sg->mem_addr;
+ else
+ src_addr = sg->mem_addr;
+
+ dev_dbg(chan->vc.chan.device->dev,
+ "chan%u: queue sg %p: %u@%pad -> %pad\n",
+ chan->index, sg, sg->size, &src_addr, &dst_addr);
+
+ usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff);
+ usb_dmac_chan_write(chan, USB_DMATCR,
+ DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE));
+ usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size));
+
+ usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS |
+ USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE);
+}
+
+/* This function is called with the vc.lock held */
+static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan)
+{
+ struct virt_dma_desc *vd;
+
+ vd = vchan_next_desc(&chan->vc);
+ if (!vd) {
+ chan->desc = NULL;
+ return;
+ }
+
+ /*
+ * Remove this request from vc->desc_issued. Otherwise, the driver
+ * would get the same descriptor again from vchan_next_desc() after
+ * the transfer has completed.
+ */
+ list_del(&vd->node);
+
+ chan->desc = to_usb_dmac_desc(vd);
+ chan->desc->sg_index = 0;
+ usb_dmac_chan_start_sg(chan, 0);
+}
+
+static int usb_dmac_init(struct usb_dmac *dmac)
+{
+ u16 dmaor;
+
+ /* Clear all channels and enable the DMAC globally. */
+ usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
+
+ dmaor = usb_dmac_read(dmac, USB_DMAOR);
+ if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) {
+ dev_warn(dmac->dev, "DMAOR initialization failed.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Descriptors allocation and free
+ */
+static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len,
+ gfp_t gfp)
+{
+ struct usb_dmac_desc *desc;
+ unsigned long flags;
+
+ desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp);
+ if (!desc)
+ return -ENOMEM;
+
+ desc->sg_allocated_len = sg_len;
+ INIT_LIST_HEAD(&desc->node);
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_add_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ return 0;
+}
+
+static void usb_dmac_desc_free(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc, *_desc;
+ LIST_HEAD(list);
+
+ list_splice_init(&chan->desc_freed, &list);
+ list_splice_init(&chan->desc_got, &list);
+
+ list_for_each_entry_safe(desc, _desc, &list, node) {
+ list_del(&desc->node);
+ kfree(desc);
+ }
+ chan->descs_allocated = 0;
+}
+
+static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan,
+ unsigned int sg_len, gfp_t gfp)
+{
+ struct usb_dmac_desc *desc = NULL;
+ unsigned long flags;
+
+ /* Get a descriptor from the freed list */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_for_each_entry(desc, &chan->desc_freed, node) {
+ if (sg_len <= desc->sg_allocated_len) {
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+ }
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+
+ /* Allocate a new descriptor */
+ if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
+ /* The newly allocated descriptor was added to the tail of the list */
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc,
+ node);
+ list_move_tail(&desc->node, &chan->desc_got);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+ return desc;
+ }
+
+ return NULL;
+}
+
+static void usb_dmac_desc_put(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->vc.lock, flags);
+ list_move_tail(&desc->node, &chan->desc_freed);
+ spin_unlock_irqrestore(&chan->vc.lock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * Stop and reset
+ */
+
+static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan)
+{
+ struct dma_chan *chan = &uchan->vc.chan;
+ struct usb_dmac *dmac = to_usb_dmac(chan->device);
+ int i;
+
+ /* Don't issue a soft reset if any of the channels is busy */
+ for (i = 0; i < dmac->n_channels; ++i) {
+ if (usb_dmac_chan_is_busy(&dmac->channels[i]))
+ return;
+ }
+
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+ usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR);
+ udelay(100);
+ usb_dmac_write(dmac, USB_DMASWR, 0);
+ usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME);
+}
+
+static void usb_dmac_chan_halt(struct usb_dmac_chan *chan)
+{
+ u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+
+ chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE);
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr);
+
+ usb_dmac_soft_reset(chan);
+}
+
+static void usb_dmac_stop(struct usb_dmac *dmac)
+{
+ usb_dmac_write(dmac, USB_DMAOR, 0);
+}
+
+/* -----------------------------------------------------------------------------
+ * DMA engine operations
+ */
+
+static int usb_dmac_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ int ret;
+
+ while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) {
+ ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG,
+ GFP_KERNEL);
+ if (ret < 0) {
+ usb_dmac_desc_free(uchan);
+ return ret;
+ }
+ uchan->descs_allocated++;
+ }
+
+ return pm_runtime_get_sync(chan->device->dev);
+}
+
+static void usb_dmac_free_chan_resources(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ /* Protect against ISR */
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ usb_dmac_desc_free(uchan);
+ vchan_free_chan_resources(&uchan->vc);
+
+ pm_runtime_put(chan->device->dev);
+}
+
+static struct dma_async_tx_descriptor *
+usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long dma_flags, void *context)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc;
+ struct scatterlist *sg;
+ int i;
+
+ if (!sg_len) {
+ dev_warn(chan->device->dev,
+ "%s: bad parameter: len=%d\n", __func__, sg_len);
+ return NULL;
+ }
+
+ desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->direction = dir;
+ desc->sg_len = sg_len;
+ for_each_sg(sgl, sg, sg_len, i) {
+ desc->sg[i].mem_addr = sg_dma_address(sg);
+ desc->sg[i].size = sg_dma_len(sg);
+ }
+
+ return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
+}
+
+static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct usb_dmac_desc *desc;
+ unsigned long flags;
+ LIST_HEAD(head);
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ usb_dmac_chan_halt(uchan);
+ vchan_get_all_descriptors(&uchan->vc, &head);
+ if (uchan->desc)
+ uchan->desc = NULL;
+ list_splice_init(&uchan->desc_got, &list);
+ list_for_each_entry(desc, &list, node)
+ list_move_tail(&desc->node, &uchan->desc_freed);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+ vchan_dma_desc_free_list(&uchan->vc, &head);
+
+ return 0;
+}
+
+static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan,
+ struct usb_dmac_desc *desc,
+ int sg_index)
+{
+ struct usb_dmac_sg *sg = desc->sg + sg_index;
+ u32 mem_addr = sg->mem_addr & 0xffffffff;
+ unsigned int residue = sg->size;
+
+ /*
+ * We cannot use USB_DMATCR to calculate the residue because its value
+ * is not suitable for this calculation.
+ */
+ if (desc->direction == DMA_DEV_TO_MEM)
+ residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr;
+ else
+ residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr;
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ struct usb_dmac_desc *desc;
+ u32 residue = 0;
+
+ list_for_each_entry_reverse(desc, &chan->desc_freed, node) {
+ if (desc->done_cookie == cookie) {
+ residue = desc->residue;
+ break;
+ }
+ }
+
+ return residue;
+}
+
+static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan,
+ dma_cookie_t cookie)
+{
+ u32 residue = 0;
+ struct virt_dma_desc *vd;
+ struct usb_dmac_desc *desc = chan->desc;
+ int i;
+
+ if (!desc) {
+ vd = vchan_find_desc(&chan->vc, cookie);
+ if (!vd)
+ return 0;
+ desc = to_usb_dmac_desc(vd);
+ }
+
+ /* Compute the size of all usb_dmac_sg still to be transferred */
+ for (i = desc->sg_index + 1; i < desc->sg_len; i++)
+ residue += desc->sg[i].size;
+
+ /* Add the residue for the current sg */
+ residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index);
+
+ return residue;
+}
+
+static enum dma_status usb_dmac_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ enum dma_status status;
+ unsigned int residue = 0;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ /* A client driver may still read the residue after DMA_COMPLETE */
+ if (!txstate)
+ return status;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (status == DMA_COMPLETE)
+ residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie);
+ else
+ residue = usb_dmac_chan_get_residue(uchan, cookie);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+
+ dma_set_residue(txstate, residue);
+
+ return status;
+}
+
+static void usb_dmac_issue_pending(struct dma_chan *chan)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&uchan->vc.lock, flags);
+ if (vchan_issue_pending(&uchan->vc) && !uchan->desc)
+ usb_dmac_chan_start_desc(uchan);
+ spin_unlock_irqrestore(&uchan->vc.lock, flags);
+}
+
+static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd)
+{
+ struct usb_dmac_desc *desc = to_usb_dmac_desc(vd);
+ struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan);
+
+ usb_dmac_desc_put(chan, desc);
+}
+
+/* -----------------------------------------------------------------------------
+ * IRQ handling
+ */
+
+static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
+{
+ struct usb_dmac_desc *desc = chan->desc;
+
+ BUG_ON(!desc);
+
+ if (++desc->sg_index < desc->sg_len) {
+ usb_dmac_chan_start_sg(chan, desc->sg_index);
+ } else {
+ desc->residue = usb_dmac_get_current_residue(chan, desc,
+ desc->sg_index - 1);
+ desc->done_cookie = desc->vd.tx.cookie;
+ vchan_cookie_complete(&desc->vd);
+
+ /* Start the next transfer if another descriptor is queued */
+ usb_dmac_chan_start_desc(chan);
+ }
+}
+
+static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
+{
+ struct usb_dmac_chan *chan = dev;
+ irqreturn_t ret = IRQ_NONE;
+ u32 mask = USB_DMACHCR_TE;
+ u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+ u32 chcr;
+
+ spin_lock(&chan->vc.lock);
+
+ chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+ if (chcr & check_bits)
+ mask |= USB_DMACHCR_DE | check_bits;
+ if (chcr & USB_DMACHCR_NULL) {
+ /* A TE interrupt will be raised after FTE is set */
+ mask |= USB_DMACHCR_NULL;
+ chcr |= USB_DMACHCR_FTE;
+ ret |= IRQ_HANDLED;
+ }
+ usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+
+ if (chcr & check_bits) {
+ usb_dmac_isr_transfer_end(chan);
+ ret |= IRQ_HANDLED;
+ }
+
+ spin_unlock(&chan->vc.lock);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * OF xlate and channel filter
+ */
+
+static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
+ struct of_phandle_args *dma_spec = arg;
+
+ if (dma_spec->np != chan->device->dev->of_node)
+ return false;
+
+ /* Each USB-DMAC channel is tied to a fixed USB controller FIFO */
+ if (uchan->index != dma_spec->args[0])
+ return false;
+
+ return true;
+}
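+
+/*
+ * A consumer is expected to reference a fixed channel with a one-cell
+ * specifier, e.g. "dmas = <&usb_dmac 0>" in a (hypothetical) client
+ * device tree node; args[0] is matched against the channel index above.
+ */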
+
+static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct usb_dmac_chan *uchan;
+ struct dma_chan *chan;
+ dma_cap_mask_t mask;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ /* Only slave DMA channels can be allocated via DT */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
+ if (!chan)
+ return NULL;
+
+ uchan = to_usb_dmac_chan(chan);
+
+ return chan;
+}
+
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
+
+static int usb_dmac_runtime_suspend(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i)
+ usb_dmac_chan_halt(&dmac->channels[i]);
+
+ return 0;
+}
+
+static int usb_dmac_runtime_resume(struct device *dev)
+{
+ struct usb_dmac *dmac = dev_get_drvdata(dev);
+
+ return usb_dmac_init(dmac);
+}
+
+static const struct dev_pm_ops usb_dmac_pm = {
+ SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
+ NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe and remove
+ */
+
+static int usb_dmac_chan_probe(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan,
+ unsigned int index)
+{
+ struct platform_device *pdev = to_platform_device(dmac->dev);
+ char pdev_irqname[5];
+ char *irqname;
+ int ret;
+
+ uchan->index = index;
+ uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index);
+
+ /* Request the channel interrupt. */
+ sprintf(pdev_irqname, "ch%u", index);
+ uchan->irq = platform_get_irq_byname(pdev, pdev_irqname);
+ if (uchan->irq < 0) {
+ dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
+ return -ENODEV;
+ }
+
+ irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
+ dev_name(dmac->dev), index);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel,
+ IRQF_SHARED, irqname, uchan);
+ if (ret) {
+ dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
+ uchan->irq, ret);
+ return ret;
+ }
+
+ uchan->vc.desc_free = usb_dmac_virt_desc_free;
+ vchan_init(&uchan->vc, &dmac->engine);
+ INIT_LIST_HEAD(&uchan->desc_freed);
+ INIT_LIST_HEAD(&uchan->desc_got);
+
+ return 0;
+}
+
+static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
+ if (ret < 0) {
+ dev_err(dev, "unable to read dma-channels property\n");
+ return ret;
+ }
+
+ if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+ dev_err(dev, "invalid number of channels %u\n",
+ dmac->n_channels);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int usb_dmac_probe(struct platform_device *pdev)
+{
+ const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
+ struct dma_device *engine;
+ struct usb_dmac *dmac;
+ struct resource *mem;
+ unsigned int i;
+ int ret;
+
+ dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+ if (!dmac)
+ return -ENOMEM;
+
+ dmac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, dmac);
+
+ ret = usb_dmac_parse_of(&pdev->dev, dmac);
+ if (ret < 0)
+ return ret;
+
+ dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
+ sizeof(*dmac->channels), GFP_KERNEL);
+ if (!dmac->channels)
+ return -ENOMEM;
+
+ /* Request resources. */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dmac->iomem))
+ return PTR_ERR(dmac->iomem);
+
+ /* Enable runtime PM and initialize the device. */
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = usb_dmac_init(dmac);
+ pm_runtime_put(&pdev->dev);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset device\n");
+ goto error;
+ }
+
+ /* Initialize the channels. */
+ INIT_LIST_HEAD(&dmac->engine.channels);
+
+ for (i = 0; i < dmac->n_channels; ++i) {
+ ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Register the DMAC as a DMA provider for DT. */
+ ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate,
+ NULL);
+ if (ret < 0)
+ goto error;
+
+ /*
+ * Register the DMA engine device.
+ *
+ * Default transfer size of 32 bytes requires 32-byte alignment.
+ */
+ engine = &dmac->engine;
+ dma_cap_set(DMA_SLAVE, engine->cap_mask);
+
+ engine->dev = &pdev->dev;
+
+ engine->src_addr_widths = widths;
+ engine->dst_addr_widths = widths;
+ engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+ engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+ engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources;
+ engine->device_free_chan_resources = usb_dmac_free_chan_resources;
+ engine->device_prep_slave_sg = usb_dmac_prep_slave_sg;
+ engine->device_terminate_all = usb_dmac_chan_terminate_all;
+ engine->device_tx_status = usb_dmac_tx_status;
+ engine->device_issue_pending = usb_dmac_issue_pending;
+
+ ret = dma_async_device_register(engine);
+ if (ret < 0)
+ goto error;
+
+ return 0;
+
+error:
+ of_dma_controller_free(pdev->dev.of_node);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static void usb_dmac_chan_remove(struct usb_dmac *dmac,
+ struct usb_dmac_chan *uchan)
+{
+ usb_dmac_chan_halt(uchan);
+ devm_free_irq(dmac->dev, uchan->irq, uchan);
+}
+
+static int usb_dmac_remove(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < dmac->n_channels; ++i)
+ usb_dmac_chan_remove(dmac, &dmac->channels[i]);
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&dmac->engine);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static void usb_dmac_shutdown(struct platform_device *pdev)
+{
+ struct usb_dmac *dmac = platform_get_drvdata(pdev);
+
+ usb_dmac_stop(dmac);
+}
+
+static const struct of_device_id usb_dmac_of_ids[] = {
+ { .compatible = "renesas,usb-dmac", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
+
+static struct platform_driver usb_dmac_driver = {
+ .driver = {
+ .pm = &usb_dmac_pm,
+ .name = "usb-dmac",
+ .of_match_table = usb_dmac_of_ids,
+ },
+ .probe = usb_dmac_probe,
+ .remove = usb_dmac_remove,
+ .shutdown = usb_dmac_shutdown,
+};
+
+module_platform_driver(usb_dmac_driver);
+
+MODULE_DESCRIPTION("Renesas USB DMA Controller Driver");
+MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index d0086e9f2082..a1afda43b8ef 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -896,7 +896,7 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};
-static struct of_device_id sirfsoc_dma_match[] = {
+static const struct of_device_id sirfsoc_dma_match[] = {
{ .compatible = "sirf,prima2-dmac", },
{ .compatible = "sirf,marco-dmac", },
{},
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1332b1d4d541..3c10f034d4b9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2514,7 +2514,8 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
sg_dma_len(&dst_sg) = size;
sg_dma_len(&src_sg) = size;
- return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
+ return d40_prep_sg(chan, &src_sg, &dst_sg, 1,
+ DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
@@ -2526,7 +2527,8 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
if (dst_nents != src_nents)
return NULL;
- return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
+ return d40_prep_sg(chan, src_sg, dst_sg, src_nents,
+ DMA_MEM_TO_MEM, dma_flags);
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 7ebcf9bec698..11e536586812 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -796,11 +796,6 @@ static void sun6i_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
-static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
@@ -896,7 +891,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = {
.nr_max_vchans = 37,
};
-static struct of_device_id sun6i_dma_match[] = {
+static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
{ .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
{ /* sentinel */ }
@@ -957,7 +952,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
INIT_LIST_HEAD(&sdc->slave.channels);
- sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources;
sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
sdc->slave.device_tx_status = sun6i_dma_tx_status;
sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
new file mode 100755
index 000000000000..f52e37502254
--- /dev/null
+++ b/drivers/dma/xgene-dma.c
@@ -0,0 +1,2089 @@
+/*
+ * Applied Micro X-Gene SoC DMA engine Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
+ * Loc Ho <lho@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * NOTE: PM support is currently not available.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include "dmaengine.h"
+
+/* X-Gene DMA ring csr registers and bit definitions */
+#define XGENE_DMA_RING_CONFIG 0x04
+#define XGENE_DMA_RING_ENABLE BIT(31)
+#define XGENE_DMA_RING_ID 0x08
+#define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31))
+#define XGENE_DMA_RING_ID_BUF 0x0C
+#define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21))
+#define XGENE_DMA_RING_THRESLD0_SET1 0x30
+#define XGENE_DMA_RING_THRESLD0_SET1_VAL 0x64
+#define XGENE_DMA_RING_THRESLD1_SET1 0x34
+#define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8
+#define XGENE_DMA_RING_HYSTERESIS 0x68
+#define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_STATE 0x6C
+#define XGENE_DMA_RING_STATE_WR_BASE 0x70
+#define XGENE_DMA_RING_NE_INT_MODE 0x017C
+#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \
+ ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
+#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \
+ ((m) &= (~BIT(31 - (v))))
+#define XGENE_DMA_RING_CLKEN 0xC208
+#define XGENE_DMA_RING_SRST 0xC200
+#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
+#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
+#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
+#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
+#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
+#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
+#define XGENE_DMA_RING_CMD_OFFSET 0x2C
+#define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6)
+#define XGENE_DMA_RING_COHERENT_SET(m) \
+ (((u32 *)(m))[2] |= BIT(4))
+#define XGENE_DMA_RING_ADDRL_SET(m, v) \
+ (((u32 *)(m))[2] |= (((v) >> 8) << 5))
+#define XGENE_DMA_RING_ADDRH_SET(m, v) \
+ (((u32 *)(m))[3] |= ((v) >> 35))
+#define XGENE_DMA_RING_ACCEPTLERR_SET(m) \
+ (((u32 *)(m))[3] |= BIT(19))
+#define XGENE_DMA_RING_SIZE_SET(m, v) \
+ (((u32 *)(m))[3] |= ((v) << 23))
+#define XGENE_DMA_RING_RECOMBBUF_SET(m) \
+ (((u32 *)(m))[3] |= BIT(27))
+#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \
+ (((u32 *)(m))[3] |= (0x7 << 28))
+#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \
+ (((u32 *)(m))[4] |= 0x3)
+#define XGENE_DMA_RING_SELTHRSH_SET(m) \
+ (((u32 *)(m))[4] |= BIT(3))
+#define XGENE_DMA_RING_TYPE_SET(m, v) \
+ (((u32 *)(m))[4] |= ((v) << 19))
+
+/* X-Gene DMA device csr registers and bit definitions */
+#define XGENE_DMA_IPBRR 0x0
+#define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF)
+#define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3)
+#define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3)
+#define XGENE_DMA_GCR 0x10
+#define XGENE_DMA_CH_SETUP(v) \
+ ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
+#define XGENE_DMA_ENABLE(v) ((v) |= BIT(31))
+#define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31))
+#define XGENE_DMA_RAID6_CONT 0x14
+#define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24)
+#define XGENE_DMA_INT 0x70
+#define XGENE_DMA_INT_MASK 0x74
+#define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF
+#define XGENE_DMA_INT_ALL_UNMASK 0x0
+#define XGENE_DMA_INT_MASK_SHIFT 0x14
+#define XGENE_DMA_RING_INT0_MASK 0x90A0
+#define XGENE_DMA_RING_INT1_MASK 0x90A8
+#define XGENE_DMA_RING_INT2_MASK 0x90B0
+#define XGENE_DMA_RING_INT3_MASK 0x90B8
+#define XGENE_DMA_RING_INT4_MASK 0x90C0
+#define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0
+#define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF
+#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
+#define XGENE_DMA_BLK_MEM_RDY 0xD074
+#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
+
+/* X-Gene SoC EFUSE csr register and bit definition */
+#define XGENE_SOC_JTAG1_SHADOW 0x18
+#define XGENE_DMA_PQ_DISABLE_MASK BIT(13)
+
+/* X-Gene DMA Descriptor format */
+#define XGENE_DMA_DESC_NV_BIT BIT_ULL(50)
+#define XGENE_DMA_DESC_IN_BIT BIT_ULL(55)
+#define XGENE_DMA_DESC_C_BIT BIT_ULL(63)
+#define XGENE_DMA_DESC_DR_BIT BIT_ULL(61)
+#define XGENE_DMA_DESC_ELERR_POS 46
+#define XGENE_DMA_DESC_RTYPE_POS 56
+#define XGENE_DMA_DESC_LERR_POS 60
+#define XGENE_DMA_DESC_FLYBY_POS 4
+#define XGENE_DMA_DESC_BUFLEN_POS 48
+#define XGENE_DMA_DESC_HOENQ_NUM_POS 48
+
+#define XGENE_DMA_DESC_NV_SET(m) \
+ (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT)
+#define XGENE_DMA_DESC_IN_SET(m) \
+ (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT)
+#define XGENE_DMA_DESC_RTYPE_SET(m, v) \
+ (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS))
+#define XGENE_DMA_DESC_BUFADDR_SET(m, v) \
+ (((u64 *)(m))[0] |= (v))
+#define XGENE_DMA_DESC_BUFLEN_SET(m, v) \
+ (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS))
+#define XGENE_DMA_DESC_C_SET(m) \
+ (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT)
+#define XGENE_DMA_DESC_FLYBY_SET(m, v) \
+ (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS))
+#define XGENE_DMA_DESC_MULTI_SET(m, v, i) \
+ (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8)))
+#define XGENE_DMA_DESC_DR_SET(m) \
+ (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT)
+#define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \
+ (((u64 *)(m))[3] |= (v))
+#define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \
+ (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS))
+#define XGENE_DMA_DESC_ELERR_RD(m) \
+ (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
+#define XGENE_DMA_DESC_LERR_RD(m) \
+ (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
+#define XGENE_DMA_DESC_STATUS(elerr, lerr) \
+ (((elerr) << 4) | (lerr))
+
+/* X-Gene DMA descriptor empty s/w signature */
+#define XGENE_DMA_DESC_EMPTY_INDEX 0
+#define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL
+#define XGENE_DMA_DESC_SET_EMPTY(m) \
+ (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \
+ XGENE_DMA_DESC_EMPTY_SIGNATURE)
+#define XGENE_DMA_DESC_IS_EMPTY(m) \
+ (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \
+ XGENE_DMA_DESC_EMPTY_SIGNATURE)
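+/*
+ * Rx ring slots are pre-set to this signature; a slot holding anything
+ * else is treated as a completion written back by the hardware.
+ */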
+
+/* X-Gene DMA configurable parameters defines */
+#define XGENE_DMA_RING_NUM 512
+#define XGENE_DMA_BUFNUM 0x0
+#define XGENE_DMA_CPU_BUFNUM 0x18
+#define XGENE_DMA_RING_OWNER_DMA 0x03
+#define XGENE_DMA_RING_OWNER_CPU 0x0F
+#define XGENE_DMA_RING_TYPE_REGULAR 0x01
+#define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */
+#define XGENE_DMA_RING_NUM_CONFIG 5
+#define XGENE_DMA_MAX_CHANNEL 4
+#define XGENE_DMA_XOR_CHANNEL 0
+#define XGENE_DMA_PQ_CHANNEL 1
+#define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */
+#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
+#define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */
+#define XGENE_DMA_MAX_XOR_SRC 5
+#define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0
+#define XGENE_DMA_INVALID_LEN_CODE 0x7800
+
+/* X-Gene DMA descriptor error codes */
+#define ERR_DESC_AXI 0x01
+#define ERR_BAD_DESC 0x02
+#define ERR_READ_DATA_AXI 0x03
+#define ERR_WRITE_DATA_AXI 0x04
+#define ERR_FBP_TIMEOUT 0x05
+#define ERR_ECC 0x06
+#define ERR_DIFF_SIZE 0x08
+#define ERR_SCT_GAT_LEN 0x09
+#define ERR_CRC_ERR 0x11
+#define ERR_CHKSUM 0x12
+#define ERR_DIF 0x13
+
+/* X-Gene DMA error interrupt codes */
+#define ERR_DIF_SIZE_INT 0x0
+#define ERR_GS_ERR_INT 0x1
+#define ERR_FPB_TIMEO_INT 0x2
+#define ERR_WFIFO_OVF_INT 0x3
+#define ERR_RFIFO_OVF_INT 0x4
+#define ERR_WR_TIMEO_INT 0x5
+#define ERR_RD_TIMEO_INT 0x6
+#define ERR_WR_ERR_INT 0x7
+#define ERR_RD_ERR_INT 0x8
+#define ERR_BAD_DESC_INT 0x9
+#define ERR_DESC_DST_INT 0xA
+#define ERR_DESC_SRC_INT 0xB
+
+/* X-Gene DMA flyby operation code */
+#define FLYBY_2SRC_XOR 0x8
+#define FLYBY_3SRC_XOR 0x9
+#define FLYBY_4SRC_XOR 0xA
+#define FLYBY_5SRC_XOR 0xB
+
+/* X-Gene DMA SW descriptor flags */
+#define XGENE_DMA_FLAG_64B_DESC BIT(0)
+
+/* Define to dump X-Gene DMA descriptor */
+#define XGENE_DMA_DESC_DUMP(desc, m) \
+ print_hex_dump(KERN_ERR, (m), \
+ DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
+
+#define to_dma_desc_sw(tx) \
+ container_of(tx, struct xgene_dma_desc_sw, tx)
+#define to_dma_chan(dchan) \
+ container_of(dchan, struct xgene_dma_chan, dma_chan)
+
+#define chan_dbg(chan, fmt, arg...) \
+ dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
+#define chan_err(chan, fmt, arg...) \
+ dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
+
+struct xgene_dma_desc_hw {
+ u64 m0;
+ u64 m1;
+ u64 m2;
+ u64 m3;
+};
+
+enum xgene_dma_ring_cfgsize {
+ XGENE_DMA_RING_CFG_SIZE_512B,
+ XGENE_DMA_RING_CFG_SIZE_2KB,
+ XGENE_DMA_RING_CFG_SIZE_16KB,
+ XGENE_DMA_RING_CFG_SIZE_64KB,
+ XGENE_DMA_RING_CFG_SIZE_512KB,
+ XGENE_DMA_RING_CFG_SIZE_INVALID
+};
+
+struct xgene_dma_ring {
+ struct xgene_dma *pdma;
+ u8 buf_num;
+ u16 id;
+ u16 num;
+ u16 head;
+ u16 owner;
+ u16 slots;
+ u16 dst_ring_num;
+ u32 size;
+ void __iomem *cmd;
+ void __iomem *cmd_base;
+ dma_addr_t desc_paddr;
+ u32 state[XGENE_DMA_RING_NUM_CONFIG];
+ enum xgene_dma_ring_cfgsize cfgsize;
+ union {
+ void *desc_vaddr;
+ struct xgene_dma_desc_hw *desc_hw;
+ };
+};
+
+struct xgene_dma_desc_sw {
+ struct xgene_dma_desc_hw desc1;
+ struct xgene_dma_desc_hw desc2;
+ u32 flags;
+ struct list_head node;
+ struct list_head tx_list;
+ struct dma_async_tx_descriptor tx;
+};
+
+/**
+ * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
+ * @dma_chan: dmaengine channel object member
+ * @pdma: X-Gene DMA device structure reference
+ * @dev: struct device reference for dma mapping api
+ * @id: raw id of this channel
+ * @rx_irq: channel IRQ
+ * @name: name of X-Gene DMA channel
+ * @lock: serializes enqueue/dequeue operations to the descriptor pool
+ * @pending: number of transaction requests pushed to the DMA controller
+ * for execution but still waiting for completion
+ * @max_outstanding: max number of outstanding requests we can push to the channel
+ * @ld_pending: descriptors which are queued to run, but have not yet been
+ * submitted to the hardware for execution
+ * @ld_running: descriptors which are currently being executed by the hardware
+ * @ld_completed: descriptors which have finished execution by the hardware.
+ * These descriptors have already had their cleanup actions run. They
+ * are waiting for the ACK bit to be set by the async tx API.
+ * @desc_pool: descriptor pool for DMA operations
+ * @tasklet: bottom half where completed descriptors are cleaned up
+ * @tx_ring: transmit ring used to prepare descriptors for execution
+ * @rx_ring: receive ring used to collect completed DMA descriptors
+ * during cleanup
+ */
+struct xgene_dma_chan {
+ struct dma_chan dma_chan;
+ struct xgene_dma *pdma;
+ struct device *dev;
+ int id;
+ int rx_irq;
+ char name[10];
+ spinlock_t lock;
+ int pending;
+ int max_outstanding;
+ struct list_head ld_pending;
+ struct list_head ld_running;
+ struct list_head ld_completed;
+ struct dma_pool *desc_pool;
+ struct tasklet_struct tasklet;
+ struct xgene_dma_ring tx_ring;
+ struct xgene_dma_ring rx_ring;
+};
+
+/**
+ * struct xgene_dma - internal representation of an X-Gene DMA device
+ * @dev: struct device reference
+ * @clk: reference to the DMA engine clock
+ * @err_irq: DMA error irq number
+ * @ring_num: start id number for DMA ring
+ * @csr_dma: base for DMA register access
+ * @csr_ring: base for DMA ring register access
+ * @csr_ring_cmd: base for DMA ring command register access
+ * @csr_efuse: base for efuse register access
+ * @dma_dev: embedded struct dma_device
+ * @chan: reference to X-Gene DMA channels
+ */
+struct xgene_dma {
+ struct device *dev;
+ struct clk *clk;
+ int err_irq;
+ int ring_num;
+ void __iomem *csr_dma;
+ void __iomem *csr_ring;
+ void __iomem *csr_ring_cmd;
+ void __iomem *csr_efuse;
+ struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
+ struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
+};
+
+static const char * const xgene_dma_desc_err[] = {
+ [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
+ [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
+ [ERR_READ_DATA_AXI] = "AXI error when reading data",
+ [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
+ [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
+ [ERR_ECC] = "ECC double bit error",
+ [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
+ [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
+ [ERR_CRC_ERR] = "CRC error",
+ [ERR_CHKSUM] = "Checksum error",
+ [ERR_DIF] = "DIF error",
+};
+
+static const char * const xgene_dma_err[] = {
+ [ERR_DIF_SIZE_INT] = "DIF size error",
+ [ERR_GS_ERR_INT] = "Gather scatter not same size error",
+ [ERR_FPB_TIMEO_INT] = "Free pool time out error",
+ [ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
+ [ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
+ [ERR_WR_TIMEO_INT] = "Write time out error",
+ [ERR_RD_TIMEO_INT] = "Read time out error",
+ [ERR_WR_ERR_INT] = "HBF bus write error",
+ [ERR_RD_ERR_INT] = "HBF bus read error",
+ [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
+ [ERR_DESC_DST_INT] = "HFB reading dst link address error",
+ [ERR_DESC_SRC_INT] = "HFB reading src link address error",
+};
+
+static bool is_pq_enabled(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
+ return !(val & XGENE_DMA_PQ_DISABLE_MASK);
+}
+
+static void xgene_dma_cpu_to_le64(u64 *desc, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ desc[i] = cpu_to_le64(desc[i]);
+}
+
+static u16 xgene_dma_encode_len(u32 len)
+{
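+ /* Lengths below 16 KB are encoded as-is; a full 16 KB buffer is 0x0 */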
+ return (len < XGENE_DMA_MAX_BYTE_CNT) ?
+ len : XGENE_DMA_16K_BUFFER_LEN_CODE;
+}
+
+static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
+{
+ static u8 flyby_type[] = {
+ FLYBY_2SRC_XOR, /* Dummy */
+ FLYBY_2SRC_XOR, /* Dummy */
+ FLYBY_2SRC_XOR,
+ FLYBY_3SRC_XOR,
+ FLYBY_4SRC_XOR,
+ FLYBY_5SRC_XOR
+ };
+
+ return flyby_type[src_cnt];
+}
+
+static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
+{
+ u32 __iomem *cmd_base = ring->cmd_base;
+ u32 ring_state = ioread32(&cmd_base[1]);
+
+ return XGENE_DMA_RING_DESC_CNT(ring_state);
+}
+
+static void xgene_dma_set_src_buffer(void *ext8, size_t *len,
+ dma_addr_t *paddr)
+{
+ size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
+ *len : XGENE_DMA_MAX_BYTE_CNT;
+
+ XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr);
+ XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes));
+ *len -= nbytes;
+ *paddr += nbytes;
+}
+
+static void xgene_dma_invalidate_buffer(void *ext8)
+{
+ XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE);
+}
+
+static void *xgene_dma_lookup_ext8(u64 *desc, int idx)
+{
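+ /*
+ * The 2nd to 5th source buffers are stored in the extended descriptor
+ * with each pair of 64-bit words swapped, so indices 0,1,2,3 map to
+ * words 1,0,3,2.
+ */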
+ return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1);
+}
+
+static void xgene_dma_init_desc(void *desc, u16 dst_ring_num)
+{
+ XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */
+ XGENE_DMA_DESC_IN_SET(desc);
+ XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num);
+ XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA);
+}
+
+static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw,
+ dma_addr_t dst, dma_addr_t src,
+ size_t len)
+{
+ void *desc1, *desc2;
+ int i;
+
+ /* Get 1st descriptor */
+ desc1 = &desc_sw->desc1;
+ xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
+
+ /* Set destination address */
+ XGENE_DMA_DESC_DR_SET(desc1);
+ XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst);
+
+ /* Set 1st source address */
+ xgene_dma_set_src_buffer(desc1 + 8, &len, &src);
+
+ if (len <= 0) {
+ desc2 = NULL;
+ goto skip_additional_src;
+ }
+
+ /*
+ * The remaining data does not fit in the first descriptor,
+ * so we need to use the 2nd descriptor
+ */
+ desc2 = &desc_sw->desc2;
+ XGENE_DMA_DESC_NV_SET(desc1);
+
+ /* Set 2nd to 5th source address */
+ for (i = 0; i < 4 && len; i++)
+ xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i),
+ &len, &src);
+
+ /* Invalidate unused source address field */
+ for (; i < 4; i++)
+ xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i));
+
+ /* Update the flag to note that a 64B descriptor has been prepared */
+ desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
+
+skip_additional_src:
+ /* Hardware stores descriptor in little endian format */
+ xgene_dma_cpu_to_le64(desc1, 4);
+ if (desc2)
+ xgene_dma_cpu_to_le64(desc2, 4);
+}
+
+static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw,
+ dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, size_t *nbytes,
+ const u8 *scf)
+{
+ void *desc1, *desc2;
+ size_t len = *nbytes;
+ int i;
+
+ desc1 = &desc_sw->desc1;
+ desc2 = &desc_sw->desc2;
+
+ /* Initialize DMA descriptor */
+ xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
+
+ /* Set destination address */
+ XGENE_DMA_DESC_DR_SET(desc1);
+ XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst);
+
+ /* We have multiple source addresses, so we need to set the NV bit */
+ XGENE_DMA_DESC_NV_SET(desc1);
+
+ /* Set flyby opcode */
+ XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt));
+
+ /* Set 1st to 5th source addresses */
+ for (i = 0; i < src_cnt; i++) {
+ len = *nbytes;
+ xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) :
+ xgene_dma_lookup_ext8(desc2, i - 1),
+ &len, &src[i]);
+ XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i);
+ }
+
+ /* Hardware stores descriptor in little endian format */
+ xgene_dma_cpu_to_le64(desc1, 4);
+ xgene_dma_cpu_to_le64(desc2, 4);
+
+ /* Update meta data */
+ *nbytes = len;
+ *dst += XGENE_DMA_MAX_BYTE_CNT;
+
+ /* XOR and PQ operations always require a 64B descriptor */
+ desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
+}
+
+static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct xgene_dma_desc_sw *desc;
+ struct xgene_dma_chan *chan;
+ dma_cookie_t cookie;
+
+ if (unlikely(!tx))
+ return -EINVAL;
+
+ chan = to_dma_chan(tx->chan);
+ desc = to_dma_desc_sw(tx);
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Add this transaction list onto the tail of the pending queue */
+ list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
+
+ spin_unlock_bh(&chan->lock);
+
+ return cookie;
+}
+
+static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ list_del(&desc->node);
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
+}
+
+static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
+ struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc;
+ dma_addr_t phys;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys);
+ if (!desc) {
+ chan_err(chan, "Failed to allocate LDs\n");
+ return NULL;
+ }
+
+ memset(desc, 0, sizeof(*desc));
+
+ INIT_LIST_HEAD(&desc->tx_list);
+ desc->tx.phys = phys;
+ desc->tx.tx_submit = xgene_dma_tx_submit;
+ dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
+
+ chan_dbg(chan, "LD %p allocated\n", desc);
+
+ return desc;
+}
+
+/**
+ * xgene_dma_clean_completed_descriptor - free all descriptors which
+ * have been completed and acked
+ * @chan: X-Gene DMA channel
+ *
+ * This function is used on all completed and acked descriptors.
+ */
+static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
+ if (async_tx_test_ack(&desc->tx))
+ xgene_dma_clean_descriptor(chan, desc);
+ }
+}
+
+/**
+ * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
+ * @chan: X-Gene DMA channel
+ * @desc: descriptor to cleanup and free
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies.
+ */
+static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ struct dma_async_tx_descriptor *tx = &desc->tx;
+
+ /*
+ * If this is not the last transaction in the group, there is no need
+ * to complete the cookie or run any callback, since this is not the
+ * tx descriptor that was handed back to the caller of this DMA request.
+ */
+
+ if (tx->cookie == 0)
+ return;
+
+ dma_cookie_complete(tx);
+
+ /* Run the link descriptor callback function */
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+
+ dma_descriptor_unmap(tx);
+
+ /* Run any dependencies */
+ dma_run_dependencies(tx);
+}
+
+/**
+ * xgene_dma_clean_running_descriptor - move the completed descriptor from
+ * ld_running to ld_completed
+ * @chan: X-Gene DMA channel
+ * @desc: the descriptor which is completed
+ *
+ * Free the descriptor directly if acked by async_tx api,
+ * else move it to queue ld_completed.
+ */
+static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc)
+{
+ /* Remove from the list of running transactions */
+ list_del(&desc->node);
+
+ /*
+ * the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->tx)) {
+ /*
+ * Move this descriptor to the list of descriptors which are
+ * completed, but still awaiting the 'ack' bit to be set.
+ */
+ list_add_tail(&desc->node, &chan->ld_completed);
+ return;
+ }
+
+ chan_dbg(chan, "LD %p free\n", desc);
+ dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
+}
+
+static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
+ struct xgene_dma_desc_sw *desc_sw)
+{
+ struct xgene_dma_desc_hw *desc_hw;
+
+ /* Check whether we can push more descriptors to hw for execution */
+ if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
+ return -EBUSY;
+
+ /* Get hw descriptor from DMA tx ring */
+ desc_hw = &ring->desc_hw[ring->head];
+
+ /*
+ * Advance the head to point at the next
+ * descriptor for the next request
+ */
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ /* Copy prepared sw descriptor data to hw descriptor */
+ memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
+
+ /*
+ * If a 64B descriptor was prepared, we need
+ * one more hw descriptor to hold it
+ */
+ if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
+ desc_hw = &ring->desc_hw[ring->head];
+
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
+ }
+
+ /* Notify the hw that we have descriptor ready for execution */
+ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
+ 2 : 1, ring->cmd);
+
+ return 0;
+}
+
+/**
+ * xgene_chan_xfer_ld_pending - push any pending transactions to hw
+ * @chan: X-Gene DMA channel
+ *
+ * LOCKING: must hold chan->lock
+ */
+static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
+ int ret;
+
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ chan_dbg(chan, "No pending LDs\n");
+ return;
+ }
+
+ /*
+ * Move elements from the queue of pending transactions onto the list
+ * of running transactions and push it to hw for further executions
+ */
+ list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
+ /*
+ * If we have already pushed the maximum number of outstanding
+ * transactions to hw, stop here; the remaining elements of the
+ * pending ld queue will be pushed once some of the descriptors
+ * we have already submitted complete
+ */
+ if (chan->pending >= chan->max_outstanding)
+ return;
+
+ ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
+ if (ret)
+ return;
+
+ /*
+ * Delete this element from ld pending queue and append it to
+ * ld running queue
+ */
+ list_move_tail(&desc_sw->node, &chan->ld_running);
+
+ /* Increment the pending transaction count */
+ chan->pending++;
+ }
+}
+
+/**
+ * xgene_dma_cleanup_descriptors - cleanup link descriptors which are completed
+ * and move them to ld_completed, to be freed once the 'ack' flag is set
+ * @chan: X-Gene DMA channel
+ *
+ * This function is used on descriptors which have been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, then
+ * free these descriptors if flag 'ack' is set.
+ */
+static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_ring *ring = &chan->rx_ring;
+ struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
+ struct xgene_dma_desc_hw *desc_hw;
+ u8 status;
+
+ /* Clean already completed and acked descriptors */
+ xgene_dma_clean_completed_descriptor(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
+ /* Get subsequent hw descriptor from DMA rx ring */
+ desc_hw = &ring->desc_hw[ring->head];
+
+ /* Check if this descriptor has been completed */
+ if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw)))
+ break;
+
+ if (++ring->head == ring->slots)
+ ring->head = 0;
+
+ /* Check if we have any error with DMA transactions */
+ status = XGENE_DMA_DESC_STATUS(
+ XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
+ desc_hw->m0)),
+ XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
+ desc_hw->m0)));
+ if (status) {
+ /* Print the DMA error type */
+ chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
+
+ /*
+ * A DMA transaction error occurred. Dump the DMA Tx
+ * and Rx descriptors for this request.
+ */
+ XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
+ "X-Gene DMA TX DESC1: ");
+
+ if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
+ XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
+ "X-Gene DMA TX DESC2: ");
+
+ XGENE_DMA_DESC_DUMP(desc_hw,
+ "X-Gene DMA RX ERR DESC: ");
+ }
+
+ /* Notify the hw about this completed descriptor */
+ iowrite32(-1, ring->cmd);
+
+ /* Mark this hw descriptor as processed */
+ XGENE_DMA_DESC_SET_EMPTY(desc_hw);
+
+ xgene_dma_run_tx_complete_actions(chan, desc_sw);
+
+ xgene_dma_clean_running_descriptor(chan, desc_sw);
+
+ /*
+ * Decrement the pending transaction count
+ * as we have processed one
+ */
+ chan->pending--;
+ }
+
+ /*
+ * Start any pending transactions automatically
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ xgene_chan_xfer_ld_pending(chan);
+}
+
+static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ /* Has this channel already been allocated? */
+ if (chan->desc_pool)
+ return 1;
+
+ chan->desc_pool = dma_pool_create(chan->name, chan->dev,
+ sizeof(struct xgene_dma_desc_sw),
+ 0, 0);
+ if (!chan->desc_pool) {
+ chan_err(chan, "Failed to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
+
+ chan_dbg(chan, "Allocated descriptor pool\n");
+
+ return 1;
+}
+
+/**
+ * xgene_dma_free_desc_list - Free all descriptors in a queue
+ * @chan: X-Gene DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->lock
+ */
+static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node)
+ xgene_dma_clean_descriptor(chan, desc);
+}
+
+static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan,
+ struct list_head *list)
+{
+ struct xgene_dma_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, list, node)
+ xgene_dma_clean_descriptor(chan, desc);
+}
+
+static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ chan_dbg(chan, "Free all resources\n");
+
+ if (!chan->desc_pool)
+ return;
+
+ spin_lock_bh(&chan->lock);
+
+ /* Process all running descriptor */
+ xgene_dma_cleanup_descriptors(chan);
+
+ /* Clean all link descriptor queues */
+ xgene_dma_free_desc_list(chan, &chan->ld_pending);
+ xgene_dma_free_desc_list(chan, &chan->ld_running);
+ xgene_dma_free_desc_list(chan, &chan->ld_completed);
+
+ spin_unlock_bh(&chan->lock);
+
+ /* Delete this channel DMA pool */
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ size_t copy;
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Create the largest transaction possible */
+ copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
+
+ /* Prepare DMA descriptor */
+ xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Update metadata */
+ len -= copy;
+ dst += copy;
+ src += copy;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ u32 dst_nents, struct scatterlist *src_sg,
+ u32 src_nents, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new = NULL;
+ struct xgene_dma_chan *chan;
+ size_t dst_avail, src_avail;
+ dma_addr_t dst, src;
+ size_t len;
+
+ if (unlikely(!dchan))
+ return NULL;
+
+ if (unlikely(!dst_nents || !src_nents))
+ return NULL;
+
+ if (unlikely(!dst_sg || !src_sg))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ /* Get prepared for the loop */
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ dst_nents--;
+ src_nents--;
+
+ /* Run until we are out of scatterlist entries */
+ while (true) {
+ /* Create the largest transaction possible */
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
+ if (len == 0)
+ goto fetch;
+
+ dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
+ src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Prepare DMA descriptor */
+ xgene_dma_prep_cpy_desc(chan, new, dst, src, len);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* update metadata */
+ dst_avail -= len;
+ src_avail -= len;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+fetch:
+ /* fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ /* no more entries: we're done */
+ if (dst_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (!dst_sg)
+ break;
+
+ dst_nents--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ /* no more entries: we're done */
+ if (src_nents == 0)
+ break;
+
+ /* fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (!src_sg)
+ break;
+
+ src_nents--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ if (!new)
+ return NULL;
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
+ u32 src_cnt, size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01};
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ /* Prepare xor DMA descriptor */
+ xgene_dma_prep_xor_desc(chan, new, &dst, src,
+ src_cnt, &len, multi);
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+ } while (len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
+ struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
+ u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
+{
+ struct xgene_dma_desc_sw *first = NULL, *new;
+ struct xgene_dma_chan *chan;
+ size_t _len = len;
+ dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
+ static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
+
+ if (unlikely(!dchan || !len))
+ return NULL;
+
+ chan = to_dma_chan(dchan);
+
+ /*
+ * Save the source addresses in a local variable; we may have to
+ * prepare two descriptors to generate both P and Q if both are
+ * enabled in the flags by the client
+ */
+ memcpy(_src, src, sizeof(*src) * src_cnt);
+
+ if (flags & DMA_PREP_PQ_DISABLE_P)
+ len = 0;
+
+ if (flags & DMA_PREP_PQ_DISABLE_Q)
+ _len = 0;
+
+ do {
+ /* Allocate the link descriptor from DMA pool */
+ new = xgene_dma_alloc_descriptor(chan);
+ if (!new)
+ goto fail;
+
+ if (!first)
+ first = new;
+
+ new->tx.cookie = 0;
+ async_tx_ack(&new->tx);
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->tx_list);
+
+ /*
+ * Prepare DMA descriptor to generate P,
+ * if DMA_PREP_PQ_DISABLE_P flag is not set
+ */
+ if (len) {
+ xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
+ src_cnt, &len, multi);
+ continue;
+ }
+
+ /*
+ * Prepare DMA descriptor to generate Q,
+ * if DMA_PREP_PQ_DISABLE_Q flag is not set
+ */
+ if (_len) {
+ xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
+ src_cnt, &_len, scf);
+ }
+ } while (len || _len);
+
+ new->tx.flags = flags; /* client is in control of this ack */
+ new->tx.cookie = -EBUSY;
+ list_splice(&first->tx_list, &new->tx_list);
+
+ return &new->tx;
+
+fail:
+ if (!first)
+ return NULL;
+
+ xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+ return NULL;
+}
+
+static void xgene_dma_issue_pending(struct dma_chan *dchan)
+{
+ struct xgene_dma_chan *chan = to_dma_chan(dchan);
+
+ spin_lock_bh(&chan->lock);
+ xgene_chan_xfer_ld_pending(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(dchan, cookie, txstate);
+}
+
+static void xgene_dma_tasklet_cb(unsigned long data)
+{
+ struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
+
+ spin_lock_bh(&chan->lock);
+
+ /* Run all cleanup for descriptors which have been completed */
+ xgene_dma_cleanup_descriptors(chan);
+
+ /* Re-enable DMA channel IRQ */
+ enable_irq(chan->rx_irq);
+
+ spin_unlock_bh(&chan->lock);
+}
+
+static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
+{
+ struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
+
+ BUG_ON(!chan);
+
+ /*
+ * Disable DMA channel IRQ until we process completed
+ * descriptors
+ */
+ disable_irq_nosync(chan->rx_irq);
+
+ /*
+ * Schedule the tasklet to handle all cleanup of the current
+ * transaction. It will start a new transaction if there is
+ * one pending.
+ */
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgene_dma_err_isr(int irq, void *id)
+{
+ struct xgene_dma *pdma = (struct xgene_dma *)id;
+ unsigned long int_mask;
+ u32 val, i;
+
+ val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
+
+ /* Clear DMA interrupts */
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
+
+ /* Print DMA error info */
+ int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
+ for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
+ dev_err(pdma->dev,
+ "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
+
+ return IRQ_HANDLED;
+}
+
+static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
+{
+ int i;
+
+ iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
+
+ for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
+ iowrite32(ring->state[i], ring->pdma->csr_ring +
+ XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
+}
+
+static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
+{
+ memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
+ xgene_dma_wr_ring_state(ring);
+}
+
+static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
+{
+ void *ring_cfg = ring->state;
+ u64 addr = ring->desc_paddr;
+ void *desc;
+ u32 i, val;
+
+ ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
+
+ /* Clear DMA ring state */
+ xgene_dma_clr_ring_state(ring);
+
+ /* Set DMA ring type */
+ XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
+
+ if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
+ /* Set recombination buffer and timeout */
+ XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
+ XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
+ XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
+ }
+
+ /* Initialize DMA ring state */
+ XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
+ XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
+ XGENE_DMA_RING_COHERENT_SET(ring_cfg);
+ XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
+ XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
+ XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
+
+ /* Write DMA ring configurations */
+ xgene_dma_wr_ring_state(ring);
+
+ /* Set DMA ring id */
+ iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
+ ring->pdma->csr_ring + XGENE_DMA_RING_ID);
+
+ /* Set DMA ring buffer */
+ iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
+ ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
+
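+ /* The remaining setup (empty signatures, Rx interrupt) applies only to CPU-owned Rx rings */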
+ if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
+ return;
+
+ /* Set the empty signature on the DMA Rx ring descriptors */
+ for (i = 0; i < ring->slots; i++) {
+ desc = &ring->desc_hw[i];
+ XGENE_DMA_DESC_SET_EMPTY(desc);
+ }
+
+ /* Enable DMA Rx ring interrupt */
+ val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
+ XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
+ iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
+}
+
+static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
+{
+ u32 ring_id, val;
+
+ if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
+ /* Disable DMA Rx ring interrupt */
+ val = ioread32(ring->pdma->csr_ring +
+ XGENE_DMA_RING_NE_INT_MODE);
+ XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
+ iowrite32(val, ring->pdma->csr_ring +
+ XGENE_DMA_RING_NE_INT_MODE);
+ }
+
+ /* Clear DMA ring state */
+ ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
+ iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
+
+ iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
+ xgene_dma_clr_ring_state(ring);
+}
+
+static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
+{
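+ /* Command registers are indexed by ring number relative to the first DMA ring */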
+ ring->cmd_base = ring->pdma->csr_ring_cmd +
+ XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
+ XGENE_DMA_RING_NUM));
+
+ ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
+}
+
+static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
+ enum xgene_dma_ring_cfgsize cfgsize)
+{
+ int size;
+
+ switch (cfgsize) {
+ case XGENE_DMA_RING_CFG_SIZE_512B:
+ size = 0x200;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_2KB:
+ size = 0x800;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_16KB:
+ size = 0x4000;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_64KB:
+ size = 0x10000;
+ break;
+ case XGENE_DMA_RING_CFG_SIZE_512KB:
+ size = 0x80000;
+ break;
+ default:
+ chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
+{
+ /* Clear DMA ring configurations */
+ xgene_dma_clear_ring(ring);
+
+ /* De-allocate DMA ring descriptor */
+ if (ring->desc_vaddr) {
+ dma_free_coherent(ring->pdma->dev, ring->size,
+ ring->desc_vaddr, ring->desc_paddr);
+ ring->desc_vaddr = NULL;
+ }
+}
+
+static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
+{
+ xgene_dma_delete_ring_one(&chan->rx_ring);
+ xgene_dma_delete_ring_one(&chan->tx_ring);
+}
+
+static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
+ struct xgene_dma_ring *ring,
+ enum xgene_dma_ring_cfgsize cfgsize)
+{
+ /* Setup DMA ring descriptor variables */
+ ring->pdma = chan->pdma;
+ ring->cfgsize = cfgsize;
+ ring->num = chan->pdma->ring_num++;
+ ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
+
+ ring->size = xgene_dma_get_ring_size(chan, cfgsize);
+ if (ring->size <= 0)
+ return ring->size;
+
+ /* Allocate memory for DMA ring descriptor */
+ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
+ &ring->desc_paddr, GFP_KERNEL);
+ if (!ring->desc_vaddr) {
+ chan_err(chan, "Failed to allocate ring desc\n");
+ return -ENOMEM;
+ }
+
+ /* Configure and enable DMA ring */
+ xgene_dma_set_ring_cmd(ring);
+ xgene_dma_setup_ring(ring);
+
+ return 0;
+}
+
+static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
+{
+ struct xgene_dma_ring *rx_ring = &chan->rx_ring;
+ struct xgene_dma_ring *tx_ring = &chan->tx_ring;
+ int ret;
+
+ /* Create DMA Rx ring descriptor */
+ rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
+ rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
+
+ ret = xgene_dma_create_ring_one(chan, rx_ring,
+ XGENE_DMA_RING_CFG_SIZE_64KB);
+ if (ret)
+ return ret;
+
+ chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
+ rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
+
+ /* Create DMA Tx ring descriptor */
+ tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
+ tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
+
+ ret = xgene_dma_create_ring_one(chan, tx_ring,
+ XGENE_DMA_RING_CFG_SIZE_64KB);
+ if (ret) {
+ xgene_dma_delete_ring_one(rx_ring);
+ return ret;
+ }
+
+ tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
+
+ chan_dbg(chan,
+ "Tx ring id 0x%X num %d desc 0x%p\n",
+ tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
+
+ /* Set the maximum number of outstanding requests possible for this channel */
+ chan->max_outstanding = rx_ring->slots;
+
+ return ret;
+}
+
+static int xgene_dma_init_rings(struct xgene_dma *pdma)
+{
+ int ret, i, j;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
+ if (ret) {
+ for (j = 0; j < i; j++)
+ xgene_dma_delete_chan_rings(&pdma->chan[j]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void xgene_dma_enable(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ /* Configure and enable DMA engine */
+ val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
+ XGENE_DMA_CH_SETUP(val);
+ XGENE_DMA_ENABLE(val);
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
+}
+
+static void xgene_dma_disable(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
+ XGENE_DMA_DISABLE(val);
+ iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
+}
+
+static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
+{
+ /*
+ * Mask DMA ring overflow, underflow and
+ * AXI write/read error interrupts
+ */
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_MASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
+
+ /* Mask DMA error interrupts */
+ iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
+}
+
+static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
+{
+ /*
+ * Unmask DMA ring overflow, underflow and
+ * AXI write/read error interrupts
+ */
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
+
+ /* Unmask DMA error interrupts */
+ iowrite32(XGENE_DMA_INT_ALL_UNMASK,
+ pdma->csr_dma + XGENE_DMA_INT_MASK);
+}
+
+static void xgene_dma_init_hw(struct xgene_dma *pdma)
+{
+ u32 val;
+
+ /* Associate DMA ring to corresponding ring HW */
+ iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
+ pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
+
+ /* Configure RAID6 polynomial control setting */
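+ /* 0x1D is the GF(2^8) generator polynomial commonly used for RAID-6 Q parity */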
+ if (is_pq_enabled(pdma))
+ iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
+ pdma->csr_dma + XGENE_DMA_RAID6_CONT);
+ else
+ dev_info(pdma->dev, "PQ is disabled in HW\n");
+
+ xgene_dma_enable(pdma);
+ xgene_dma_unmask_interrupts(pdma);
+
+ /* Get DMA id and version info */
+ val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
+
+ /* DMA device info */
+ dev_info(pdma->dev,
+ "X-Gene DMA v%d.%02d.%02d driver registered %d channels",
+ XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
+ XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
+}
+
+static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
+{
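+ /*
+ * If the ring manager clock is already enabled and it is out of
+ * reset, it has been brought up already (e.g. by firmware), so the
+ * init sequence below can be skipped.
+ */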
+ if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
+ (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
+ return 0;
+
+ iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
+ iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
+
+ /* Bring up memory */
+ iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
+
+ /* Force a barrier */
+ ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
+
+ /* reset may take up to 1ms */
+ usleep_range(1000, 1100);
+
+ if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
+ != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
+ dev_err(pdma->dev,
+ "Failed to release ring mngr memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ /* program threshold set 1 and all hysteresis */
+ iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
+ iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
+ iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
+ pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
+
+ /* Enable QPcore and assign error queue */
+ iowrite32(XGENE_DMA_RING_ENABLE,
+ pdma->csr_ring + XGENE_DMA_RING_CONFIG);
+
+ return 0;
+}
+
+static int xgene_dma_init_mem(struct xgene_dma *pdma)
+{
+ int ret;
+
+ ret = xgene_dma_init_ring_mngr(pdma);
+ if (ret)
+ return ret;
+
+ /* Bring up memory */
+ iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
+
+ /* Force a barrier */
+ ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
+
+ /* reset may take up to 1ms */
+ usleep_range(1000, 1100);
+
+ if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
+ != XGENE_DMA_BLK_MEM_RDY_VAL) {
+ dev_err(pdma->dev,
+ "Failed to release DMA memory from shutdown\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int xgene_dma_request_irqs(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int ret, i, j;
+
+ /* Register DMA error irq */
+ ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
+ 0, "dma_error", pdma);
+ if (ret) {
+ dev_err(pdma->dev,
+ "Failed to register error IRQ %d\n", pdma->err_irq);
+ return ret;
+ }
+
+ /* Register DMA channel rx irq */
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ ret = devm_request_irq(chan->dev, chan->rx_irq,
+ xgene_dma_chan_ring_isr,
+ 0, chan->name, chan);
+ if (ret) {
+ chan_err(chan, "Failed to register Rx IRQ %d\n",
+ chan->rx_irq);
+ devm_free_irq(pdma->dev, pdma->err_irq, pdma);
+
+ for (j = 0; j < i; j++) {
+ chan = &pdma->chan[j];
+ devm_free_irq(chan->dev, chan->rx_irq, chan);
+ }
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void xgene_dma_free_irqs(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int i;
+
+ /* Free DMA device error irq */
+ devm_free_irq(pdma->dev, pdma->err_irq, pdma);
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ devm_free_irq(chan->dev, chan->rx_irq, chan);
+ }
+}
+
+static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
+ struct dma_device *dma_dev)
+{
+ /* Initialize DMA device capability mask */
+ dma_cap_zero(dma_dev->cap_mask);
+
+ /* Set DMA device capability */
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+
+ /* The X-Gene SoC DMA engine channel 0 supports XOR only, while
+ * channel 1 supports both XOR and PQ. PQ/XOR support on channel 1
+ * can be enabled or disabled in hardware, which we detect by reading
+ * the SoC efuse register. There is also a hardware erratum: running
+ * XOR on channel 0 and XOR/PQ on channel 1 simultaneously can hang
+ * the DMA engine. So we enable XOR on channel 0 only when XOR and PQ
+ * support on channel 1 is disabled.
+ */
+ if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
+ is_pq_enabled(chan->pdma)) {
+ dma_cap_set(DMA_PQ, dma_dev->cap_mask);
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
+ !is_pq_enabled(chan->pdma)) {
+ dma_cap_set(DMA_XOR, dma_dev->cap_mask);
+ }
+
+ /* Set base and prep routines */
+ dma_dev->dev = chan->dev;
+ dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
+ dma_dev->device_issue_pending = xgene_dma_issue_pending;
+ dma_dev->device_tx_status = xgene_dma_tx_status;
+ dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
+ dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
+
+ if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
+ dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
+ dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+ }
+
+ if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
+ dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
+ dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
+ dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+ }
+}
+
+static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
+{
+ struct xgene_dma_chan *chan = &pdma->chan[id];
+ struct dma_device *dma_dev = &pdma->dma_dev[id];
+ int ret;
+
+ chan->dma_chan.device = dma_dev;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->ld_pending);
+ INIT_LIST_HEAD(&chan->ld_running);
+ INIT_LIST_HEAD(&chan->ld_completed);
+ tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
+ (unsigned long)chan);
+
+ chan->pending = 0;
+ chan->desc_pool = NULL;
+ dma_cookie_init(&chan->dma_chan);
+
+ /* Setup dma device capabilities and prep routines */
+ xgene_dma_set_caps(chan, dma_dev);
+
+ /* Initialize DMA device list head */
+ INIT_LIST_HEAD(&dma_dev->channels);
+ list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
+
+ /* Register with Linux async DMA framework */
+ ret = dma_async_device_register(dma_dev);
+ if (ret) {
+ chan_err(chan, "Failed to register async device %d", ret);
+ tasklet_kill(&chan->tasklet);
+
+ return ret;
+ }
+
+ /* DMA capability info */
+ dev_info(pdma->dev,
+ "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan),
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
+ dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
+ dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
+
+ return 0;
+}
+
+static int xgene_dma_init_async(struct xgene_dma *pdma)
+{
+ int ret, i, j;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) {
+ ret = xgene_dma_async_register(pdma, i);
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ dma_async_device_unregister(&pdma->dma_dev[j]);
+ tasklet_kill(&pdma->chan[j].tasklet);
+ }
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void xgene_dma_async_unregister(struct xgene_dma *pdma)
+{
+ int i;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
+ dma_async_device_unregister(&pdma->dma_dev[i]);
+}
+
+static void xgene_dma_init_channels(struct xgene_dma *pdma)
+{
+ struct xgene_dma_chan *chan;
+ int i;
+
+ pdma->ring_num = XGENE_DMA_RING_NUM;
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ chan->dev = pdma->dev;
+ chan->pdma = pdma;
+ chan->id = i;
+ snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
+ }
+}
+
+static int xgene_dma_get_resources(struct platform_device *pdev,
+ struct xgene_dma *pdma)
+{
+ struct resource *res;
+ int irq, i;
+
+ /* Get DMA csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_dma) {
+ dev_err(&pdev->dev, "Failed to ioremap csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA ring csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get ring csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_ring) {
+ dev_err(&pdev->dev, "Failed to ioremap ring csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA ring cmd csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_ring_cmd) {
+ dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
+ return -ENOMEM;
+ }
+
+ /* Get efuse csr region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get efuse csr region\n");
+ return -ENXIO;
+ }
+
+ pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pdma->csr_efuse) {
+ dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
+ return -ENOMEM;
+ }
+
+ /* Get DMA error interrupt */
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get Error IRQ\n");
+ return -ENXIO;
+ }
+
+ pdma->err_irq = irq;
+
+ /* Get DMA Rx ring descriptor interrupts for all DMA channels */
+ for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
+ return -ENXIO;
+ }
+
+ pdma->chan[i - 1].rx_irq = irq;
+ }
+
+ return 0;
+}
+
+static int xgene_dma_probe(struct platform_device *pdev)
+{
+ struct xgene_dma *pdma;
+ int ret, i;
+
+ pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
+ if (!pdma)
+ return -ENOMEM;
+
+ pdma->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pdma);
+
+ ret = xgene_dma_get_resources(pdev, pdma);
+ if (ret)
+ return ret;
+
+ pdma->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pdma->clk)) {
+ dev_err(&pdev->dev, "Failed to get clk\n");
+ return PTR_ERR(pdma->clk);
+ }
+
+ /* Enable clk before accessing registers */
+ ret = clk_prepare_enable(pdma->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+ return ret;
+ }
+
+ /* Bring DMA RAM out of shutdown */
+ ret = xgene_dma_init_mem(pdma);
+ if (ret)
+ goto err_clk_enable;
+
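+ /* The DMA engine is limited to 42-bit addressing */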
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration\n");
+ goto err_dma_mask;
+ }
+
+ /* Initialize DMA channels software state */
+ xgene_dma_init_channels(pdma);
+
+ /* Configure DMA rings */
+ ret = xgene_dma_init_rings(pdma);
+ if (ret)
+ goto err_clk_enable;
+
+ ret = xgene_dma_request_irqs(pdma);
+ if (ret)
+ goto err_request_irq;
+
+ /* Configure and enable DMA engine */
+ xgene_dma_init_hw(pdma);
+
+ /* Register DMA device with linux async framework */
+ ret = xgene_dma_init_async(pdma);
+ if (ret)
+ goto err_async_init;
+
+ return 0;
+
+err_async_init:
+ xgene_dma_free_irqs(pdma);
+
+err_request_irq:
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
+ xgene_dma_delete_chan_rings(&pdma->chan[i]);
+
+err_dma_mask:
+err_clk_enable:
+ clk_disable_unprepare(pdma->clk);
+
+ return ret;
+}
+
+static int xgene_dma_remove(struct platform_device *pdev)
+{
+ struct xgene_dma *pdma = platform_get_drvdata(pdev);
+ struct xgene_dma_chan *chan;
+ int i;
+
+ xgene_dma_async_unregister(pdma);
+
+ /* Mask interrupts and disable DMA engine */
+ xgene_dma_mask_interrupts(pdma);
+ xgene_dma_disable(pdma);
+ xgene_dma_free_irqs(pdma);
+
+ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
+ chan = &pdma->chan[i];
+ tasklet_kill(&chan->tasklet);
+ xgene_dma_delete_chan_rings(chan);
+ }
+
+ clk_disable_unprepare(pdma->clk);
+
+ return 0;
+}
+
+static const struct of_device_id xgene_dma_of_match_ptr[] = {
+ {.compatible = "apm,xgene-storm-dma",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
+
+static struct platform_driver xgene_dma_driver = {
+ .probe = xgene_dma_probe,
+ .remove = xgene_dma_remove,
+ .driver = {
+ .name = "X-Gene-DMA",
+ .of_match_table = xgene_dma_of_match_ptr,
+ },
+};
+
+module_platform_driver(xgene_dma_driver);
+
+MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
+MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
+MODULE_AUTHOR("Loc Ho <lho@apm.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index bdd2a5dd7220..d8434d465885 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -22,9 +22,9 @@
* (at your option) any later version.
*/
-#include <linux/amba/xilinx_dma.h>
#include <linux/bitops.h>
#include <linux/dmapool.h>
+#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index ef5feeecec84..580e10acaa3a 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -538,8 +538,14 @@ struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
int flags)
{
- return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
- O_RDWR, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &armada_gem_prime_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7482b06cd08f..7fec191b45f7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -339,13 +339,17 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- struct reservation_object *robj = NULL;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &drm_gem_prime_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
if (dev->driver->gem_prime_res_obj)
- robj = dev->driver->gem_prime_res_obj(obj);
+ exp_info.resv = dev->driver->gem_prime_res_obj(obj);
- return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
- flags, robj);
+ return dma_buf_export(&exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 3833bf8ca025..cd485c091b30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -185,9 +185,14 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags)
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- return dma_buf_export(obj, &exynos_dmabuf_ops,
- exynos_gem_obj->base.size, flags, NULL);
+ exp_info.ops = &exynos_dmabuf_ops;
+ exp_info.size = exynos_gem_obj->base.size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c24c3f1ff8a3..c302ffb5a168 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1038,7 +1038,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
@@ -1120,7 +1120,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d07c0b1fb498..53394f998a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2377,10 +2377,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
ret = ring->add_request(ring);
if (ret)
return ret;
+
+ request->tail = intel_ring_get_tail(ringbuf);
}
request->head = request_start;
- request->tail = intel_ring_get_tail(ringbuf);
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82a1f4b57778..7998da27c500 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -230,6 +230,13 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &i915_dmabuf_ops;
+ exp_info.size = gem_obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem_obj;
+
if (obj->ops->dmabuf_export) {
int ret = obj->ops->dmabuf_export(obj);
@@ -237,8 +244,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags,
- NULL);
+ return dma_buf_export(&exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b522eb6e59a4..3da1af46625c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1807,6 +1807,7 @@ enum skl_disp_power_wells {
#define GMBUS_CYCLE_INDEX (2<<25)
#define GMBUS_CYCLE_STOP (4<<25)
#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b31088a551f2..56e437e31580 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
}
static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
- u8 *buf = msg->buf;
I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}
static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
- int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
u8 *buf = msg->buf;
+ unsigned int rx_size = msg->len;
+ unsigned int len;
+ int ret;
+
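+ /*
+ * The GMBUS byte count field limits a single cycle to
+ * GMBUS_BYTE_COUNT_MAX bytes, so larger reads are split into chunks.
+ */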
+ do {
+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+ buf, len, gmbus1_index);
+ if (ret)
+ return ret;
+
+ rx_size -= len;
+ buf += len;
+ } while (rx_size != 0);
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ unsigned int chunk_size = len;
u32 val, loop;
val = loop = 0;
@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
if (ret)
return ret;
}
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ u8 *buf = msg->buf;
+ unsigned int tx_size = msg->len;
+ unsigned int len;
+ int ret;
+
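+ /* Likewise, split writes into chunks of at most GMBUS_BYTE_COUNT_MAX bytes */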
+ do {
+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ if (ret)
+ return ret;
+
+ buf += len;
+ tx_size -= len;
+ } while (tx_size != 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index fcb074bd55dc..09df74b8e917 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -393,6 +393,26 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
}
}
+ if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+ /*
+ * WaIdleLiteRestore: make sure we never cause a lite
+ * restore with HEAD==TAIL
+ */
+ if (req0 && req0->elsp_submitted) {
+ /*
+ * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
+ * as we resubmit the request. See gen8_emit_request()
+ * for where we prepare the padding after the end of the
+ * request.
+ */
+ struct intel_ringbuffer *ringbuf;
+
+ ringbuf = req0->ctx->engine[ring->id].ringbuf;
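+ /* 8 bytes covers the two MI_NOOP dwords of padding emitted per request */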
+ req0->tail += 8;
+ req0->tail &= ringbuf->size - 1;
+ }
+ }
+
WARN_ON(req1 && req1->elsp_submitted);
execlists_submit_contexts(ring, req0->ctx, req0->tail,
@@ -1315,7 +1335,12 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
u32 cmd;
int ret;
- ret = intel_logical_ring_begin(ringbuf, request->ctx, 6);
+ /*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+ ret = intel_logical_ring_begin(ringbuf, request->ctx, 8);
if (ret)
return ret;
@@ -1333,6 +1358,14 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+ /*
+ * Here we add two extra NOOPs as padding to avoid
+ * lite restore of a context with HEAD==TAIL.
+ */
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_emit(ringbuf, MI_NOOP);
+ intel_logical_ring_advance(ringbuf);
+
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index b46dabd9faf7..344fd789170d 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -171,7 +171,14 @@ static struct dma_buf_ops omap_dmabuf_ops = {
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &omap_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index cfb481943b6b..1217272a51f2 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -627,8 +627,14 @@ struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
struct drm_gem_object *gem,
int flags)
{
- return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
- flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tegra_gem_prime_dmabuf_ops;
+ exp_info.size = gem->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem;
+
+ return dma_buf_export(&exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 12c87110db3a..4f5fa8d65fe9 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -683,6 +683,12 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &tdev->ops;
+ exp_info.size = prime->size;
+ exp_info.flags = flags;
+ exp_info.priv = prime;
/*
* Need to create a new dma_buf, with memory accounting.
@@ -694,8 +700,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
goto out_unref;
}
- dma_buf = dma_buf_export(prime, &tdev->ops,
- prime->size, flags, NULL);
+ dma_buf = dma_buf_export(&exp_info);
if (IS_ERR(dma_buf)) {
ret = PTR_ERR(dma_buf);
ttm_mem_global_free(tdev->mem_glob,
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ac8a66b4dfc2..e2243edd1ce3 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -202,7 +202,14 @@ static struct dma_buf_ops udl_dmabuf_ops = {
struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *obj, int flags)
{
- return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &udl_dmabuf_ops;
+ exp_info.size = obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = obj;
+
+ return dma_buf_export(&exp_info);
}
static int udl_prime_create(struct drm_device *dev,
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 875c22ae5400..fa8dedd8c3a2 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -182,72 +182,41 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
const u16 bus_num = bus->remote_bus;
int request_len;
int response_len;
- u8 *request = NULL;
- u8 *response = NULL;
int result;
- struct cros_ec_command msg;
+ struct cros_ec_command msg = { };
request_len = ec_i2c_count_message(i2c_msgs, num);
if (request_len < 0) {
dev_warn(dev, "Error constructing message %d\n", request_len);
- result = request_len;
- goto exit;
+ return request_len;
}
+
response_len = ec_i2c_count_response(i2c_msgs, num);
if (response_len < 0) {
/* Unexpected; no errors should come when NULL response */
dev_warn(dev, "Error preparing response %d\n", response_len);
- result = response_len;
- goto exit;
- }
-
- if (request_len <= ARRAY_SIZE(bus->request_buf)) {
- request = bus->request_buf;
- } else {
- request = kzalloc(request_len, GFP_KERNEL);
- if (request == NULL) {
- result = -ENOMEM;
- goto exit;
- }
- }
- if (response_len <= ARRAY_SIZE(bus->response_buf)) {
- response = bus->response_buf;
- } else {
- response = kzalloc(response_len, GFP_KERNEL);
- if (response == NULL) {
- result = -ENOMEM;
- goto exit;
- }
+ return response_len;
}
- result = ec_i2c_construct_message(request, i2c_msgs, num, bus_num);
+ result = ec_i2c_construct_message(msg.outdata, i2c_msgs, num, bus_num);
if (result)
- goto exit;
+ return result;
msg.version = 0;
msg.command = EC_CMD_I2C_PASSTHRU;
- msg.outdata = request;
msg.outsize = request_len;
- msg.indata = response;
msg.insize = response_len;
result = cros_ec_cmd_xfer(bus->ec, &msg);
if (result < 0)
- goto exit;
+ return result;
- result = ec_i2c_parse_response(response, i2c_msgs, &num);
+ result = ec_i2c_parse_response(msg.indata, i2c_msgs, &num);
if (result < 0)
- goto exit;
+ return result;
/* Indicate success by saying how many messages were sent */
- result = num;
-exit:
- if (request != bus->request_buf)
- kfree(request);
- if (response != bus->response_buf)
- kfree(response);
-
- return result;
+ return num;
}
static u32 ec_i2c_functionality(struct i2c_adapter *adap)
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c
index 03f1e5549896..9604024e0eb0 100644
--- a/drivers/i2c/busses/i2c-digicolor.c
+++ b/drivers/i2c/busses/i2c-digicolor.c
@@ -12,11 +12,10 @@
#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 56fceff6ba14..3e84f6c090a5 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -913,7 +913,7 @@ static void __exit mxs_i2c_exit(void)
module_exit(mxs_i2c_exit);
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("MXS I2C Bus Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 6336f02ec566..3bd2e7d06e4b 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -285,6 +285,6 @@ static struct platform_driver i2c_pca_pf_driver = {
module_platform_driver(i2c_pca_pf_driver);
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5f96b1b3e3a5..019d5426fe52 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
clk_disable(i2c->clk);
spin_unlock_irqrestore(&i2c->lock, flags);
- return ret;
+ return ret < 0 ? ret : num;
}
static u32 rk3x_i2c_func(struct i2c_adapter *adap)
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 88057fad9dfe..ea72dca32fdf 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -10,17 +10,18 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
#include <linux/clk.h>
-#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
#include <linux/err.h>
-#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
/* SSC registers */
#define SSC_BRG 0x000
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 098f698fe8f4..987c124432c5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1413,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
+ pm_runtime_no_callbacks(&adap->dev);
+
#ifdef CONFIG_I2C_COMPAT
res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
adap->dev.parent);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 593f7ca9adc7..06cc1ff088f1 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -32,8 +32,9 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;
struct i2c_adapter *parent;
- void *mux_priv; /* the mux chip/device */
- u32 chan_id; /* the channel id */
+ struct device *mux_dev;
+ void *mux_priv;
+ u32 chan_id;
int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/* Set up private adapter data */
priv->parent = parent;
+ priv->mux_dev = mux_dev;
priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
char symlink_name[20];
snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
+ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 8c014b5dab4c..38acb3cfc545 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
- if ((PAGE_ALIGN(addr + size) <= size) ||
- (PAGE_ALIGN(addr + size) <= addr))
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size))
return ERR_PTR(-EINVAL);
if (!can_do_mlock())
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 259dcc7779f5..88cce9bb72fe 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -246,6 +246,17 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uqp);
}
+ list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
+ struct ib_srq *srq = uobj->object;
+ struct ib_uevent_object *uevent =
+ container_of(uobj, struct ib_uevent_object, uobject);
+
+ idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
+ ib_destroy_srq(srq);
+ ib_uverbs_release_uevent(file, uevent);
+ kfree(uevent);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
struct ib_cq *cq = uobj->object;
struct ib_uverbs_event_file *ev_file = cq->cq_context;
@@ -258,17 +269,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(ucq);
}
- list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
- struct ib_srq *srq = uobj->object;
- struct ib_uevent_object *uevent =
- container_of(uobj, struct ib_uevent_object, uobject);
-
- idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
- ib_destroy_srq(srq);
- ib_uverbs_release_uevent(file, uevent);
- kfree(uevent);
- }
-
list_for_each_entry_safe(uobj, tmp, &context->mr_list, list) {
struct ib_mr *mr = uobj->object;
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 33c45dfcbd88..1ca8e32a9592 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -82,14 +82,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
- error = ipathfs_mknod(parent->d_inode, *dentry,
+ error = ipathfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return error;
}
@@ -277,11 +277,11 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && tmp->d_inode) {
+ if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
dget_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- simple_unlink(parent->d_inode, tmp);
+ simple_unlink(d_inode(parent), tmp);
} else
spin_unlock(&tmp->d_lock);
@@ -302,7 +302,7 @@ static int remove_device_files(struct super_block *sb,
int ret;
root = dget(sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -315,10 +315,10 @@ static int remove_device_files(struct super_block *sb,
remove_file(dir, "flash");
remove_file(dir, "atomic_counters");
d_delete(dir);
- ret = simple_rmdir(root->d_inode, dir);
+ ret = simple_rmdir(d_inode(root), dir);
bail:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index a31e031afd87..0f00204d2ece 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -58,14 +58,19 @@ struct mlx4_alias_guid_work_context {
int query_id;
struct list_head list;
int block_num;
+ ib_sa_comp_mask guid_indexes;
+ u8 method;
};
struct mlx4_next_alias_guid_work {
u8 port;
u8 block_num;
+ u8 method;
struct mlx4_sriov_alias_guid_info_rec_det rec_det;
};
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+ int *resched_delay_sec);
void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev, int block_num,
u8 port_num, u8 *p_data)
@@ -118,6 +123,57 @@ ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index)
return IB_SA_COMP_MASK(4 + index);
}
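+/*
+ * Update the cached admin GUID for @slave on @port according to
+ * @slave_init (store the admin GUID or mark it for deletion) and, when a
+ * change needs to reach the SM, schedule the alias GUID work to run
+ * immediately.
+ */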
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+ int port, int slave_init)
+{
+ __be64 curr_guid, required_guid;
+ int record_num = slave / 8;
+ int index = slave % 8;
+ int port_index = port - 1;
+ unsigned long flags;
+ int do_work = 0;
+
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ if (dev->sriov.alias_guid.ports_guid[port_index].state_flags &
+ GUID_STATE_NEED_PORT_INIT)
+ goto unlock;
+ if (!slave_init) {
+ curr_guid = *(__be64 *)&dev->sriov.
+ alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ all_recs[GUID_REC_SIZE * index];
+ if (curr_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL) ||
+ !curr_guid)
+ goto unlock;
+ required_guid = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
+ } else {
+ required_guid = mlx4_get_admin_guid(dev->dev, slave, port);
+ if (required_guid == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ goto unlock;
+ }
+ *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ all_recs[GUID_REC_SIZE * index] = required_guid;
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].guid_indexes
+ |= mlx4_ib_get_aguid_comp_mask_from_ix(index);
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].status
+ = MLX4_GUID_INFO_STATUS_IDLE;
+ /* set to run immediately */
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].time_to_run = 0;
+ dev->sriov.alias_guid.ports_guid[port_index].
+ all_rec_per_port[record_num].
+ guids_retry_schedule[index] = 0;
+ do_work = 1;
+unlock:
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+
+ if (do_work)
+ mlx4_ib_init_alias_guid_work(dev, port_index);
+}
+
/*
* Whenever new GUID is set/unset (guid table change) create event and
* notify the relevant slave (master also should be notified).
@@ -138,10 +194,15 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
enum slave_port_state prev_state;
__be64 tmp_cur_ag, form_cache_ag;
enum slave_port_gen_event gen_event;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec;
+ unsigned long flags;
+ __be64 required_value;
if (!mlx4_is_master(dev->dev))
return;
+ rec = &dev->sriov.alias_guid.ports_guid[port_num - 1].
+ all_rec_per_port[block_num];
guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid.
ports_guid[port_num - 1].
all_rec_per_port[block_num].guid_indexes);
@@ -166,8 +227,27 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
*/
if (tmp_cur_ag != form_cache_ag)
continue;
- mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ required_value = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+
+ if (required_value == cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ required_value = 0;
+
+ if (tmp_cur_ag == required_value) {
+ rec->guid_indexes = rec->guid_indexes &
+ ~mlx4_ib_get_aguid_comp_mask_from_ix(i);
+ } else {
+ /* may notify port down if value is 0 */
+ if (tmp_cur_ag != MLX4_NOT_SET_GUID) {
+ spin_unlock_irqrestore(&dev->sriov.
+ alias_guid.ag_work_lock, flags);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock,
+ flags);
+ mlx4_gen_guid_change_eqe(dev->dev, slave_id, port_num);
/*2 cases: Valid GUID, and Invalid Guid*/
if (tmp_cur_ag != MLX4_NOT_SET_GUID) { /*valid GUID*/
@@ -188,10 +268,14 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
set_and_calc_slave_port_state(dev->dev, slave_id, port_num,
MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
&gen_event);
- pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
- slave_id, port_num);
- mlx4_gen_port_state_change_eqe(dev->dev, slave_id, port_num,
- MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+ if (gen_event == SLAVE_PORT_GEN_EVENT_DOWN) {
+ pr_debug("sending PORT DOWN event to slave: %d, port: %d\n",
+ slave_id, port_num);
+ mlx4_gen_port_state_change_eqe(dev->dev,
+ slave_id,
+ port_num,
+ MLX4_PORT_CHANGE_SUBTYPE_DOWN);
+ }
}
}
}
@@ -206,6 +290,9 @@ static void aliasguid_query_handler(int status,
int i;
struct mlx4_sriov_alias_guid_info_rec_det *rec;
unsigned long flags, flags1;
+ ib_sa_comp_mask declined_guid_indexes = 0;
+ ib_sa_comp_mask applied_guid_indexes = 0;
+ unsigned int resched_delay_sec = 0;
if (!context)
return;
@@ -216,9 +303,9 @@ static void aliasguid_query_handler(int status,
all_rec_per_port[cb_ctx->block_num];
if (status) {
- rec->status = MLX4_GUID_INFO_STATUS_IDLE;
pr_debug("(port: %d) failed: status = %d\n",
cb_ctx->port, status);
+ rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
goto out;
}
@@ -235,57 +322,101 @@ static void aliasguid_query_handler(int status,
rec = &dev->sriov.alias_guid.ports_guid[port_index].
all_rec_per_port[guid_rec->block_num];
- rec->status = MLX4_GUID_INFO_STATUS_SET;
- rec->method = MLX4_GUID_INFO_RECORD_SET;
-
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
for (i = 0 ; i < NUM_ALIAS_GUID_IN_REC; i++) {
- __be64 tmp_cur_ag;
- tmp_cur_ag = *(__be64 *)&guid_rec->guid_info_list[i * GUID_REC_SIZE];
+ __be64 sm_response, required_val;
+
+ if (!(cb_ctx->guid_indexes &
+ mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+ continue;
+ sm_response = *(__be64 *)&guid_rec->guid_info_list
+ [i * GUID_REC_SIZE];
+ required_val = *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE];
+ if (cb_ctx->method == MLX4_GUID_INFO_RECORD_DELETE) {
+ if (required_val ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ goto next_entry;
+
+ /* A new value was set till we got the response */
+ pr_debug("need to set new value %llx, record num %d, block_num:%d\n",
+ be64_to_cpu(required_val),
+ i, guid_rec->block_num);
+ goto entry_declined;
+ }
+
/* check if the SM didn't assign one of the records.
- * if it didn't, if it was not sysadmin request:
- * ask the SM to give a new GUID, (instead of the driver request).
+ * if it didn't, ask for it again.
*/
- if (tmp_cur_ag == MLX4_NOT_SET_GUID) {
- mlx4_ib_warn(&dev->ib_dev, "%s:Record num %d in "
- "block_num: %d was declined by SM, "
- "ownership by %d (0 = driver, 1=sysAdmin,"
- " 2=None)\n", __func__, i,
- guid_rec->block_num, rec->ownership);
- if (rec->ownership == MLX4_GUID_DRIVER_ASSIGN) {
- /* if it is driver assign, asks for new GUID from SM*/
- *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
- MLX4_NOT_SET_GUID;
-
- /* Mark the record as not assigned, and let it
- * be sent again in the next work sched.*/
- rec->status = MLX4_GUID_INFO_STATUS_IDLE;
- rec->guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
- }
+ if (sm_response == MLX4_NOT_SET_GUID) {
+ if (rec->guids_retry_schedule[i] == 0)
+ mlx4_ib_warn(&dev->ib_dev,
+ "%s:Record num %d in block_num: %d was declined by SM\n",
+ __func__, i,
+ guid_rec->block_num);
+ goto entry_declined;
} else {
/* properly assigned record. */
/* We save the GUID we just got from the SM in the
* admin_guid in order to be persistent, and in the
* request from the sm the process will ask for the same GUID */
- if (rec->ownership == MLX4_GUID_SYSADMIN_ASSIGN &&
- tmp_cur_ag != *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE]) {
- /* the sysadmin assignment failed.*/
- mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
- " admin guid after SysAdmin "
- "configuration. "
- "Record num %d in block_num:%d "
- "was declined by SM, "
- "new val(0x%llx) was kept\n",
- __func__, i,
- guid_rec->block_num,
- be64_to_cpu(*(__be64 *) &
- rec->all_recs[i * GUID_REC_SIZE]));
+ if (required_val &&
+ sm_response != required_val) {
+ /* Warn only on first retry */
+ if (rec->guids_retry_schedule[i] == 0)
+ mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
+ " admin guid after SysAdmin "
+ "configuration. "
+ "Record num %d in block_num:%d "
+ "was declined by SM, "
+ "new val(0x%llx) was kept, SM returned (0x%llx)\n",
+ __func__, i,
+ guid_rec->block_num,
+ be64_to_cpu(required_val),
+ be64_to_cpu(sm_response));
+ goto entry_declined;
} else {
- memcpy(&rec->all_recs[i * GUID_REC_SIZE],
- &guid_rec->guid_info_list[i * GUID_REC_SIZE],
- GUID_REC_SIZE);
+ *(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] =
+ sm_response;
+ if (required_val == 0)
+ mlx4_set_admin_guid(dev->dev,
+ sm_response,
+ (guid_rec->block_num
+ * NUM_ALIAS_GUID_IN_REC) + i,
+ cb_ctx->port);
+ goto next_entry;
}
}
+entry_declined:
+ declined_guid_indexes |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
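+ /* Exponential backoff for retries: 1s after the first decline, doubling up to a 60s cap */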
+ rec->guids_retry_schedule[i] =
+ (rec->guids_retry_schedule[i] == 0) ? 1 :
+ min((unsigned int)60,
+ rec->guids_retry_schedule[i] * 2);
+ /* using the minimum value among all entries in that record */
+ resched_delay_sec = (resched_delay_sec == 0) ?
+ rec->guids_retry_schedule[i] :
+ min(resched_delay_sec,
+ rec->guids_retry_schedule[i]);
+ continue;
+
+next_entry:
+ rec->guids_retry_schedule[i] = 0;
}
+
+ applied_guid_indexes = cb_ctx->guid_indexes & ~declined_guid_indexes;
+ if (declined_guid_indexes ||
+ rec->guid_indexes & ~(applied_guid_indexes)) {
+ pr_debug("record=%d wasn't fully set, guid_indexes=0x%llx applied_indexes=0x%llx, declined_indexes=0x%llx\n",
+ guid_rec->block_num,
+ be64_to_cpu((__force __be64)rec->guid_indexes),
+ be64_to_cpu((__force __be64)applied_guid_indexes),
+ be64_to_cpu((__force __be64)declined_guid_indexes));
+ rec->time_to_run = ktime_get_real_ns() +
+ resched_delay_sec * NSEC_PER_SEC;
+ } else {
+ rec->status = MLX4_GUID_INFO_STATUS_SET;
+ }
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
/*
The func is call here to close the cases when the
sm doesn't send smp, so in the sa response the driver
@@ -297,10 +428,13 @@ static void aliasguid_query_handler(int status,
out:
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
- if (!dev->sriov.is_going_down)
+ if (!dev->sriov.is_going_down) {
+ get_low_record_time_index(dev, port_index, &resched_delay_sec);
queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
&dev->sriov.alias_guid.ports_guid[port_index].
- alias_guid_work, 0);
+ alias_guid_work,
+ msecs_to_jiffies(resched_delay_sec * 1000));
+ }
if (cb_ctx->sa_query) {
list_del(&cb_ctx->list);
kfree(cb_ctx);
@@ -317,9 +451,7 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
ib_sa_comp_mask comp_mask = 0;
dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status
- = MLX4_GUID_INFO_STATUS_IDLE;
- dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].method
- = MLX4_GUID_INFO_RECORD_SET;
+ = MLX4_GUID_INFO_STATUS_SET;
/* calculate the comp_mask for that record.*/
for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
@@ -333,19 +465,21 @@ static void invalidate_guid_record(struct mlx4_ib_dev *dev, u8 port, int index)
need to assign GUIDs, then don't put it up for assignment.
*/
if (MLX4_GUID_FOR_DELETE_VAL == cur_admin_val ||
- (!index && !i) ||
- MLX4_GUID_NONE_ASSIGN == dev->sriov.alias_guid.
- ports_guid[port - 1].all_rec_per_port[index].ownership)
+ (!index && !i))
continue;
comp_mask |= mlx4_ib_get_aguid_comp_mask_from_ix(i);
}
dev->sriov.alias_guid.ports_guid[port - 1].
- all_rec_per_port[index].guid_indexes = comp_mask;
+ all_rec_per_port[index].guid_indexes |= comp_mask;
+ if (dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[index].guid_indexes)
+ dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[index].status = MLX4_GUID_INFO_STATUS_IDLE;
+
}
static int set_guid_rec(struct ib_device *ibdev,
- u8 port, int index,
- struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+ struct mlx4_next_alias_guid_work *rec)
{
int err;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
@@ -354,6 +488,9 @@ static int set_guid_rec(struct ib_device *ibdev,
struct ib_port_attr attr;
struct mlx4_alias_guid_work_context *callback_context;
unsigned long resched_delay, flags, flags1;
+ u8 port = rec->port + 1;
+ int index = rec->block_num;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec_det = &rec->rec_det;
struct list_head *head =
&dev->sriov.alias_guid.ports_guid[port - 1].cb_list;
@@ -380,6 +517,8 @@ static int set_guid_rec(struct ib_device *ibdev,
callback_context->port = port;
callback_context->dev = dev;
callback_context->block_num = index;
+ callback_context->guid_indexes = rec_det->guid_indexes;
+ callback_context->method = rec->method;
memset(&guid_info_rec, 0, sizeof (struct ib_sa_guidinfo_rec));
@@ -399,7 +538,7 @@ static int set_guid_rec(struct ib_device *ibdev,
callback_context->query_id =
ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client,
ibdev, port, &guid_info_rec,
- comp_mask, rec_det->method, 1000,
+ comp_mask, rec->method, 1000,
GFP_KERNEL, aliasguid_query_handler,
callback_context,
&callback_context->sa_query);
@@ -434,6 +573,30 @@ out:
return err;
}
+static void mlx4_ib_guid_port_init(struct mlx4_ib_dev *dev, int port)
+{
+ int j, k, entry;
+ __be64 guid;
+
+ /*Check if the SM doesn't need to assign the GUIDs*/
+ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+ for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
+ entry = j * NUM_ALIAS_GUID_IN_REC + k;
+ /* no request for the 0 entry (hw guid) */
+ if (!entry || entry > dev->dev->persist->num_vfs ||
+ !mlx4_is_slave_active(dev->dev, entry))
+ continue;
+ guid = mlx4_get_admin_guid(dev->dev, entry, port);
+ *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1].
+ all_rec_per_port[j].all_recs
+ [GUID_REC_SIZE * k] = guid;
+ pr_debug("guid was set, entry=%d, val=0x%llx, port=%d\n",
+ entry,
+ be64_to_cpu(guid),
+ port);
+ }
+ }
+}
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
{
int i;
@@ -443,6 +606,13 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
+
+ if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags &
+ GUID_STATE_NEED_PORT_INIT) {
+ mlx4_ib_guid_port_init(dev, port);
+ dev->sriov.alias_guid.ports_guid[port - 1].state_flags &=
+ (~GUID_STATE_NEED_PORT_INIT);
+ }
for (i = 0; i < NUM_ALIAS_GUID_REC_IN_PORT; i++)
invalidate_guid_record(dev, port, i);
@@ -462,60 +632,107 @@ void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
-/* The function returns the next record that was
- * not configured (or failed to be configured) */
-static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
- struct mlx4_next_alias_guid_work *rec)
+static void set_required_record(struct mlx4_ib_dev *dev, u8 port,
+ struct mlx4_next_alias_guid_work *next_rec,
+ int record_index)
{
- int j;
- unsigned long flags;
+ int i;
+ int lowset_time_entry = -1;
+ int lowest_time = 0;
+ ib_sa_comp_mask delete_guid_indexes = 0;
+ ib_sa_comp_mask set_guid_indexes = 0;
+ struct mlx4_sriov_alias_guid_info_rec_det *rec =
+ &dev->sriov.alias_guid.ports_guid[port].
+ all_rec_per_port[record_index];
- for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
- spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
- if (dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status ==
- MLX4_GUID_INFO_STATUS_IDLE) {
- memcpy(&rec->rec_det,
- &dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j],
- sizeof (struct mlx4_sriov_alias_guid_info_rec_det));
- rec->port = port;
- rec->block_num = j;
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[j].status =
- MLX4_GUID_INFO_STATUS_PENDING;
- spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
- return 0;
+ for (i = 0; i < NUM_ALIAS_GUID_IN_REC; i++) {
+ if (!(rec->guid_indexes &
+ mlx4_ib_get_aguid_comp_mask_from_ix(i)))
+ continue;
+
+ if (*(__be64 *)&rec->all_recs[i * GUID_REC_SIZE] ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL))
+ delete_guid_indexes |=
+ mlx4_ib_get_aguid_comp_mask_from_ix(i);
+ else
+ set_guid_indexes |=
+ mlx4_ib_get_aguid_comp_mask_from_ix(i);
+
+ if (lowset_time_entry == -1 || rec->guids_retry_schedule[i] <=
+ lowest_time) {
+ lowset_time_entry = i;
+ lowest_time = rec->guids_retry_schedule[i];
}
- spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
}
- return -ENOENT;
+
+ memcpy(&next_rec->rec_det, rec, sizeof(*rec));
+ next_rec->port = port;
+ next_rec->block_num = record_index;
+
+ if (*(__be64 *)&rec->all_recs[lowset_time_entry * GUID_REC_SIZE] ==
+ cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL)) {
+ next_rec->rec_det.guid_indexes = delete_guid_indexes;
+ next_rec->method = MLX4_GUID_INFO_RECORD_DELETE;
+ } else {
+ next_rec->rec_det.guid_indexes = set_guid_indexes;
+ next_rec->method = MLX4_GUID_INFO_RECORD_SET;
+ }
}
-static void set_administratively_guid_record(struct mlx4_ib_dev *dev, int port,
- int rec_index,
- struct mlx4_sriov_alias_guid_info_rec_det *rec_det)
+/* return index of record that should be updated based on lowest
+ * rescheduled time
+ */
+static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
+ int *resched_delay_sec)
{
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].guid_indexes =
- rec_det->guid_indexes;
- memcpy(dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].all_recs,
- rec_det->all_recs, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
- dev->sriov.alias_guid.ports_guid[port].all_rec_per_port[rec_index].status =
- rec_det->status;
+ int record_index = -1;
+ u64 low_record_time = 0;
+ struct mlx4_sriov_alias_guid_info_rec_det rec;
+ int j;
+
+ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
+ rec = dev->sriov.alias_guid.ports_guid[port].
+ all_rec_per_port[j];
+ if (rec.status == MLX4_GUID_INFO_STATUS_IDLE &&
+ rec.guid_indexes) {
+ if (record_index == -1 ||
+ rec.time_to_run < low_record_time) {
+ record_index = j;
+ low_record_time = rec.time_to_run;
+ }
+ }
+ }
+ if (resched_delay_sec) {
+ u64 curr_time = ktime_get_real_ns();
+
+ *resched_delay_sec = (low_record_time < curr_time) ? 0 :
+ div_u64((low_record_time - curr_time), NSEC_PER_SEC);
+ }
+
+ return record_index;
}
-static void set_all_slaves_guids(struct mlx4_ib_dev *dev, int port)
+/* The function returns the next record that was
+ * not configured (or failed to be configured) */
+static int get_next_record_to_update(struct mlx4_ib_dev *dev, u8 port,
+ struct mlx4_next_alias_guid_work *rec)
{
- int j;
- struct mlx4_sriov_alias_guid_info_rec_det rec_det ;
-
- for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT ; j++) {
- memset(rec_det.all_recs, 0, NUM_ALIAS_GUID_IN_REC * GUID_REC_SIZE);
- rec_det.guid_indexes = (!j ? 0 : IB_SA_GUIDINFO_REC_GID0) |
- IB_SA_GUIDINFO_REC_GID1 | IB_SA_GUIDINFO_REC_GID2 |
- IB_SA_GUIDINFO_REC_GID3 | IB_SA_GUIDINFO_REC_GID4 |
- IB_SA_GUIDINFO_REC_GID5 | IB_SA_GUIDINFO_REC_GID6 |
- IB_SA_GUIDINFO_REC_GID7;
- rec_det.status = MLX4_GUID_INFO_STATUS_IDLE;
- set_administratively_guid_record(dev, port, j, &rec_det);
+ unsigned long flags;
+ int record_index;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);
+ record_index = get_low_record_time_index(dev, port, NULL);
+
+ if (record_index < 0) {
+ ret = -ENOENT;
+ goto out;
}
+
+ set_required_record(dev, port, rec, record_index);
+out:
+ spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
+ return ret;
}
static void alias_guid_work(struct work_struct *work)
@@ -545,9 +762,7 @@ static void alias_guid_work(struct work_struct *work)
goto out;
}
- set_guid_rec(&dev->ib_dev, rec->port + 1, rec->block_num,
- &rec->rec_det);
-
+ set_guid_rec(&dev->ib_dev, rec);
out:
kfree(rec);
}
@@ -562,6 +777,12 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
if (!dev->sriov.is_going_down) {
+ /* If a work is already pending, cancel it and requeue; otherwise
+ * the new request won't run until the previous one finishes, since
+ * the same work struct is used.
+ */
+ cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port].
+ alias_guid_work);
queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
&dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0);
}
@@ -609,7 +830,7 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
{
char alias_wq_name[15];
int ret = 0;
- int i, j, k;
+ int i, j;
union ib_gid gid;
if (!mlx4_is_master(dev->dev))
@@ -633,33 +854,25 @@ int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev)
for (i = 0 ; i < dev->num_ports; i++) {
memset(&dev->sriov.alias_guid.ports_guid[i], 0,
sizeof (struct mlx4_sriov_alias_guid_port_rec_det));
- /*Check if the SM doesn't need to assign the GUIDs*/
+ dev->sriov.alias_guid.ports_guid[i].state_flags |=
+ GUID_STATE_NEED_PORT_INIT;
for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) {
- if (mlx4_ib_sm_guid_assign) {
- dev->sriov.alias_guid.ports_guid[i].
- all_rec_per_port[j].
- ownership = MLX4_GUID_DRIVER_ASSIGN;
- continue;
- }
- dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j].
- ownership = MLX4_GUID_NONE_ASSIGN;
- /*mark each val as it was deleted,
- till the sysAdmin will give it valid val*/
- for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) {
- *(__be64 *)&dev->sriov.alias_guid.ports_guid[i].
- all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] =
- cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL);
- }
+ /* mark each value as deleted */
+ memset(dev->sriov.alias_guid.ports_guid[i].
+ all_rec_per_port[j].all_recs, 0xFF,
+ sizeof(dev->sriov.alias_guid.ports_guid[i].
+ all_rec_per_port[j].all_recs));
}
INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list);
/*prepare the records, set them to be allocated by sm*/
+ if (mlx4_ib_sm_guid_assign)
+ for (j = 1; j < NUM_ALIAS_GUID_PER_PORT; j++)
+ mlx4_set_admin_guid(dev->dev, 0, j, i + 1);
for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++)
invalidate_guid_record(dev, i + 1, j);
dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid;
dev->sriov.alias_guid.ports_guid[i].port = i;
- if (mlx4_ib_sm_guid_assign)
- set_all_slaves_guids(dev, i);
snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i);
dev->sriov.alias_guid.ports_guid[i].wq =
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 59040265e361..9cd2b002d7ae 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1430,6 +1430,10 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->ring[i].addr,
rx_buf_size,
DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
+ kfree(tun_qp->ring[i].addr);
+ goto err;
+ }
}
for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
@@ -1442,6 +1446,11 @@ static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->tx_ring[i].buf.addr,
tx_buf_size,
DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(ctx->ib_dev,
+ tun_qp->tx_ring[i].buf.map)) {
+ kfree(tun_qp->tx_ring[i].buf.addr);
+ goto tx_err;
+ }
tun_qp->tx_ring[i].ah = NULL;
}
spin_lock_init(&tun_qp->tx_lock);
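The two hunks above apply the same pattern: every ib_dma_map_single() is now followed by an ib_dma_mapping_error() check, and the buffer is freed on failure before bailing out. A stand-alone sketch of the pattern (function and parameter names are illustrative):

	static int example_map_rx_buf(struct ib_device *ibdev, void *buf,
				      size_t len, u64 *mapping)
	{
		*mapping = ib_dma_map_single(ibdev, buf, len, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ibdev, *mapping)) {
			kfree(buf);		/* assumes the buffer came from kmalloc() */
			return -ENOMEM;
		}
		return 0;
	}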
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 976bea794b5f..57070c529dfb 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -66,9 +66,9 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-int mlx4_ib_sm_guid_assign = 1;
+int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
-MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
+MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
@@ -2791,9 +2791,31 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_SLAVE_INIT:
/* here, p is the slave id */
do_slave_init(ibdev, p, 1);
+ if (mlx4_is_master(dev)) {
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; i++) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+ == IB_LINK_LAYER_INFINIBAND)
+ mlx4_ib_slave_alias_guid_event(ibdev,
+ p, i,
+ 1);
+ }
+ }
return;
case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
+ if (mlx4_is_master(dev)) {
+ int i;
+
+ for (i = 1; i <= ibdev->num_ports; i++) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
+ == IB_LINK_LAYER_INFINIBAND)
+ mlx4_ib_slave_alias_guid_event(ibdev,
+ p, i,
+ 0);
+ }
+ }
/* here, p is the slave id */
do_slave_init(ibdev, p, 0);
return;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f829fd935b79..fce3934372a1 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -342,14 +342,9 @@ struct mlx4_ib_ah {
enum mlx4_guid_alias_rec_status {
MLX4_GUID_INFO_STATUS_IDLE,
MLX4_GUID_INFO_STATUS_SET,
- MLX4_GUID_INFO_STATUS_PENDING,
};
-enum mlx4_guid_alias_rec_ownership {
- MLX4_GUID_DRIVER_ASSIGN,
- MLX4_GUID_SYSADMIN_ASSIGN,
- MLX4_GUID_NONE_ASSIGN, /*init state of each record*/
-};
+#define GUID_STATE_NEED_PORT_INIT 0x01
enum mlx4_guid_alias_rec_method {
MLX4_GUID_INFO_RECORD_SET = IB_MGMT_METHOD_SET,
@@ -360,8 +355,8 @@ struct mlx4_sriov_alias_guid_info_rec_det {
u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record */
- u8 method; /*set or delete*/
- enum mlx4_guid_alias_rec_ownership ownership; /*indicates who assign that alias_guid record*/
+ unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
+ u64 time_to_run;
};
struct mlx4_sriov_alias_guid_port_rec_det {
@@ -369,6 +364,7 @@ struct mlx4_sriov_alias_guid_port_rec_det {
struct workqueue_struct *wq;
struct delayed_work alias_guid_work;
u8 port;
+ u32 state_flags;
struct mlx4_sriov_alias_guid *parent;
struct list_head cb_list;
};
@@ -802,6 +798,8 @@ int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
+void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
+ int port, int slave_init);
int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device) ;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..02fc91c68027 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -566,6 +566,10 @@ static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp)
ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr,
sizeof (struct mlx4_ib_proxy_sqp_hdr),
DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) {
+ kfree(qp->sqp_proxy_rcv[i].addr);
+ goto err;
+ }
}
return 0;
@@ -2605,8 +2609,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
- wr->wr.ud.hlen);
+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
*lso_seg_len = halign;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index d10c2b8a5dad..6797108ce873 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -46,21 +46,17 @@
static ssize_t show_admin_alias_guid(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int record_num;/*0-15*/
- int guid_index_in_rec; /*0 - 7*/
struct mlx4_ib_iov_sysfs_attr *mlx4_ib_iov_dentry =
container_of(attr, struct mlx4_ib_iov_sysfs_attr, dentry);
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
+ __be64 sysadmin_ag_val;
- record_num = mlx4_ib_iov_dentry->entry_num / 8 ;
- guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8 ;
+ sysadmin_ag_val = mlx4_get_admin_guid(mdev->dev,
+ mlx4_ib_iov_dentry->entry_num,
+ port->num);
- return sprintf(buf, "%llx\n",
- be64_to_cpu(*(__be64 *)&mdev->sriov.alias_guid.
- ports_guid[port->num - 1].
- all_rec_per_port[record_num].
- all_recs[8 * guid_index_in_rec]));
+ return sprintf(buf, "%llx\n", be64_to_cpu(sysadmin_ag_val));
}
/* store_admin_alias_guid stores the (new) administratively assigned value of that GUID.
@@ -80,6 +76,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
struct mlx4_ib_iov_port *port = mlx4_ib_iov_dentry->ctx;
struct mlx4_ib_dev *mdev = port->dev;
u64 sysadmin_ag_val;
+ unsigned long flags;
record_num = mlx4_ib_iov_dentry->entry_num / 8;
guid_index_in_rec = mlx4_ib_iov_dentry->entry_num % 8;
@@ -87,6 +84,7 @@ static ssize_t store_admin_alias_guid(struct device *dev,
pr_err("GUID 0 block 0 is RO\n");
return count;
}
+ spin_lock_irqsave(&mdev->sriov.alias_guid.ag_work_lock, flags);
sscanf(buf, "%llx", &sysadmin_ag_val);
*(__be64 *)&mdev->sriov.alias_guid.ports_guid[port->num - 1].
all_rec_per_port[record_num].
@@ -96,33 +94,15 @@ static ssize_t store_admin_alias_guid(struct device *dev,
/* Change the state to be pending for update */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].status
= MLX4_GUID_INFO_STATUS_IDLE ;
-
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
- = MLX4_GUID_INFO_RECORD_SET;
-
- switch (sysadmin_ag_val) {
- case MLX4_GUID_FOR_DELETE_VAL:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].method
- = MLX4_GUID_INFO_RECORD_DELETE;
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_SYSADMIN_ASSIGN;
- break;
- /* The sysadmin requests the SM to re-assign */
- case MLX4_NOT_SET_GUID:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_DRIVER_ASSIGN;
- break;
- /* The sysadmin requests a specific value.*/
- default:
- mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].ownership
- = MLX4_GUID_SYSADMIN_ASSIGN;
- break;
- }
+ mlx4_set_admin_guid(mdev->dev, cpu_to_be64(sysadmin_ag_val),
+ mlx4_ib_iov_dentry->entry_num,
+ port->num);
/* set the record index */
mdev->sriov.alias_guid.ports_guid[port->num - 1].all_rec_per_port[record_num].guid_indexes
- = mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+ |= mlx4_ib_get_aguid_comp_mask_from_ix(guid_index_in_rec);
+ spin_unlock_irqrestore(&mdev->sriov.alias_guid.ag_work_lock, flags);
mlx4_ib_init_alias_guid_work(mdev, port->num - 1);
return count;
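For reference, the sysfs entry number still maps onto the alias GUID tables as record = entry / 8 and slot = entry % 8 (eight GUIDs of GUID_REC_SIZE bytes per record); a tiny illustrative helper, not part of the patch:

	static void example_entry_layout(int entry_num, int *record_num, int *index_in_rec)
	{
		*record_num   = entry_num / 8;	/* which ib_sa_guidinfo_rec holds it */
		*index_in_rec = entry_num % 8;	/* slot inside that record */
	}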
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 650897a8591e..bdd5d3857203 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -89,14 +89,14 @@ static int create_file(const char *name, umode_t mode,
{
int error;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry))
- error = qibfs_mknod(parent->d_inode, *dentry,
+ error = qibfs_mknod(d_inode(parent), *dentry,
mode, fops, data);
else
error = PTR_ERR(*dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return error;
}
@@ -455,10 +455,10 @@ static int remove_file(struct dentry *parent, char *name)
}
spin_lock(&tmp->d_lock);
- if (!d_unhashed(tmp) && tmp->d_inode) {
+ if (!d_unhashed(tmp) && d_really_is_positive(tmp)) {
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- simple_unlink(parent->d_inode, tmp);
+ simple_unlink(d_inode(parent), tmp);
} else {
spin_unlock(&tmp->d_lock);
}
@@ -481,7 +481,7 @@ static int remove_device_files(struct super_block *sb,
int ret, i;
root = dget(sb->s_root);
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
snprintf(unit, sizeof(unit), "%u", dd->unit);
dir = lookup_one_len(unit, root, strlen(unit));
@@ -491,7 +491,7 @@ static int remove_device_files(struct super_block *sb,
goto bail;
}
- mutex_lock(&dir->d_inode->i_mutex);
+ mutex_lock(&d_inode(dir)->i_mutex);
remove_file(dir, "counters");
remove_file(dir, "counter_names");
remove_file(dir, "portcounter_names");
@@ -506,13 +506,13 @@ static int remove_device_files(struct super_block *sb,
}
}
remove_file(dir, "flash");
- mutex_unlock(&dir->d_inode->i_mutex);
- ret = simple_rmdir(root->d_inode, dir);
+ mutex_unlock(&d_inode(dir)->i_mutex);
+ ret = simple_rmdir(d_inode(root), dir);
d_delete(dir);
dput(dir);
bail:
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
dput(root);
return ret;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index d7562beb5423..bd94b0a6e9e5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -87,7 +87,6 @@ enum {
IPOIB_FLAG_ADMIN_UP = 2,
IPOIB_PKEY_ASSIGNED = 3,
IPOIB_FLAG_SUBINTERFACE = 5,
- IPOIB_MCAST_RUN = 6,
IPOIB_STOP_REAPER = 7,
IPOIB_FLAG_ADMIN_CM = 9,
IPOIB_FLAG_UMCAST = 10,
@@ -98,9 +97,15 @@ enum {
IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
IPOIB_MCAST_FLAG_SENDONLY = 1,
- IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+ /*
+ * For IPOIB_MCAST_FLAG_BUSY:
+ * When set, a join is in flight and mcast->mc is unreliable.
+ * When clear and mcast->mc is IS_ERR_OR_NULL, the join needs to be
+ * restarted or hasn't started yet.
+ * When clear and mcast->mc is a valid pointer, the join succeeded.
+ */
+ IPOIB_MCAST_FLAG_BUSY = 2,
IPOIB_MCAST_FLAG_ATTACHED = 3,
- IPOIB_MCAST_JOIN_STARTED = 4,
MAX_SEND_CQE = 16,
IPOIB_CM_COPYBREAK = 256,
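The new IPOIB_MCAST_FLAG_BUSY convention can be read as a small predicate; a hedged sketch (this helper does not exist in the patch) of how the join thread interprets the flag together with mcast->mc:

	static bool example_join_needs_start(struct ipoib_mcast *mcast)
	{
		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			return false;			/* join in flight; mcast->mc unreliable */
		/* flag clear: a valid mcast->mc means the join already succeeded */
		return IS_ERR_OR_NULL(mcast->mc);
	}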
@@ -148,6 +153,7 @@ struct ipoib_mcast {
unsigned long created;
unsigned long backoff;
+ unsigned long delay_until;
unsigned long flags;
unsigned char logcount;
@@ -292,6 +298,11 @@ struct ipoib_neigh_table {
struct completion deleted;
};
+struct ipoib_qp_state_validate {
+ struct work_struct work;
+ struct ipoib_dev_priv *priv;
+};
+
/*
* Device private locking: network stack tx_lock protects members used
* in TX fast path, lock protects everything else. lock nests inside
@@ -317,6 +328,7 @@ struct ipoib_dev_priv {
struct list_head multicast_list;
struct rb_root multicast_tree;
+ struct workqueue_struct *wq;
struct delayed_work mcast_task;
struct work_struct carrier_on_task;
struct work_struct flush_light;
@@ -426,11 +438,6 @@ struct ipoib_neigh {
#define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN)
#define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES)
-static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
-{
- return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
-}
-
void ipoib_neigh_dtor(struct ipoib_neigh *neigh);
static inline void ipoib_neigh_put(struct ipoib_neigh *neigh)
{
@@ -477,10 +484,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
void ipoib_pkey_event(struct work_struct *work);
void ipoib_ib_dev_cleanup(struct net_device *dev);
-int ipoib_ib_dev_open(struct net_device *dev, int flush);
+int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev, int flush);
-int ipoib_ib_dev_stop(struct net_device *dev, int flush);
+int ipoib_ib_dev_down(struct net_device *dev);
+int ipoib_ib_dev_stop(struct net_device *dev);
void ipoib_pkey_dev_check_presence(struct net_device *dev);
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -492,7 +499,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
void ipoib_mcast_restart_task(struct work_struct *work);
int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
+int ipoib_mcast_stop_thread(struct net_device *dev);
void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 933efcea0d03..56959adb6c7d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
}
spin_lock_irq(&priv->lock);
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to passive ids list head, but do not re-add it
* if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
- queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+ queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
- queue_work(ipoib_workqueue, &priv->cm.start_task);
+ queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
- queue_work(ipoib_workqueue, &priv->cm.reap_task);
+ queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
- queue_work(ipoib_workqueue, &priv->cm.skb_task);
+ queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
}
if (!list_empty(&priv->cm.passive_ids))
- queue_delayed_work(ipoib_workqueue,
+ queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 72626c348174..63b92cbb29ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -94,39 +94,9 @@ void ipoib_free_ah(struct kref *kref)
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
u64 mapping[IPOIB_UD_RX_SG])
{
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
- DMA_FROM_DEVICE);
- ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
- DMA_FROM_DEVICE);
- } else
- ib_dma_unmap_single(priv->ca, mapping[0],
- IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
- DMA_FROM_DEVICE);
-}
-
-static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
- struct sk_buff *skb,
- unsigned int length)
-{
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
- unsigned int size;
- /*
- * There is only two buffers needed for max_payload = 4K,
- * first buf size is IPOIB_UD_HEAD_SIZE
- */
- skb->tail += IPOIB_UD_HEAD_SIZE;
- skb->len += length;
-
- size = length - IPOIB_UD_HEAD_SIZE;
-
- skb_frag_size_set(frag, size);
- skb->data_len += size;
- skb->truesize += PAGE_SIZE;
- } else
- skb_put(skb, length);
-
+ ib_dma_unmap_single(priv->ca, mapping[0],
+ IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+ DMA_FROM_DEVICE);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id)
@@ -156,18 +126,11 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
- int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- buf_size = IPOIB_UD_HEAD_SIZE;
- tailroom = 128; /* reserve some tailroom for IP/TCP headers */
- } else {
- buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- tailroom = 0;
- }
+ buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- skb = dev_alloc_skb(buf_size + tailroom + 4);
+ skb = dev_alloc_skb(buf_size + IPOIB_ENCAP_LEN);
if (unlikely(!skb))
return NULL;
@@ -184,23 +147,8 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
goto error;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- struct page *page = alloc_page(GFP_ATOMIC);
- if (!page)
- goto partial_error;
- skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
- mapping[1] =
- ib_dma_map_page(priv->ca, page,
- 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
- goto partial_error;
- }
-
priv->rx_ring[id].skb = skb;
return skb;
-
-partial_error:
- ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
error:
dev_kfree_skb_any(skb);
return NULL;
@@ -278,7 +226,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
wc->byte_len, wc->slid);
ipoib_ud_dma_unmap_rx(priv, mapping);
- ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
+
+ skb_put(skb, wc->byte_len);
/* First byte of dgid signals multicast when 0xff */
dgid = &((struct ib_grh *)skb->data)->dgid;
@@ -296,6 +245,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
+ skb->truesize = SKB_TRUESIZE(skb->len);
+
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
@@ -376,6 +327,51 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
}
}
+/*
+ * As a result of a completion error the QP can be transitioned to the SQE state.
+ * This function checks whether the (send) QP is in the SQE state and, if so,
+ * moves it back to the RTS state so it becomes functional again.
+ */
+static void ipoib_qp_state_validate_work(struct work_struct *work)
+{
+ struct ipoib_qp_state_validate *qp_work =
+ container_of(work, struct ipoib_qp_state_validate, work);
+
+ struct ipoib_dev_priv *priv = qp_work->priv;
+ struct ib_qp_attr qp_attr;
+ struct ib_qp_init_attr query_init_attr;
+ int ret;
+
+ ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+ if (ret) {
+ ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
+ __func__, ret);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x is in state: %d\n",
+ __func__, priv->qp->qp_num, qp_attr.qp_state);
+
+ /* currently only the SQE->RTS transition is supported */
+ if (qp_attr.qp_state == IB_QPS_SQE) {
+ qp_attr.qp_state = IB_QPS_RTS;
+
+ ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
+ if (ret) {
+ pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
+ ret, priv->qp->qp_num);
+ goto free_res;
+ }
+ pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
+ __func__, priv->qp->qp_num);
+ } else {
+ pr_warn("QP (%d) will stay in state: %d\n",
+ priv->qp->qp_num, qp_attr.qp_state);
+ }
+
+free_res:
+ kfree(qp_work);
+}
+
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -407,10 +403,22 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
- wc->status != IB_WC_WR_FLUSH_ERR)
+ wc->status != IB_WC_WR_FLUSH_ERR) {
+ struct ipoib_qp_state_validate *qp_work;
ipoib_warn(priv, "failed send event "
"(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err);
+ qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
+ if (!qp_work) {
+ ipoib_warn(priv, "%s Failed alloc ipoib_qp_state_validate for qp: 0x%x\n",
+ __func__, priv->qp->qp_num);
+ return;
+ }
+
+ INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
+ qp_work->priv = priv;
+ queue_work(priv->wq, &qp_work->work);
+ }
}
static int poll_tx(struct ipoib_dev_priv *priv)
@@ -655,16 +663,33 @@ void ipoib_reap_ah(struct work_struct *work)
__ipoib_reap_ah(dev);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
+static void ipoib_flush_ah(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ cancel_delayed_work(&priv->ah_reap_task);
+ flush_workqueue(priv->wq);
+ ipoib_reap_ah(&priv->ah_reap_task.work);
+}
+
+static void ipoib_stop_ah(struct net_device *dev)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ set_bit(IPOIB_STOP_REAPER, &priv->flags);
+ ipoib_flush_ah(dev);
+}
+
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
drain_tx_cq((struct net_device *)ctx);
}
-int ipoib_ib_dev_open(struct net_device *dev, int flush)
+int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret;
@@ -696,7 +721,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
}
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
+ queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +731,7 @@ int ipoib_ib_dev_open(struct net_device *dev, int flush)
dev_stop:
if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
napi_enable(&priv->napi);
- ipoib_ib_dev_stop(dev, flush);
+ ipoib_ib_dev_stop(dev);
return -1;
}
@@ -738,7 +763,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
return ipoib_mcast_start_thread(dev);
}
-int ipoib_ib_dev_down(struct net_device *dev, int flush)
+int ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -747,7 +772,7 @@ int ipoib_ib_dev_down(struct net_device *dev, int flush)
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
- ipoib_mcast_stop_thread(dev, flush);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
@@ -807,7 +832,7 @@ void ipoib_drain_cq(struct net_device *dev)
local_bh_enable();
}
-int ipoib_ib_dev_stop(struct net_device *dev, int flush)
+int ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_qp_attr qp_attr;
@@ -877,24 +902,7 @@ timeout:
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
- /* Wait for all AHs to be reaped */
- set_bit(IPOIB_STOP_REAPER, &priv->flags);
- cancel_delayed_work(&priv->ah_reap_task);
- if (flush)
- flush_workqueue(ipoib_workqueue);
-
- begin = jiffies;
-
- while (!list_empty(&priv->dead_ahs)) {
- __ipoib_reap_ah(dev);
-
- if (time_after(jiffies, begin + HZ)) {
- ipoib_warn(priv, "timing out; will leak address handles\n");
- break;
- }
-
- msleep(1);
- }
+ ipoib_flush_ah(dev);
ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
@@ -918,7 +926,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
(unsigned long) dev);
if (dev->flags & IFF_UP) {
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
ipoib_transport_dev_cleanup(dev);
return -ENODEV;
}
@@ -1037,15 +1045,16 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (level == IPOIB_FLUSH_LIGHT) {
ipoib_mark_paths_invalid(dev);
ipoib_mcast_dev_flush(dev);
+ ipoib_flush_ah(dev);
}
if (level >= IPOIB_FLUSH_NORMAL)
- ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_ib_dev_open(dev, 0) != 0)
+ ipoib_ib_dev_stop(dev);
+ if (ipoib_ib_dev_open(dev) != 0)
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
@@ -1097,9 +1106,17 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
*/
ipoib_flush_paths(dev);
- ipoib_mcast_stop_thread(dev, 1);
+ ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
+ /*
+ * Our ah references aren't all freed until ipoib_mcast_dev_flush() and
+ * ipoib_flush_paths() have run and the neighbor garbage collection has
+ * been stopped and reaped.
+ * That should all be done by now, so make a final ah flush.
+ */
+ ipoib_stop_ah(dev);
+
ipoib_transport_dev_cleanup(dev);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 915ad04a827e..9e1b203d756d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
- if (ipoib_ib_dev_open(dev, 1)) {
+ if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
return 0;
err_stop:
- ipoib_ib_dev_stop(dev, 1);
+ ipoib_ib_dev_stop(dev);
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
netif_stop_queue(dev);
- ipoib_ib_dev_down(dev, 1);
- ipoib_ib_dev_stop(dev, 0);
+ ipoib_ib_dev_down(dev);
+ ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
@@ -640,8 +640,10 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
if (!path->query && path_rec_start(dev, path))
goto err_path;
-
- __skb_queue_tail(&neigh->queue, skb);
+ if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
+ __skb_queue_tail(&neigh->queue, skb);
+ else
+ goto err_drop;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -676,7 +678,12 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
new_path = 1;
}
if (path) {
- __skb_queue_tail(&path->queue, skb);
+ if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
+ __skb_queue_tail(&path->queue, skb);
+ } else {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ }
if (!path->query && path_rec_start(dev, path)) {
spin_unlock_irqrestore(&priv->lock, flags);
@@ -839,7 +846,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
return;
}
- queue_work(ipoib_workqueue, &priv->restart_task);
+ queue_work(priv->wq, &priv->restart_task);
}
static int ipoib_get_iflink(const struct net_device *dev)
@@ -966,7 +973,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
__ipoib_reap_neigh(priv);
if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
@@ -1145,7 +1152,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
/* start garbage collection */
clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
- queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
+ queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
@@ -1274,15 +1281,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- if (ipoib_neigh_hash_init(priv) < 0)
- goto out;
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
GFP_KERNEL);
if (!priv->rx_ring) {
printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
ca->name, ipoib_recvq_size);
- goto out_neigh_hash_cleanup;
+ goto out;
}
priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1297,16 +1302,24 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
if (ipoib_ib_dev_init(dev, ca, port))
goto out_tx_ring_cleanup;
+ /*
+ * Must be after ipoib_ib_dev_init so we can allocate a per
+ * device wq there and use it here
+ */
+ if (ipoib_neigh_hash_init(priv) < 0)
+ goto out_dev_uninit;
+
return 0;
+out_dev_uninit:
+ ipoib_ib_dev_cleanup(dev);
+
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
-out_neigh_hash_cleanup:
- ipoib_neigh_hash_uninit(dev);
out:
return -ENOMEM;
}
@@ -1329,6 +1342,12 @@ void ipoib_dev_cleanup(struct net_device *dev)
}
unregister_netdevice_many(&head);
+ /*
+ * Must be before ipoib_ib_dev_cleanup or we delete an in use
+ * work queue
+ */
+ ipoib_neigh_hash_uninit(dev);
+
ipoib_ib_dev_cleanup(dev);
kfree(priv->rx_ring);
@@ -1336,8 +1355,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
priv->rx_ring = NULL;
priv->tx_ring = NULL;
-
- ipoib_neigh_hash_uninit(dev);
}
static const struct header_ops ipoib_header_ops = {
@@ -1646,10 +1663,11 @@ sysfs_failed:
register_failed:
ib_unregister_event_handler(&priv->event_handler);
+ flush_workqueue(ipoib_workqueue);
/* Stop GC if started before flush */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
event_failed:
ipoib_dev_cleanup(priv->dev);
@@ -1712,6 +1730,7 @@ static void ipoib_remove_one(struct ib_device *device)
list_for_each_entry_safe(priv, tmp, dev_list, list) {
ib_unregister_event_handler(&priv->event_handler);
+ flush_workqueue(ipoib_workqueue);
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
@@ -1720,7 +1739,7 @@ static void ipoib_remove_one(struct ib_device *device)
/* Stop GC */
set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
cancel_delayed_work(&priv->neigh_reap_task);
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
unregister_netdev(priv->dev);
free_netdev(priv->dev);
@@ -1755,14 +1774,16 @@ static int __init ipoib_init_module(void)
return ret;
/*
- * We create our own workqueue mainly because we want to be
- * able to flush it when devices are being removed. We can't
- * use schedule_work()/flush_scheduled_work() because both
- * unregister_netdev() and linkwatch_event take the rtnl lock,
- * so flush_scheduled_work() can deadlock during device
- * removal.
+ * We create a global workqueue here that is used for all flush
+ * operations. However, if you attempt to flush a workqueue
+ * from a task on that same workqueue, it deadlocks the system.
+ * We want to be able to flush the tasks associated with a
+ * specific net device, so we also create a workqueue for each
+ * netdevice. We queue up the tasks for that device only on
+ * its private workqueue, and we only queue up flush events
+ * on our global flush workqueue. This avoids the deadlocks.
*/
- ipoib_workqueue = create_singlethread_workqueue("ipoib");
+ ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
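A minimal sketch of the split described in the comment above: a single global workqueue used only for flush events, plus one private workqueue per net device for that device's own tasks (names other than "ipoib_flush" are illustrative assumptions):

	static struct workqueue_struct *example_flush_wq;	/* global, flush-only */

	static int example_wq_setup(struct ipoib_dev_priv *priv)
	{
		example_flush_wq = create_singlethread_workqueue("ipoib_flush");
		if (!example_flush_wq)
			return -ENOMEM;
		/* per-netdev queue: all of this device's tasks are queued here */
		priv->wq = create_singlethread_workqueue("ipoib_wq");
		if (!priv->wq) {
			destroy_workqueue(example_flush_wq);
			return -ENOMEM;
		}
		return 0;
	}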
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ffb83b5f7e80..0d23e0568deb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(mcast_debug_level,
"Enable multicast debug tracing if > 0");
#endif
-static DEFINE_MUTEX(mcast_mutex);
-
struct ipoib_mcast_iter {
struct net_device *dev;
union ib_gid mgid;
@@ -66,6 +64,48 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/*
+ * This should be called with the priv->lock held
+ */
+static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
+ struct ipoib_mcast *mcast,
+ bool delay)
+{
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+
+ /*
+ * We will be scheduling *something*, so cancel whatever is
+ * currently scheduled first
+ */
+ cancel_delayed_work(&priv->mcast_task);
+ if (mcast && delay) {
+ /*
+ * We had a failure and want to schedule a retry later
+ */
+ mcast->backoff *= 2;
+ if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+ mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+ mcast->delay_until = jiffies + (mcast->backoff * HZ);
+ /*
+ * Mark this mcast for its delay, but restart the
+ * task immediately. The join task will make sure to
+ * clear out all entries without delays, and then
+ * schedule itself to run again when the earliest
+ * delay expires
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+ } else if (delay) {
+ /*
+ * Special case of retrying after a failure to
+ * allocate the broadcast multicast group, wait
+ * 1 second and try again
+ */
+ queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
+ } else
+ queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+}
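delay_until is an absolute jiffies value, so "still backing off" is a simple time_before() comparison; a sketch of the check the join task performs, with an assumed helper name:

	static bool example_mcast_delayed(struct ipoib_mcast *mcast)
	{
		return mcast->backoff > 1 &&
		       time_before(jiffies, mcast->delay_until);
	}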
+
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
struct net_device *dev = mcast->dev;
@@ -103,6 +143,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
mcast->dev = dev;
mcast->created = jiffies;
+ mcast->delay_until = jiffies;
mcast->backoff = 1;
INIT_LIST_HEAD(&mcast->list);
@@ -185,17 +226,27 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
spin_unlock_irq(&priv->lock);
return -EAGAIN;
}
- priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ /* update priv members according to the new mcast */
+ priv->broadcast->mcmember.qkey = mcmember->qkey;
+ priv->broadcast->mcmember.mtu = mcmember->mtu;
+ priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
+ priv->broadcast->mcmember.rate = mcmember->rate;
+ priv->broadcast->mcmember.sl = mcmember->sl;
+ priv->broadcast->mcmember.flow_label = mcmember->flow_label;
+ priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
+ /* assume that if the admin and mcast MTUs are equal, both may be changed */
+ if (priv->mcast_mtu == priv->admin_mtu)
+ priv->admin_mtu =
+ priv->mcast_mtu =
+ IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+ else
+ priv->mcast_mtu =
+ IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
+
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
set_qkey = 1;
-
- if (!ipoib_cm_admin_enabled(dev)) {
- rtnl_lock();
- dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
- rtnl_unlock();
- }
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -270,107 +321,35 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
return 0;
}
-static int
-ipoib_mcast_sendonly_join_complete(int status,
- struct ib_sa_multicast *multicast)
-{
- struct ipoib_mcast *mcast = multicast->context;
- struct net_device *dev = mcast->dev;
-
- /* We trap for port events ourselves. */
- if (status == -ENETRESET)
- return 0;
-
- if (!status)
- status = ipoib_mcast_join_finish(mcast, &multicast->rec);
-
- if (status) {
- if (mcast->logcount++ < 20)
- ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
-
- /* Flush out any queued packets */
- netif_tx_lock_bh(dev);
- while (!skb_queue_empty(&mcast->pkt_queue)) {
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
- }
- netif_tx_unlock_bh(dev);
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
- &mcast->flags);
- }
- return status;
-}
-
-static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
-{
- struct net_device *dev = mcast->dev;
- struct ipoib_dev_priv *priv = netdev_priv(dev);
- struct ib_sa_mcmember_rec rec = {
-#if 0 /* Some SMs don't support send-only yet */
- .join_state = 4
-#else
- .join_state = 1
-#endif
- };
- int ret = 0;
-
- if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
- ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
- return -ENODEV;
- }
-
- if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
- ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
- return -EBUSY;
- }
-
- rec.mgid = mcast->mcmember.mgid;
- rec.port_gid = priv->local_gid;
- rec.pkey = cpu_to_be16(priv->pkey);
-
- mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
- priv->port, &rec,
- IB_SA_MCMEMBER_REC_MGID |
- IB_SA_MCMEMBER_REC_PORT_GID |
- IB_SA_MCMEMBER_REC_PKEY |
- IB_SA_MCMEMBER_REC_JOIN_STATE,
- GFP_ATOMIC,
- ipoib_mcast_sendonly_join_complete,
- mcast);
- if (IS_ERR(mcast->mc)) {
- ret = PTR_ERR(mcast->mc);
- clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
- ret);
- } else {
- ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
- mcast->mcmember.mgid.raw);
- }
-
- return ret;
-}
-
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
- /*
- * Take rtnl_lock to avoid racing with ipoib_stop() and
- * turning the carrier back on while a device is being
- * removed.
- */
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
- rtnl_lock();
+ /*
+ * Take rtnl_lock to avoid racing with ipoib_stop() and
+ * turning the carrier back on while a device is being
+ * removed. However, ipoib_stop() will attempt to flush
+ * the workqueue while holding the rtnl lock, so loop
+ * on trylock until either we get the lock or we see
+ * FLAG_OPER_UP go away as that signals that we are bailing
+ * and can safely ignore the carrier on work.
+ */
+ while (!rtnl_trylock()) {
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return;
+ else
+ msleep(20);
+ }
+ if (!ipoib_cm_admin_enabled(priv->dev))
+ dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
@@ -382,7 +361,9 @@ static int ipoib_mcast_join_complete(int status,
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
- ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
+ ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
+ "sendonly " : "",
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
@@ -396,49 +377,74 @@ static int ipoib_mcast_join_complete(int status,
if (!status) {
mcast->backoff = 1;
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ mcast->delay_until = jiffies;
/*
- * Defer carrier on work to ipoib_workqueue to avoid a
- * deadlock on rtnl_lock here.
+ * Defer carrier on work to priv->wq to avoid a
+ * deadlock on rtnl_lock here. Requeue our multicast
+ * work too, which will end up happening right after
+ * our carrier on task work and will allow us to
+ * send out all of the non-broadcast joins
*/
- if (mcast == priv->broadcast)
- queue_work(ipoib_workqueue, &priv->carrier_on_task);
-
- status = 0;
- goto out;
- }
+ if (mcast == priv->broadcast) {
+ spin_lock_irq(&priv->lock);
+ queue_work(priv->wq, &priv->carrier_on_task);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ goto out_locked;
+ }
+ } else {
+ if (mcast->logcount++ < 20) {
+ if (status == -ETIMEDOUT || status == -EAGAIN) {
+ ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ } else {
+ ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
+ test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
+ mcast->mcmember.mgid.raw, status);
+ }
+ }
- if (mcast->logcount++ < 20) {
- if (status == -ETIMEDOUT || status == -EAGAIN) {
- ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ mcast->backoff >= 2) {
+ /*
+ * We only retry sendonly joins once before we drop
+ * the packet and quit trying to deal with the
+ * group. However, we leave the group in the
+ * mcast list as an unjoined group. If we want to
+ * try joining again, we simply queue up a packet
+ * and restart the join thread. The empty queue
+ * is why the join thread ignores this group.
+ */
+ mcast->backoff = 1;
+ netif_tx_lock_bh(dev);
+ while (!skb_queue_empty(&mcast->pkt_queue)) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
+ }
+ netif_tx_unlock_bh(dev);
} else {
- ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
- mcast->mcmember.mgid.raw, status);
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
+ goto out_locked;
}
}
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
-
- /* Clear the busy flag so we try again */
- status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-
- mutex_lock(&mcast_mutex);
+out:
spin_lock_irq(&priv->lock);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
- mcast->backoff * HZ);
+out_locked:
+ /*
+ * Make sure to set mcast->mc before we clear the busy flag to avoid
+ * racing with code that checks for BUSY before checking mcast->mc
+ */
+ if (status)
+ mcast->mc = NULL;
+ else
+ mcast->mc = multicast;
+ clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
- mutex_unlock(&mcast_mutex);
-out:
complete(&mcast->done);
+
return status;
}
@@ -446,6 +452,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
int create)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct ib_sa_multicast *multicast;
struct ib_sa_mcmember_rec rec = {
.join_state = 1
};
@@ -487,29 +494,18 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
}
- set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- init_completion(&mcast->done);
- set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
-
- mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
+ multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
- if (IS_ERR(mcast->mc)) {
+ if (IS_ERR(multicast)) {
+ ret = PTR_ERR(multicast);
+ ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
+ spin_lock_irq(&priv->lock);
+ /* Requeue this join task with a backoff delay */
+ __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ spin_unlock_irq(&priv->lock);
complete(&mcast->done);
- ret = PTR_ERR(mcast->mc);
- ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
-
- mcast->backoff *= 2;
- if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
- mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
-
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task,
- mcast->backoff * HZ);
- mutex_unlock(&mcast_mutex);
}
}
@@ -519,8 +515,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
struct ib_port_attr port_attr;
+ unsigned long delay_until = 0;
+ struct ipoib_mcast *mcast = NULL;
+ int create = 1;
- if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
if (ib_query_port(priv->ca, priv->port, &port_attr) ||
@@ -536,93 +535,118 @@ void ipoib_mcast_join_task(struct work_struct *work)
else
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ spin_lock_irq(&priv->lock);
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ goto out;
+
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
- if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- return;
-
- broadcast = ipoib_mcast_alloc(dev, 1);
+ broadcast = ipoib_mcast_alloc(dev, 0);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
- mutex_lock(&mcast_mutex);
- if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue,
- &priv->mcast_task, HZ);
- mutex_unlock(&mcast_mutex);
- return;
+ /*
+ * Restart us after a 1 second delay to retry
+ * creating our broadcast group and attaching to
+ * it. Until this succeeds, this ipoib dev is
+ * completely stalled (multicast-wise).
+ */
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 1);
+ goto out;
}
- spin_lock_irq(&priv->lock);
memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
sizeof (union ib_gid));
priv->broadcast = broadcast;
__ipoib_mcast_add(dev, priv->broadcast);
- spin_unlock_irq(&priv->lock);
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
- if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
- ipoib_mcast_join(dev, priv->broadcast, 0);
- return;
- }
-
- while (1) {
- struct ipoib_mcast *mcast = NULL;
-
- spin_lock_irq(&priv->lock);
- list_for_each_entry(mcast, &priv->multicast_list, list) {
- if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
- && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
- /* Found the next unjoined group */
- break;
+ if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
+ mcast = priv->broadcast;
+ create = 0;
+ if (mcast->backoff > 1 &&
+ time_before(jiffies, mcast->delay_until)) {
+ delay_until = mcast->delay_until;
+ mcast = NULL;
}
}
- spin_unlock_irq(&priv->lock);
+ goto out;
+ }
- if (&mcast->list == &priv->multicast_list) {
- /* All done */
- break;
+ /*
+ * We'll never get here until the broadcast group is both allocated
+ * and attached
+ */
+ list_for_each_entry(mcast, &priv->multicast_list, list) {
+ if (IS_ERR_OR_NULL(mcast->mc) &&
+ !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
+ (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
+ !skb_queue_empty(&mcast->pkt_queue))) {
+ if (mcast->backoff == 1 ||
+ time_after_eq(jiffies, mcast->delay_until)) {
+ /* Found the next unjoined group */
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ create = 0;
+ else
+ create = 1;
+ spin_unlock_irq(&priv->lock);
+ ipoib_mcast_join(dev, mcast, create);
+ spin_lock_irq(&priv->lock);
+ } else if (!delay_until ||
+ time_before(mcast->delay_until, delay_until))
+ delay_until = mcast->delay_until;
}
-
- ipoib_mcast_join(dev, mcast, 1);
- return;
}
- ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");
+ mcast = NULL;
+ ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
- clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+out:
+ if (delay_until) {
+ cancel_delayed_work(&priv->mcast_task);
+ queue_delayed_work(priv->wq, &priv->mcast_task,
+ delay_until - jiffies);
+ }
+ if (mcast) {
+ init_completion(&mcast->done);
+ set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ }
+ spin_unlock_irq(&priv->lock);
+ if (mcast)
+ ipoib_mcast_join(dev, mcast, create);
}
int ipoib_mcast_start_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
ipoib_dbg_mcast(priv, "starting multicast thread\n");
- mutex_lock(&mcast_mutex);
- if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
- queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
- mutex_unlock(&mcast_mutex);
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
-int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
+int ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
- mutex_lock(&mcast_mutex);
- clear_bit(IPOIB_MCAST_RUN, &priv->flags);
+ spin_lock_irqsave(&priv->lock, flags);
cancel_delayed_work(&priv->mcast_task);
- mutex_unlock(&mcast_mutex);
+ spin_unlock_irqrestore(&priv->lock, flags);
- if (flush)
- flush_workqueue(ipoib_workqueue);
+ flush_workqueue(priv->wq);
return 0;
}
@@ -633,6 +657,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
+
+ if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -644,7 +671,9 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
be16_to_cpu(mcast->mcmember.mlid));
if (ret)
ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
- }
+ } else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+ ipoib_dbg(priv, "leaving with no mcmember but not a "
+ "SENDONLY join\n");
return 0;
}
@@ -667,49 +696,37 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
}
mcast = __ipoib_mcast_find(dev, mgid);
- if (!mcast) {
- /* Let's create a new send only group now */
- ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
- mgid);
-
- mcast = ipoib_mcast_alloc(dev, 0);
+ if (!mcast || !mcast->ah) {
if (!mcast) {
- ipoib_warn(priv, "unable to allocate memory for "
- "multicast structure\n");
- ++dev->stats.tx_dropped;
- dev_kfree_skb_any(skb);
- goto out;
- }
-
- set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
- memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
- __ipoib_mcast_add(dev, mcast);
- list_add_tail(&mcast->list, &priv->multicast_list);
- }
+ /* Let's create a new send only group now */
+ ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
+ mgid);
+
+ mcast = ipoib_mcast_alloc(dev, 0);
+ if (!mcast) {
+ ipoib_warn(priv, "unable to allocate memory "
+ "for multicast structure\n");
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
- if (!mcast->ah) {
+ set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
+ memcpy(mcast->mcmember.mgid.raw, mgid,
+ sizeof (union ib_gid));
+ __ipoib_mcast_add(dev, mcast);
+ list_add_tail(&mcast->list, &priv->multicast_list);
+ }
if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
skb_queue_tail(&mcast->pkt_queue, skb);
else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
-
- if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
- ipoib_dbg_mcast(priv, "no address vector, "
- "but multicast join already started\n");
- else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- ipoib_mcast_sendonly_join(mcast);
-
- /*
- * If lookup completes between here and out:, don't
- * want to send packet twice.
- */
- mcast = NULL;
- }
-
-out:
- if (mcast && mcast->ah) {
+ if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ }
+ } else {
struct ipoib_neigh *neigh;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -759,9 +776,12 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
spin_unlock_irqrestore(&priv->lock, flags);
- /* seperate between the wait to the leave*/
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
- if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
@@ -792,9 +812,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
unsigned long flags;
struct ib_sa_mcmember_rec rec;
- ipoib_dbg_mcast(priv, "restarting multicast task\n");
+ if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ /*
+ * shortcut...on shutdown flush is called next, just
+ * let it do all the work
+ */
+ return;
- ipoib_mcast_stop_thread(dev, 0);
+ ipoib_dbg_mcast(priv, "restarting multicast task\n");
local_irq_save(flags);
netif_addr_lock(dev);
@@ -880,14 +905,27 @@ void ipoib_mcast_restart_task(struct work_struct *work)
netif_addr_unlock(dev);
local_irq_restore(flags);
- /* We have to cancel outside of the spinlock */
+ /*
+ * make sure the in-flight joins have finished before we attempt
+ * to leave
+ */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+ wait_for_completion(&mcast->done);
+
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
- if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
- ipoib_mcast_start_thread(dev);
+ /*
+ * Double check that we are still up
+ */
+ if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ __ipoib_mcast_schedule_join_thread(priv, NULL, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
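/*
 * Illustrative sketch, not part of the hunks above: roughly what a
 * backoff-scheduling helper such as __ipoib_mcast_schedule_join_thread()
 * could look like.  The real helper is added earlier in this patch and may
 * differ; the field and flag names below come from the hunks above, but the
 * body is an assumption.
 */
static void example_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
					       struct ipoib_mcast *mcast,
					       int delay)
{
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

	if (delay && mcast) {
		/* Exponential backoff, capped at IPOIB_MAX_BACKOFF_SECONDS */
		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
		mcast->delay_until = jiffies + (mcast->backoff * HZ);
	}

	/* Always requeue on the per-device single-threaded workqueue */
	cancel_delayed_work(&priv->mcast_task);
	queue_delayed_work(priv->wq, &priv->mcast_task, delay ? HZ : 0);
}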
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index c56d5d44c53b..e5cc43074196 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -157,6 +157,16 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
goto out_free_pd;
}
+ /*
+ * the various IPoIB tasks assume they will never race against
+ * themselves, so always use a single thread workqueue
+ */
+ priv->wq = create_singlethread_workqueue("ipoib_wq");
+ if (!priv->wq) {
+ printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
+ goto out_free_mr;
+ }
+
size = ipoib_recvq_size + 1;
ret = ipoib_cm_dev_init(dev);
if (!ret) {
@@ -165,12 +175,13 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
- }
+ } else
+ goto out_free_wq;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
if (IS_ERR(priv->recv_cq)) {
printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
- goto out_free_mr;
+ goto out_cm_dev_cleanup;
}
priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
@@ -216,15 +227,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
priv->tx_wr.send_flags = IB_SEND_SIGNALED;
priv->rx_sge[0].lkey = priv->mr->lkey;
- if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
- priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
- priv->rx_sge[1].length = PAGE_SIZE;
- priv->rx_sge[1].lkey = priv->mr->lkey;
- priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
- } else {
- priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
- priv->rx_wr.num_sge = 1;
- }
+
+ priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ priv->rx_wr.num_sge = 1;
+
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
@@ -236,12 +242,19 @@ out_free_send_cq:
out_free_recv_cq:
ib_destroy_cq(priv->recv_cq);
+out_cm_dev_cleanup:
+ ipoib_cm_dev_cleanup(dev);
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
out_free_mr:
ib_dereg_mr(priv->mr);
- ipoib_cm_dev_cleanup(dev);
out_free_pd:
ib_dealloc_pd(priv->pd);
+
return -ENODEV;
}
@@ -265,11 +278,18 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
ipoib_cm_dev_cleanup(dev);
+ if (priv->wq) {
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+ }
+
if (ib_dereg_mr(priv->mr))
ipoib_warn(priv, "ib_dereg_mr failed\n");
if (ib_dealloc_pd(priv->pd))
ipoib_warn(priv, "ib_dealloc_pd failed\n");
+
}
void ipoib_event(struct ib_event_handler *handler,
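/*
 * Illustrative sketch, not from the patch: the single-threaded per-device
 * workqueue pattern the ipoib_verbs.c hunks above introduce.  Work items
 * queued on such a workqueue never run concurrently with one another, which
 * is what lets the multicast join/flush/restart tasks assume they never race
 * against themselves.  All example_* names are hypothetical.
 */
struct example_dev {
	struct workqueue_struct *wq;
	struct delayed_work	 task;
};

static int example_dev_init_wq(struct example_dev *edev)
{
	edev->wq = create_singlethread_workqueue("example_wq");
	if (!edev->wq)
		return -ENOMEM;
	return 0;
}

static void example_dev_cleanup_wq(struct example_dev *edev)
{
	if (edev->wq) {
		flush_workqueue(edev->wq);	/* drain queued work first */
		destroy_workqueue(edev->wq);
		edev->wq = NULL;
	}
}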
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index b47aea1094b2..262ba1f8ee50 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -69,7 +69,7 @@
#define DRV_NAME "iser"
#define PFX DRV_NAME ": "
-#define DRV_VER "1.5"
+#define DRV_VER "1.6"
#define iser_dbg(fmt, arg...) \
do { \
@@ -218,22 +218,21 @@ enum iser_data_dir {
/**
* struct iser_data_buf - iSER data buffer
*
- * @buf: pointer to the sg list
+ * @sg: pointer to the sg list
* @size: num entries of this sg
* @data_len: total buffer byte len
* @dma_nents: returned by dma_map_sg
- * @copy_buf: allocated copy buf for SGs unaligned
- * for rdma which are copied
- * @sg_single: SG-ified clone of a non SG SC or
- * unaligned SG
+ * @orig_sg: pointer to the original sg list (in case
+ * we used a copy)
+ * @orig_size: num entries of orig sg list
*/
struct iser_data_buf {
- void *buf;
+ struct scatterlist *sg;
unsigned int size;
unsigned long data_len;
unsigned int dma_nents;
- char *copy_buf;
- struct scatterlist sg_single;
+ struct scatterlist *orig_sg;
+ unsigned int orig_size;
};
/* fwd declarations */
@@ -244,35 +243,14 @@ struct iscsi_endpoint;
/**
* struct iser_mem_reg - iSER memory registration info
*
- * @lkey: MR local key
- * @rkey: MR remote key
- * @va: MR start address (buffer va)
- * @len: MR length
+ * @sge: memory region sg element
+ * @rkey: memory region remote key
* @mem_h: pointer to registration context (FMR/Fastreg)
*/
struct iser_mem_reg {
- u32 lkey;
- u32 rkey;
- u64 va;
- u64 len;
- void *mem_h;
-};
-
-/**
- * struct iser_regd_buf - iSER buffer registration desc
- *
- * @reg: memory registration info
- * @virt_addr: virtual address of buffer
- * @device: reference to iser device
- * @direction: dma direction (for dma_unmap)
- * @data_size: data buffer size in bytes
- */
-struct iser_regd_buf {
- struct iser_mem_reg reg;
- void *virt_addr;
- struct iser_device *device;
- enum dma_data_direction direction;
- unsigned int data_size;
+ struct ib_sge sge;
+ u32 rkey;
+ void *mem_h;
};
enum iser_desc_type {
@@ -534,11 +512,9 @@ struct iser_conn {
* @sc: link to scsi command
* @command_sent: indicate if command was sent
* @dir: iser data direction
- * @rdma_regd: task rdma registration desc
+ * @rdma_reg: task rdma registration desc
* @data: iser data buffer desc
- * @data_copy: iser data copy buffer desc (bounce buffer)
* @prot: iser protection buffer desc
- * @prot_copy: iser protection copy buffer desc (bounce buffer)
*/
struct iscsi_iser_task {
struct iser_tx_desc desc;
@@ -547,11 +523,9 @@ struct iscsi_iser_task {
struct scsi_cmnd *sc;
int command_sent;
int dir[ISER_DIRS_NUM];
- struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];
+ struct iser_mem_reg rdma_reg[ISER_DIRS_NUM];
struct iser_data_buf data[ISER_DIRS_NUM];
- struct iser_data_buf data_copy[ISER_DIRS_NUM];
struct iser_data_buf prot[ISER_DIRS_NUM];
- struct iser_data_buf prot_copy[ISER_DIRS_NUM];
};
struct iser_page_vec {
@@ -621,7 +595,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir);
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
@@ -634,10 +607,6 @@ int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *dst_addr,
int non_blocking);
-int iser_reg_page_vec(struct ib_conn *ib_conn,
- struct iser_page_vec *page_vec,
- struct iser_mem_reg *mem_reg);
-
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir);
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
@@ -667,4 +636,9 @@ int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector);
+struct fast_reg_descriptor *
+iser_reg_desc_get(struct ib_conn *ib_conn);
+void
+iser_reg_desc_put(struct ib_conn *ib_conn,
+ struct fast_reg_descriptor *desc);
#endif
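/*
 * Illustrative sketch, not part of the patch: after this header rework a
 * registration is described by an ib_sge plus an rkey, so the old
 * (lkey, va, len) triple maps onto sge.lkey, sge.addr and sge.length.
 * A hypothetical helper filling the new struct from a dma-mapped,
 * single-entry scatterlist using the device's DMA MR:
 */
static void example_fill_mem_reg(struct ib_device *ibdev,
				 struct scatterlist *sg,
				 struct ib_mr *mr,
				 struct iser_mem_reg *reg)
{
	reg->sge.lkey	= mr->lkey;
	reg->rkey	= mr->rkey;
	reg->sge.addr	= ib_sg_dma_address(ibdev, sg);
	reg->sge.length	= ib_sg_dma_len(ibdev, sg);
	reg->mem_h	= NULL;		/* no FMR/fastreg context needed */
}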
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 20e859a6f1a6..3e2118e8ed87 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -50,7 +50,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
@@ -78,15 +78,15 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
iser_err("Failed to set up Data-IN RDMA\n");
return err;
}
- regd_buf = &iser_task->rdma_regd[ISER_DIR_IN];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
- hdr->read_stag = cpu_to_be32(regd_buf->reg.rkey);
- hdr->read_va = cpu_to_be64(regd_buf->reg.va);
+ hdr->read_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->read_va = cpu_to_be64(mem_reg->sge.addr);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
- task->itt, regd_buf->reg.rkey,
- (unsigned long long)regd_buf->reg.va);
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr);
return 0;
}
@@ -104,7 +104,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->ib_conn.device;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
@@ -134,25 +134,25 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}
- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
- hdr->write_stag = cpu_to_be32(regd_buf->reg.rkey);
- hdr->write_va = cpu_to_be64(regd_buf->reg.va + unsol_sz);
+ hdr->write_stag = cpu_to_be32(mem_reg->rkey);
+ hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
"VA:%#llX + unsol:%d\n",
- task->itt, regd_buf->reg.rkey,
- (unsigned long long)regd_buf->reg.va, unsol_sz);
+ task->itt, mem_reg->rkey,
+ (unsigned long long)mem_reg->sge.addr, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
- tx_dsg->addr = regd_buf->reg.va;
+ tx_dsg->addr = mem_reg->sge.addr;
tx_dsg->length = imm_sz;
- tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_dsg->lkey = mem_reg->sge.lkey;
iser_task->desc.num_sge = 2;
}
@@ -401,16 +401,16 @@ int iser_send_command(struct iscsi_conn *conn,
}
if (scsi_sg_count(sc)) { /* using a scatter list */
- data_buf->buf = scsi_sglist(sc);
+ data_buf->sg = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
data_buf->data_len = scsi_bufflen(sc);
if (scsi_prot_sg_count(sc)) {
- prot_buf->buf = scsi_prot_sglist(sc);
+ prot_buf->sg = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
- prot_buf->data_len = data_buf->data_len >>
- ilog2(sc->device->sector_size) * 8;
+ prot_buf->data_len = (data_buf->data_len >>
+ ilog2(sc->device->sector_size)) * 8;
}
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
@@ -450,7 +450,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = NULL;
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
@@ -477,11 +477,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
/* build the tx desc */
iser_initialize_task_headers(task, tx_desc);
- regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
- tx_dsg->addr = regd_buf->reg.va + buf_offset;
- tx_dsg->length = data_seg_len;
- tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_dsg->addr = mem_reg->sge.addr + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = mem_reg->sge.lkey;
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
@@ -658,10 +658,10 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
- memset(&iser_task->rdma_regd[ISER_DIR_IN], 0,
- sizeof(struct iser_regd_buf));
- memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0,
- sizeof(struct iser_regd_buf));
+ memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
+ sizeof(struct iser_mem_reg));
+ memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
+ sizeof(struct iser_mem_reg));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
@@ -674,35 +674,31 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
*/
- if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) {
+ if (iser_task->data[ISER_DIR_IN].orig_sg) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_IN],
- &iser_task->data_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
- if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ if (iser_task->data[ISER_DIR_OUT].orig_sg) {
is_rdma_data_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->data[ISER_DIR_OUT],
- &iser_task->data_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
- if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) {
+ if (iser_task->prot[ISER_DIR_IN].orig_sg) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_IN],
- &iser_task->prot_copy[ISER_DIR_IN],
ISER_DIR_IN);
}
- if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) {
+ if (iser_task->prot[ISER_DIR_OUT].orig_sg) {
is_rdma_prot_aligned = 0;
iser_finalize_rdma_unaligned_sg(iser_task,
&iser_task->prot[ISER_DIR_OUT],
- &iser_task->prot_copy[ISER_DIR_OUT],
ISER_DIR_OUT);
}
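/*
 * Worked example, not part of the patch: the operator-precedence fix in
 * iser_send_command() above.  ">>" binds less tightly than "*", so the old
 * expression shifted data_len right by ilog2(sector_size) * 8 bits, i.e. by
 * 72 bits for 512-byte sectors, which exceeds the width of unsigned long.
 * The intended result for an 8192-byte transfer over 512-byte sectors is
 * (8192 >> 9) * 8 = 16 * 8 = 128 bytes of T10-PI data.  A hypothetical
 * helper spelling out the corrected computation:
 */
static inline unsigned long example_prot_len(unsigned long data_len,
					     unsigned int sector_size)
{
	/* 8 bytes of protection information per logical sector */
	return (data_len >> ilog2(sector_size)) * 8;
}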
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 341040bf0984..f0cdc961eb11 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -39,68 +39,173 @@
#include "iscsi_iser.h"
-#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
+static void
+iser_free_bounce_sg(struct iser_data_buf *data)
+{
+ struct scatterlist *sg;
+ int count;
-/**
- * iser_start_rdma_unaligned_sg
- */
-static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- struct iser_data_buf *data_copy,
- enum iser_data_dir cmd_dir)
+ for_each_sg(data->sg, sg, data->size, count)
+ __free_page(sg_page(sg));
+
+ kfree(data->sg);
+
+ data->sg = data->orig_sg;
+ data->size = data->orig_size;
+ data->orig_sg = NULL;
+ data->orig_size = 0;
+}
+
+static int
+iser_alloc_bounce_sg(struct iser_data_buf *data)
{
- struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
- char *mem = NULL;
- unsigned long cmd_data_len = 0;
- int dma_nents, i;
+ struct page *page;
+ unsigned long length = data->data_len;
+ int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);
- for_each_sg(sgl, sg, data->size, i)
- cmd_data_len += ib_sg_dma_len(dev, sg);
+ sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
+ if (!sg)
+ goto err;
- if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- mem = (void *)__get_free_pages(GFP_ATOMIC,
- ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
- else
- mem = kmalloc(cmd_data_len, GFP_ATOMIC);
+ sg_init_table(sg, nents);
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
- if (mem == NULL) {
- iser_err("Failed to allocate mem size %d %d for copying sglist\n",
- data->size, (int)cmd_data_len);
- return -ENOMEM;
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ goto err;
+
+ sg_set_page(&sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
}
- if (cmd_dir == ISER_DIR_OUT) {
- /* copy the unaligned sg the buffer which is used for RDMA */
- char *p, *from;
-
- sgl = (struct scatterlist *)data->buf;
- p = mem;
- for_each_sg(sgl, sg, data->size, i) {
- from = kmap_atomic(sg_page(sg));
- memcpy(p,
- from + sg->offset,
- sg->length);
- kunmap_atomic(from);
- p += sg->length;
+ data->orig_sg = data->sg;
+ data->orig_size = data->size;
+ data->sg = sg;
+ data->size = nents;
+
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __free_page(sg_page(&sg[i - 1]));
+ kfree(sg);
+
+ return -ENOMEM;
+}
+
+static void
+iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
+{
+ struct scatterlist *osg, *bsg = data->sg;
+ void *oaddr, *baddr;
+ unsigned int left = data->data_len;
+ unsigned int bsg_off = 0;
+ int i;
+
+ for_each_sg(data->orig_sg, osg, data->orig_size, i) {
+ unsigned int copy_len, osg_off = 0;
+
+ oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
+ copy_len = min(left, osg->length);
+ while (copy_len) {
+ unsigned int len = min(copy_len, bsg->length - bsg_off);
+
+ baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
+ if (to_buffer)
+ memcpy(baddr + bsg_off, oaddr + osg_off, len);
+ else
+ memcpy(oaddr + osg_off, baddr + bsg_off, len);
+
+ kunmap_atomic(baddr - bsg->offset);
+ osg_off += len;
+ bsg_off += len;
+ copy_len -= len;
+
+ if (bsg_off >= bsg->length) {
+ bsg = sg_next(bsg);
+ bsg_off = 0;
+ }
}
+ kunmap_atomic(oaddr - osg->offset);
+ left -= osg_off;
}
+}
+
+static inline void
+iser_copy_from_bounce(struct iser_data_buf *data)
+{
+ iser_copy_bounce(data, false);
+}
+
+static inline void
+iser_copy_to_bounce(struct iser_data_buf *data)
+{
+ iser_copy_bounce(data, true);
+}
+
+struct fast_reg_descriptor *
+iser_reg_desc_get(struct ib_conn *ib_conn)
+{
+ struct fast_reg_descriptor *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ desc = list_first_entry(&ib_conn->fastreg.pool,
+ struct fast_reg_descriptor, list);
+ list_del(&desc->list);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+
+ return desc;
+}
+
+void
+iser_reg_desc_put(struct ib_conn *ib_conn,
+ struct fast_reg_descriptor *desc)
+{
+ unsigned long flags;
- sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
- data_copy->buf = &data_copy->sg_single;
- data_copy->size = 1;
- data_copy->copy_buf = mem;
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ list_add(&desc->list, &ib_conn->fastreg.pool);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+}
- dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
- (cmd_dir == ISER_DIR_OUT) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- BUG_ON(dma_nents == 0);
+/**
+ * iser_start_rdma_unaligned_sg
+ */
+static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *data,
+ enum iser_data_dir cmd_dir)
+{
+ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ int rc;
+
+ rc = iser_alloc_bounce_sg(data);
+ if (rc) {
+ iser_err("Failed to allocate bounce for data len %lu\n",
+ data->data_len);
+ return rc;
+ }
+
+ if (cmd_dir == ISER_DIR_OUT)
+ iser_copy_to_bounce(data);
- data_copy->dma_nents = dma_nents;
- data_copy->data_len = cmd_data_len;
+ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
+ (cmd_dir == ISER_DIR_OUT) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!data->dma_nents) {
+ iser_err("Got dma_nents %d, something went wrong...\n",
+ data->dma_nents);
+ rc = -ENOMEM;
+ goto err;
+ }
return 0;
+err:
+ iser_free_bounce_sg(data);
+ return rc;
}
/**
@@ -109,51 +214,18 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
- struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir)
{
- struct ib_device *dev;
- unsigned long cmd_data_len;
-
- dev = iser_task->iser_conn->ib_conn.device->ib_device;
+ struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
+ ib_dma_unmap_sg(dev, data->sg, data->size,
(cmd_dir == ISER_DIR_OUT) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (cmd_dir == ISER_DIR_IN) {
- char *mem;
- struct scatterlist *sgl, *sg;
- unsigned char *p, *to;
- unsigned int sg_size;
- int i;
-
- /* copy back read RDMA to unaligned sg */
- mem = data_copy->copy_buf;
-
- sgl = (struct scatterlist *)data->buf;
- sg_size = data->size;
-
- p = mem;
- for_each_sg(sgl, sg, sg_size, i) {
- to = kmap_atomic(sg_page(sg));
- memcpy(to + sg->offset,
- p,
- sg->length);
- kunmap_atomic(to);
- p += sg->length;
- }
- }
+ if (cmd_dir == ISER_DIR_IN)
+ iser_copy_from_bounce(data);
- cmd_data_len = data->data_len;
-
- if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- free_pages((unsigned long)data_copy->copy_buf,
- ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
- else
- kfree(data_copy->copy_buf);
-
- data_copy->copy_buf = NULL;
+ iser_free_bounce_sg(data);
}
#define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0)
@@ -175,7 +247,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
struct ib_device *ibdev, u64 *pages,
int *offset, int *data_size)
{
- struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
+ struct scatterlist *sg, *sgl = data->sg;
u64 start_addr, end_addr, page, chunk_start = 0;
unsigned long total_sz = 0;
unsigned int dma_len;
@@ -227,14 +299,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
struct ib_device *ibdev)
{
- struct scatterlist *sgl, *sg, *next_sg = NULL;
+ struct scatterlist *sg, *sgl, *next_sg = NULL;
u64 start_addr, end_addr;
int i, ret_len, start_check = 0;
if (data->dma_nents == 1)
return 1;
- sgl = (struct scatterlist *)data->buf;
+ sgl = data->sg;
start_addr = ib_sg_dma_address(ibdev, sgl);
for_each_sg(sgl, sg, data->dma_nents, i) {
@@ -266,11 +338,10 @@ static int iser_data_buf_aligned_len(struct iser_data_buf *data,
static void iser_data_buf_dump(struct iser_data_buf *data,
struct ib_device *ibdev)
{
- struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg;
int i;
- for_each_sg(sgl, sg, data->dma_nents, i)
+ for_each_sg(data->sg, sg, data->dma_nents, i)
iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
@@ -288,31 +359,6 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
iser_err("%d %lx\n",i,(unsigned long)page_vec->pages[i]);
}
-static void iser_page_vec_build(struct iser_data_buf *data,
- struct iser_page_vec *page_vec,
- struct ib_device *ibdev)
-{
- int page_vec_len = 0;
-
- page_vec->length = 0;
- page_vec->offset = 0;
-
- iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
- &page_vec->offset,
- &page_vec->data_size);
- iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
-
- page_vec->length = page_vec_len;
-
- if (page_vec_len * SIZE_4K < page_vec->data_size) {
- iser_err("page_vec too short to hold this SG\n");
- iser_data_buf_dump(data, ibdev);
- iser_dump_page_vec(page_vec);
- BUG();
- }
-}
-
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
enum iser_data_dir iser_dir,
@@ -323,7 +369,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
- data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
+ data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
if (data->dma_nents == 0) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
@@ -338,24 +384,41 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
- ib_dma_unmap_sg(dev, data->buf, data->size, dir);
+ ib_dma_unmap_sg(dev, data->sg, data->size, dir);
+}
+
+static int
+iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
+{
+ struct scatterlist *sg = mem->sg;
+
+ reg->sge.lkey = device->mr->lkey;
+ reg->rkey = device->mr->rkey;
+ reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
+ reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);
+
+ iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
+ " length=0x%x\n", reg->sge.lkey, reg->rkey,
+ reg->sge.addr, reg->sge.length);
+
+ return 0;
}
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
- struct ib_device *ibdev,
struct iser_data_buf *mem,
- struct iser_data_buf *mem_copy,
enum iser_data_dir cmd_dir,
int aligned_len)
{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iser_device *device = iser_task->iser_conn->ib_conn.device;
iscsi_conn->fmr_unalign_cnt++;
iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
aligned_len, mem->size);
if (iser_debug_level > 0)
- iser_data_buf_dump(mem, ibdev);
+ iser_data_buf_dump(mem, device->ib_device);
/* unmap the command data before accessing it */
iser_dma_unmap_task_data(iser_task, mem,
@@ -364,13 +427,95 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
/* allocate copy buf, if we are writing, copy the */
/* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
+ if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
return -ENOMEM;
return 0;
}
/**
+ * iser_reg_page_vec - Register physical memory
+ *
+ * returns: 0 on success, errno code on failure
+ */
+static
+int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_page_vec *page_vec,
+ struct iser_mem_reg *mem_reg)
+{
+ struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_pool_fmr *fmr;
+ int ret, plen;
+
+ plen = iser_sg_to_page_vec(mem, device->ib_device,
+ page_vec->pages,
+ &page_vec->offset,
+ &page_vec->data_size);
+ page_vec->length = plen;
+ if (plen * SIZE_4K < page_vec->data_size) {
+ iser_err("page vec too short to hold this SG\n");
+ iser_data_buf_dump(mem, device->ib_device);
+ iser_dump_page_vec(page_vec);
+ return -EINVAL;
+ }
+
+ fmr = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
+ page_vec->pages,
+ page_vec->length,
+ page_vec->pages[0]);
+ if (IS_ERR(fmr)) {
+ ret = PTR_ERR(fmr);
+ iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
+ return ret;
+ }
+
+ mem_reg->sge.lkey = fmr->fmr->lkey;
+ mem_reg->rkey = fmr->fmr->rkey;
+ mem_reg->sge.addr = page_vec->pages[0] + page_vec->offset;
+ mem_reg->sge.length = page_vec->data_size;
+ mem_reg->mem_h = fmr;
+
+ return 0;
+}
+
+/**
+ * Unregister (previously registered using FMR) memory.
+ * If the memory is non-FMR, this does nothing.
+ */
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+ int ret;
+
+ if (!reg->mem_h)
+ return;
+
+ iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
+
+ ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
+ if (ret)
+ iser_err("ib_fmr_pool_unmap failed %d\n", ret);
+
+ reg->mem_h = NULL;
+}
+
+void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
+
+ if (!reg->mem_h)
+ return;
+
+ iser_reg_desc_put(&iser_task->iser_conn->ib_conn,
+ reg->mem_h);
+ reg->mem_h = NULL;
+}
+
+/**
* iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
* using FMR (if possible) obtaining rkey and va
*
@@ -383,45 +528,29 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_regd_buf *regd_buf;
+ struct iser_mem_reg *mem_reg;
int aligned_len;
int err;
int i;
- struct scatterlist *sg;
- regd_buf = &iser_task->rdma_regd[cmd_dir];
+ mem_reg = &iser_task->rdma_reg[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->data_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->data_copy[cmd_dir];
}
/* if there a single dma entry, FMR is not needed */
if (mem->dma_nents == 1) {
- sg = (struct scatterlist *)mem->buf;
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.rkey = device->mr->rkey;
- regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
- regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
-
- iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
- "va: 0x%08lX sz: %ld]\n",
- (unsigned int)regd_buf->reg.lkey,
- (unsigned int)regd_buf->reg.rkey,
- (unsigned long)regd_buf->reg.va,
- (unsigned long)regd_buf->reg.len);
+ return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
- err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
- &regd_buf->reg);
+ err = iser_reg_page_vec(iser_task, mem, ib_conn->fmr.page_vec,
+ mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
@@ -519,8 +648,10 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
- struct ib_sge *prot_sge, struct ib_sge *sig_sge)
+ struct fast_reg_descriptor *desc,
+ struct iser_mem_reg *data_reg,
+ struct iser_mem_reg *prot_reg,
+ struct iser_mem_reg *sig_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx;
@@ -544,12 +675,12 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
memset(&sig_wr, 0, sizeof(sig_wr));
sig_wr.opcode = IB_WR_REG_SIG_MR;
sig_wr.wr_id = ISER_FASTREG_LI_WRID;
- sig_wr.sg_list = data_sge;
+ sig_wr.sg_list = &data_reg->sge;
sig_wr.num_sge = 1;
sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
if (scsi_prot_sg_count(iser_task->sc))
- sig_wr.wr.sig_handover.prot = prot_sge;
+ sig_wr.wr.sig_handover.prot = &prot_reg->sge;
sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
@@ -566,27 +697,26 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
}
desc->reg_indicators &= ~ISER_SIG_KEY_VALID;
- sig_sge->lkey = pi_ctx->sig_mr->lkey;
- sig_sge->addr = 0;
- sig_sge->length = scsi_transfer_length(iser_task->sc);
+ sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
+ sig_reg->rkey = pi_ctx->sig_mr->rkey;
+ sig_reg->sge.addr = 0;
+ sig_reg->sge.length = scsi_transfer_length(iser_task->sc);
- iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
- sig_sge->addr, sig_sge->length,
- sig_sge->lkey);
+ iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
+ sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
+ sig_reg->sge.length);
err:
return ret;
}
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
- struct iser_regd_buf *regd_buf,
struct iser_data_buf *mem,
+ struct fast_reg_descriptor *desc,
enum iser_reg_indicator ind,
- struct ib_sge *sge)
+ struct iser_mem_reg *reg)
{
- struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
- struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fastreg_wr, inv_wr;
@@ -594,17 +724,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
int ret, offset, size, plen;
/* if there a single dma entry, dma mr suffices */
- if (mem->dma_nents == 1) {
- struct scatterlist *sg = (struct scatterlist *)mem->buf;
-
- sge->lkey = device->mr->lkey;
- sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
- sge->length = ib_sg_dma_len(ibdev, &sg[0]);
-
- iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
- sge->lkey, sge->addr, sge->length);
- return 0;
- }
+ if (mem->dma_nents == 1)
+ return iser_reg_dma(device, mem, reg);
if (ind == ISER_DATA_KEY_VALID) {
mr = desc->data_mr;
@@ -652,9 +773,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
}
desc->reg_indicators &= ~ind;
- sge->lkey = mr->lkey;
- sge->addr = frpl->page_list[0] + offset;
- sge->length = size;
+ reg->sge.lkey = mr->lkey;
+ reg->rkey = mr->rkey;
+ reg->sge.addr = frpl->page_list[0] + offset;
+ reg->sge.length = size;
return ret;
}
@@ -672,93 +794,66 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir];
- struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+ struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
struct fast_reg_descriptor *desc = NULL;
- struct ib_sge data_sge;
int err, aligned_len;
- unsigned long flags;
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->data_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->data_copy[cmd_dir];
}
if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- spin_lock_irqsave(&ib_conn->lock, flags);
- desc = list_first_entry(&ib_conn->fastreg.pool,
- struct fast_reg_descriptor, list);
- list_del(&desc->list);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
- regd_buf->reg.mem_h = desc;
+ desc = iser_reg_desc_get(ib_conn);
+ mem_reg->mem_h = desc;
}
- err = iser_fast_reg_mr(iser_task, regd_buf, mem,
- ISER_DATA_KEY_VALID, &data_sge);
+ err = iser_fast_reg_mr(iser_task, mem, desc,
+ ISER_DATA_KEY_VALID, mem_reg);
if (err)
goto err_reg;
if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
- struct ib_sge prot_sge, sig_sge;
+ struct iser_mem_reg prot_reg;
- memset(&prot_sge, 0, sizeof(prot_sge));
+ memset(&prot_reg, 0, sizeof(prot_reg));
if (scsi_prot_sg_count(iser_task->sc)) {
mem = &iser_task->prot[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
if (aligned_len != mem->dma_nents) {
- err = fall_to_bounce_buf(iser_task, ibdev, mem,
- &iser_task->prot_copy[cmd_dir],
+ err = fall_to_bounce_buf(iser_task, mem,
cmd_dir, aligned_len);
if (err) {
iser_err("failed to allocate bounce buffer\n");
return err;
}
- mem = &iser_task->prot_copy[cmd_dir];
}
- err = iser_fast_reg_mr(iser_task, regd_buf, mem,
- ISER_PROT_KEY_VALID, &prot_sge);
+ err = iser_fast_reg_mr(iser_task, mem, desc,
+ ISER_PROT_KEY_VALID, &prot_reg);
if (err)
goto err_reg;
}
- err = iser_reg_sig_mr(iser_task, desc, &data_sge,
- &prot_sge, &sig_sge);
+ err = iser_reg_sig_mr(iser_task, desc, mem_reg,
+ &prot_reg, mem_reg);
if (err) {
iser_err("Failed to register signature mr\n");
return err;
}
desc->reg_indicators |= ISER_FASTREG_PROTECTED;
-
- regd_buf->reg.lkey = sig_sge.lkey;
- regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
- regd_buf->reg.va = sig_sge.addr;
- regd_buf->reg.len = sig_sge.length;
- } else {
- if (desc)
- regd_buf->reg.rkey = desc->data_mr->rkey;
- else
- regd_buf->reg.rkey = device->mr->rkey;
-
- regd_buf->reg.lkey = data_sge.lkey;
- regd_buf->reg.va = data_sge.addr;
- regd_buf->reg.len = data_sge.length;
}
return 0;
err_reg:
- if (desc) {
- spin_lock_irqsave(&ib_conn->lock, flags);
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_irqrestore(&ib_conn->lock, flags);
- }
+ if (desc)
+ iser_reg_desc_put(ib_conn, desc);
return err;
}
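/*
 * Worked example, not part of the patch: iser_alloc_bounce_sg() above builds
 * the bounce list one page at a time, so for data_len = 10000 and
 * PAGE_SIZE = 4096 it allocates DIV_ROUND_UP(10000, 4096) = 3 entries of
 * lengths 4096, 4096 and 1808.  iser_copy_bounce() then walks the original
 * and bounce lists in parallel, advancing bsg_off within a bounce page and
 * stepping to the next bounce entry once bsg_off reaches bsg->length.
 */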
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 4065abe28829..cc2dd35ffbc0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -274,6 +274,65 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
}
static int
+iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
+ struct fast_reg_descriptor *desc)
+{
+ struct iser_pi_context *pi_ctx = NULL;
+ struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2,
+ .flags = IB_MR_SIGNATURE_EN};
+ int ret = 0;
+
+ desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+ if (!desc->pi_ctx)
+ return -ENOMEM;
+
+ pi_ctx = desc->pi_ctx;
+
+ pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+ ISCSI_ISER_SG_TABLESIZE);
+ if (IS_ERR(pi_ctx->prot_frpl)) {
+ ret = PTR_ERR(pi_ctx->prot_frpl);
+ goto prot_frpl_failure;
+ }
+
+ pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(pi_ctx->prot_mr)) {
+ ret = PTR_ERR(pi_ctx->prot_mr);
+ goto prot_mr_failure;
+ }
+ desc->reg_indicators |= ISER_PROT_KEY_VALID;
+
+ pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+ if (IS_ERR(pi_ctx->sig_mr)) {
+ ret = PTR_ERR(pi_ctx->sig_mr);
+ goto sig_mr_failure;
+ }
+ desc->reg_indicators |= ISER_SIG_KEY_VALID;
+ desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
+
+ return 0;
+
+sig_mr_failure:
+ ib_dereg_mr(desc->pi_ctx->prot_mr);
+prot_mr_failure:
+ ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+prot_frpl_failure:
+ kfree(desc->pi_ctx);
+
+ return ret;
+}
+
+static void
+iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
+{
+ ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
+ ib_dereg_mr(pi_ctx->prot_mr);
+ ib_destroy_mr(pi_ctx->sig_mr);
+ kfree(pi_ctx);
+}
+
+static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
bool pi_enable, struct fast_reg_descriptor *desc)
{
@@ -297,59 +356,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
desc->reg_indicators |= ISER_DATA_KEY_VALID;
if (pi_enable) {
- struct ib_mr_init_attr mr_init_attr = {0};
- struct iser_pi_context *pi_ctx = NULL;
-
- desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!desc->pi_ctx) {
- iser_err("Failed to allocate pi context\n");
- ret = -ENOMEM;
+ ret = iser_alloc_pi_ctx(ib_device, pd, desc);
+ if (ret)
goto pi_ctx_alloc_failure;
- }
- pi_ctx = desc->pi_ctx;
-
- pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_frpl)) {
- ret = PTR_ERR(pi_ctx->prot_frpl);
- iser_err("Failed to allocate prot frpl ret=%d\n",
- ret);
- goto prot_frpl_failure;
- }
-
- pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
- ISCSI_ISER_SG_TABLESIZE + 1);
- if (IS_ERR(pi_ctx->prot_mr)) {
- ret = PTR_ERR(pi_ctx->prot_mr);
- iser_err("Failed to allocate prot frmr ret=%d\n",
- ret);
- goto prot_mr_failure;
- }
- desc->reg_indicators |= ISER_PROT_KEY_VALID;
-
- mr_init_attr.max_reg_descriptors = 2;
- mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
- pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
- if (IS_ERR(pi_ctx->sig_mr)) {
- ret = PTR_ERR(pi_ctx->sig_mr);
- iser_err("Failed to allocate signature enabled mr err=%d\n",
- ret);
- goto sig_mr_failure;
- }
- desc->reg_indicators |= ISER_SIG_KEY_VALID;
}
- desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
-
- iser_dbg("Create fr_desc %p page_list %p\n",
- desc, desc->data_frpl->page_list);
return 0;
-sig_mr_failure:
- ib_dereg_mr(desc->pi_ctx->prot_mr);
-prot_mr_failure:
- ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
-prot_frpl_failure:
- kfree(desc->pi_ctx);
pi_ctx_alloc_failure:
ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
@@ -416,12 +428,8 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn)
list_del(&desc->list);
ib_free_fast_reg_page_list(desc->data_frpl);
ib_dereg_mr(desc->data_mr);
- if (desc->pi_ctx) {
- ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
- ib_dereg_mr(desc->pi_ctx->prot_mr);
- ib_destroy_mr(desc->pi_ctx->sig_mr);
- kfree(desc->pi_ctx);
- }
+ if (desc->pi_ctx)
+ iser_free_pi_ctx(desc->pi_ctx);
kfree(desc);
++i;
}
@@ -721,7 +729,7 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
struct iser_conn *iser_conn;
iser_conn = (struct iser_conn *)cma_id->context;
- iser_conn->state = ISER_CONN_DOWN;
+ iser_conn->state = ISER_CONN_TERMINATING;
}
/**
@@ -992,93 +1000,6 @@ connect_failure:
return err;
}
-/**
- * iser_reg_page_vec - Register physical memory
- *
- * returns: 0 on success, errno code on failure
- */
-int iser_reg_page_vec(struct ib_conn *ib_conn,
- struct iser_page_vec *page_vec,
- struct iser_mem_reg *mem_reg)
-{
- struct ib_pool_fmr *mem;
- u64 io_addr;
- u64 *page_list;
- int status;
-
- page_list = page_vec->pages;
- io_addr = page_list[0];
-
- mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool,
- page_list,
- page_vec->length,
- io_addr);
-
- if (IS_ERR(mem)) {
- status = (int)PTR_ERR(mem);
- iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
- return status;
- }
-
- mem_reg->lkey = mem->fmr->lkey;
- mem_reg->rkey = mem->fmr->rkey;
- mem_reg->len = page_vec->length * SIZE_4K;
- mem_reg->va = io_addr;
- mem_reg->mem_h = (void *)mem;
-
- mem_reg->va += page_vec->offset;
- mem_reg->len = page_vec->data_size;
-
- iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
- "entry[0]: (0x%08lx,%ld)] -> "
- "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
- page_vec, page_vec->length,
- (unsigned long)page_vec->pages[0],
- (unsigned long)page_vec->data_size,
- (unsigned int)mem_reg->lkey, mem_reg->mem_h,
- (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
- return 0;
-}
-
-/**
- * Unregister (previosuly registered using FMR) memory.
- * If memory is non-FMR does nothing.
- */
-void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- int ret;
-
- if (!reg->mem_h)
- return;
-
- iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
-
- ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
- if (ret)
- iser_err("ib_fmr_pool_unmap failed %d\n", ret);
-
- reg->mem_h = NULL;
-}
-
-void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
-{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
- struct iser_conn *iser_conn = iser_task->iser_conn;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
- struct fast_reg_descriptor *desc = reg->mem_h;
-
- if (!desc)
- return;
-
- reg->mem_h = NULL;
- spin_lock_bh(&ib_conn->lock);
- list_add_tail(&desc->list, &ib_conn->fastreg.pool);
- spin_unlock_bh(&ib_conn->lock);
-}
-
int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_failed;
@@ -1210,6 +1131,9 @@ iser_handle_comp_error(struct ib_conn *ib_conn,
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
+ if (wc->wr_id == ISER_FASTREG_LI_WRID)
+ return;
+
if (is_iser_tx_desc(iser_conn, wr_id)) {
struct iser_tx_desc *desc = wr_id;
@@ -1254,13 +1178,11 @@ static void iser_handle_wc(struct ib_wc *wc)
else
iser_dbg("flush error: wr id %llx\n", wc->wr_id);
- if (wc->wr_id != ISER_FASTREG_LI_WRID &&
- wc->wr_id != ISER_BEACON_WRID)
- iser_handle_comp_error(ib_conn, wc);
-
- /* complete in case all flush errors were consumed */
if (wc->wr_id == ISER_BEACON_WRID)
+ /* all flush errors were consumed */
complete(&ib_conn->flush_comp);
+ else
+ iser_handle_comp_error(ib_conn, wc);
}
}
@@ -1306,7 +1228,7 @@ static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
- struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct fast_reg_descriptor *desc = reg->mem_h;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 075b19cc78e8..327529ee85eb 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -76,12 +76,12 @@ isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
- struct isert_conn *isert_conn = (struct isert_conn *)context;
+ struct isert_conn *isert_conn = context;
isert_err("conn %p event: %d\n", isert_conn, e->event);
switch (e->event) {
case IB_EVENT_COMM_EST:
- rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
+ rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
@@ -107,13 +107,12 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
return 0;
}
-static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+static struct isert_comp *
+isert_comp_get(struct isert_conn *isert_conn)
{
- struct isert_device *device = isert_conn->conn_device;
- struct ib_qp_init_attr attr;
+ struct isert_device *device = isert_conn->device;
struct isert_comp *comp;
- int ret, i, min = 0;
+ int i, min = 0;
mutex_lock(&device_list_mutex);
for (i = 0; i < device->comps_used; i++)
@@ -122,9 +121,30 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
min = i;
comp = &device->comps[min];
comp->active_qps++;
+ mutex_unlock(&device_list_mutex);
+
isert_info("conn %p, using comp %p min_index: %d\n",
isert_conn, comp, min);
+
+ return comp;
+}
+
+static void
+isert_comp_put(struct isert_comp *comp)
+{
+ mutex_lock(&device_list_mutex);
+ comp->active_qps--;
mutex_unlock(&device_list_mutex);
+}
+
+static struct ib_qp *
+isert_create_qp(struct isert_conn *isert_conn,
+ struct isert_comp *comp,
+ struct rdma_cm_id *cma_id)
+{
+ struct isert_device *device = isert_conn->device;
+ struct ib_qp_init_attr attr;
+ int ret;
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
@@ -149,19 +169,31 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
- ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+ ret = rdma_create_qp(cma_id, device->pd, &attr);
if (ret) {
isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ return cma_id->qp;
+}
+
+static int
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+{
+ struct isert_comp *comp;
+ int ret;
+
+ comp = isert_comp_get(isert_conn);
+ isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
+ if (IS_ERR(isert_conn->qp)) {
+ ret = PTR_ERR(isert_conn->qp);
goto err;
}
- isert_conn->conn_qp = cma_id->qp;
return 0;
err:
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
+ isert_comp_put(comp);
return ret;
}
@@ -174,18 +206,19 @@ isert_cq_event_callback(struct ib_event *e, void *context)
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
u64 dma_addr;
int i, j;
- isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
+ isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
sizeof(struct iser_rx_desc), GFP_KERNEL);
- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
goto fail;
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
@@ -198,21 +231,21 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
- rx_sg->lkey = isert_conn->conn_mr->lkey;
+ rx_sg->lkey = device->mr->lkey;
}
- isert_conn->conn_rx_desc_head = 0;
+ isert_conn->rx_desc_head = 0;
return 0;
dma_map_fail:
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
fail:
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
@@ -222,59 +255,51 @@ fail:
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
struct iser_rx_desc *rx_desc;
int i;
- if (!isert_conn->conn_rx_descs)
+ if (!isert_conn->rx_descs)
return;
- rx_desc = isert_conn->conn_rx_descs;
+ rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
- kfree(isert_conn->conn_rx_descs);
- isert_conn->conn_rx_descs = NULL;
+ kfree(isert_conn->rx_descs);
+ isert_conn->rx_descs = NULL;
}
static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);
-static int
-isert_create_device_ib_res(struct isert_device *device)
+static void
+isert_free_comps(struct isert_device *device)
{
- struct ib_device *ib_dev = device->ib_device;
- struct ib_device_attr *dev_attr;
- int ret = 0, i;
- int max_cqe;
-
- dev_attr = &device->dev_attr;
- ret = isert_query_device(ib_dev, dev_attr);
- if (ret)
- return ret;
+ int i;
- max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
+ for (i = 0; i < device->comps_used; i++) {
+ struct isert_comp *comp = &device->comps[i];
- /* asign function handlers */
- if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
+ if (comp->cq) {
+ cancel_work_sync(&comp->work);
+ ib_destroy_cq(comp->cq);
+ }
}
+ kfree(device->comps);
+}
- /* Check signature cap */
- device->pi_capable = dev_attr->device_cap_flags &
- IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+static int
+isert_alloc_comps(struct isert_device *device,
+ struct ib_device_attr *attr)
+{
+ int i, max_cqe, ret = 0;
device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
- device->ib_device->num_comp_vectors));
+ device->ib_device->num_comp_vectors));
+
isert_info("Using %d CQs, %s supports %d vectors support "
"Fast registration %d pi_capable %d\n",
device->comps_used, device->ib_device->name,
@@ -288,6 +313,8 @@ isert_create_device_ib_res(struct isert_device *device)
return -ENOMEM;
}
+ max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
+
for (i = 0; i < device->comps_used; i++) {
struct isert_comp *comp = &device->comps[i];
@@ -299,6 +326,7 @@ isert_create_device_ib_res(struct isert_device *device)
(void *)comp,
max_cqe, i);
if (IS_ERR(comp->cq)) {
+ isert_err("Unable to allocate cq\n");
ret = PTR_ERR(comp->cq);
comp->cq = NULL;
goto out_cq;
@@ -310,40 +338,79 @@ isert_create_device_ib_res(struct isert_device *device)
}
return 0;
-
out_cq:
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
+ isert_free_comps(device);
+ return ret;
+}
- if (comp->cq) {
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- }
+static int
+isert_create_device_ib_res(struct isert_device *device)
+{
+ struct ib_device_attr *dev_attr;
+ int ret;
+
+ dev_attr = &device->dev_attr;
+ ret = isert_query_device(device->ib_device, dev_attr);
+ if (ret)
+ return ret;
+
+ /* assign function handlers */
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+ dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
+ device->use_fastreg = 1;
+ device->reg_rdma_mem = isert_reg_rdma;
+ device->unreg_rdma_mem = isert_unreg_rdma;
+ } else {
+ device->use_fastreg = 0;
+ device->reg_rdma_mem = isert_map_rdma;
+ device->unreg_rdma_mem = isert_unmap_cmd;
}
- kfree(device->comps);
+ ret = isert_alloc_comps(device, dev_attr);
+ if (ret)
+ return ret;
+
+ device->pd = ib_alloc_pd(device->ib_device);
+ if (IS_ERR(device->pd)) {
+ ret = PTR_ERR(device->pd);
+ isert_err("failed to allocate pd, device %p, ret=%d\n",
+ device, ret);
+ goto out_cq;
+ }
+
+ device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(device->mr)) {
+ ret = PTR_ERR(device->mr);
+ isert_err("failed to create dma mr, device %p, ret=%d\n",
+ device, ret);
+ goto out_mr;
+ }
+
+ /* Check signature cap */
+ device->pi_capable = dev_attr->device_cap_flags &
+ IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
+ return 0;
+
+out_mr:
+ ib_dealloc_pd(device->pd);
+out_cq:
+ isert_free_comps(device);
return ret;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
- int i;
-
isert_info("device %p\n", device);
- for (i = 0; i < device->comps_used; i++) {
- struct isert_comp *comp = &device->comps[i];
-
- cancel_work_sync(&comp->work);
- ib_destroy_cq(comp->cq);
- comp->cq = NULL;
- }
- kfree(device->comps);
+ ib_dereg_mr(device->mr);
+ ib_dealloc_pd(device->pd);
+ isert_free_comps(device);
}
static void
-isert_device_try_release(struct isert_device *device)
+isert_device_put(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
@@ -357,7 +424,7 @@ isert_device_try_release(struct isert_device *device)
}
static struct isert_device *
-isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
+isert_device_get(struct rdma_cm_id *cma_id)
{
struct isert_device *device;
int ret;
@@ -404,13 +471,13 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
struct fast_reg_descriptor *fr_desc, *tmp;
int i = 0;
- if (list_empty(&isert_conn->conn_fr_pool))
+ if (list_empty(&isert_conn->fr_pool))
return;
isert_info("Freeing conn %p fastreg pool", isert_conn);
list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->conn_fr_pool, list) {
+ &isert_conn->fr_pool, list) {
list_del(&fr_desc->list);
ib_free_fast_reg_page_list(fr_desc->data_frpl);
ib_dereg_mr(fr_desc->data_mr);
@@ -424,9 +491,9 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
++i;
}
- if (i < isert_conn->conn_fr_pool_size)
+ if (i < isert_conn->fr_pool_size)
isert_warn("Pool still has %d regions registered\n",
- isert_conn->conn_fr_pool_size - i);
+ isert_conn->fr_pool_size - i);
}
static int
@@ -526,7 +593,7 @@ static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_session *se_sess = isert_conn->conn->sess->se_sess;
struct se_node_acl *se_nacl = se_sess->se_node_acl;
int i, ret, tag_num;
@@ -537,7 +604,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
- isert_conn->conn_fr_pool_size = 0;
+ isert_conn->fr_pool_size = 0;
for (i = 0; i < tag_num; i++) {
fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
if (!fr_desc) {
@@ -547,7 +614,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
}
ret = isert_create_fr_desc(device->ib_device,
- isert_conn->conn_pd, fr_desc);
+ device->pd, fr_desc);
if (ret) {
isert_err("Failed to create fastreg descriptor err=%d\n",
ret);
@@ -555,12 +622,12 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
goto err;
}
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- isert_conn->conn_fr_pool_size++;
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ isert_conn->fr_pool_size++;
}
isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->conn_fr_pool_size);
+ isert_conn, isert_conn->fr_pool_size);
return 0;
@@ -569,55 +636,50 @@ err:
return ret;
}
-static int
-isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static void
+isert_init_conn(struct isert_conn *isert_conn)
{
- struct isert_np *isert_np = cma_id->context;
- struct iscsi_np *np = isert_np->np;
- struct isert_conn *isert_conn;
- struct isert_device *device;
- struct ib_device *ib_dev = cma_id->device;
- int ret = 0;
-
- spin_lock_bh(&np->np_thread_lock);
- if (!np->enabled) {
- spin_unlock_bh(&np->np_thread_lock);
- isert_dbg("iscsi_np is not enabled, reject connect request\n");
- return rdma_reject(cma_id, NULL, 0);
- }
- spin_unlock_bh(&np->np_thread_lock);
-
- isert_dbg("cma_id: %p, portal: %p\n",
- cma_id, cma_id->context);
-
- isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
- if (!isert_conn) {
- isert_err("Unable to allocate isert_conn\n");
- return -ENOMEM;
- }
isert_conn->state = ISER_CONN_INIT;
- INIT_LIST_HEAD(&isert_conn->conn_accept_node);
- init_completion(&isert_conn->conn_login_comp);
+ INIT_LIST_HEAD(&isert_conn->accept_node);
+ init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
- init_completion(&isert_conn->conn_wait);
- kref_init(&isert_conn->conn_kref);
- mutex_init(&isert_conn->conn_mutex);
- spin_lock_init(&isert_conn->conn_lock);
- INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+ init_completion(&isert_conn->wait);
+ kref_init(&isert_conn->kref);
+ mutex_init(&isert_conn->mutex);
+ spin_lock_init(&isert_conn->pool_lock);
+ INIT_LIST_HEAD(&isert_conn->fr_pool);
+}
+
+static void
+isert_free_login_buf(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
- isert_conn->conn_cm_id = cma_id;
+ ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN,
+ DMA_FROM_DEVICE);
+ kfree(isert_conn->login_buf);
+}
+
+static int
+isert_alloc_login_buf(struct isert_conn *isert_conn,
+ struct ib_device *ib_dev)
+{
+ int ret;
isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!isert_conn->login_buf) {
isert_err("Unable to allocate isert_conn->login_buf\n");
- ret = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
isert_conn->login_req_buf = isert_conn->login_buf;
isert_conn->login_rsp_buf = isert_conn->login_buf +
ISCSI_DEF_MAX_RECV_SEG_LEN;
+
isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
isert_conn->login_buf, isert_conn->login_req_buf,
isert_conn->login_rsp_buf);
@@ -628,8 +690,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
- ret);
+ isert_err("login_req_dma mapping error: %d\n", ret);
isert_conn->login_req_dma = 0;
goto out_login_buf;
}
@@ -640,17 +701,58 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
- isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
- ret);
+ isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
goto out_req_dma_map;
}
- device = isert_device_find_by_ib_dev(cma_id);
+ return 0;
+
+out_req_dma_map:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+out_login_buf:
+ kfree(isert_conn->login_buf);
+ return ret;
+}
+
+static int
+isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+ struct isert_np *isert_np = cma_id->context;
+ struct iscsi_np *np = isert_np->np;
+ struct isert_conn *isert_conn;
+ struct isert_device *device;
+ int ret = 0;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (!np->enabled) {
+ spin_unlock_bh(&np->np_thread_lock);
+ isert_dbg("iscsi_np is not enabled, reject connect request\n");
+ return rdma_reject(cma_id, NULL, 0);
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ isert_dbg("cma_id: %p, portal: %p\n",
+ cma_id, cma_id->context);
+
+ isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
+ if (!isert_conn)
+ return -ENOMEM;
+
+ isert_init_conn(isert_conn);
+ isert_conn->cm_id = cma_id;
+
+ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
+ if (ret)
+ goto out;
+
+ device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto out_rsp_dma_map;
}
+ isert_conn->device = device;
/* Set max inflight RDMA READ requests */
isert_conn->initiator_depth = min_t(u8,
@@ -658,24 +760,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
device->dev_attr.max_qp_init_rd_atom);
isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
- isert_conn->conn_device = device;
- isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
- if (IS_ERR(isert_conn->conn_pd)) {
- ret = PTR_ERR(isert_conn->conn_pd);
- isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_pd;
- }
-
- isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(isert_conn->conn_mr)) {
- ret = PTR_ERR(isert_conn->conn_mr);
- isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
- isert_conn, ret);
- goto out_mr;
- }
-
ret = isert_conn_setup_qp(isert_conn, cma_id);
if (ret)
goto out_conn_dev;
@@ -689,7 +773,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
goto out_conn_dev;
mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+ list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
mutex_unlock(&isert_np->np_accept_mutex);
isert_info("np %p: Allow accept_np to continue\n", np);
@@ -697,19 +781,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
return 0;
out_conn_dev:
- ib_dereg_mr(isert_conn->conn_mr);
-out_mr:
- ib_dealloc_pd(isert_conn->conn_pd);
-out_pd:
- isert_device_try_release(device);
+ isert_device_put(device);
out_rsp_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
-out_req_dma_map:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
-out_login_buf:
- kfree(isert_conn->login_buf);
+ isert_free_login_buf(isert_conn);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0);
@@ -719,43 +793,32 @@ out:
static void
isert_connect_release(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
isert_dbg("conn %p\n", isert_conn);
- if (device && device->use_fastreg)
+ BUG_ON(!device);
+
+ if (device->use_fastreg)
isert_conn_free_fastreg_pool(isert_conn);
isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ if (isert_conn->cm_id)
+ rdma_destroy_id(isert_conn->cm_id);
- if (isert_conn->conn_qp) {
- struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
+ if (isert_conn->qp) {
+ struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
- isert_dbg("dec completion context %p active_qps\n", comp);
- mutex_lock(&device_list_mutex);
- comp->active_qps--;
- mutex_unlock(&device_list_mutex);
-
- ib_destroy_qp(isert_conn->conn_qp);
+ isert_comp_put(comp);
+ ib_destroy_qp(isert_conn->qp);
}
- ib_dereg_mr(isert_conn->conn_mr);
- ib_dealloc_pd(isert_conn->conn_pd);
+ if (isert_conn->login_buf)
+ isert_free_login_buf(isert_conn);
- if (isert_conn->login_buf) {
- ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
- ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_buf);
- }
- kfree(isert_conn);
+ isert_device_put(device);
- if (device)
- isert_device_try_release(device);
+ kfree(isert_conn);
}
static void
@@ -765,22 +828,22 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
isert_info("conn %p\n", isert_conn);
- if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+ if (!kref_get_unless_zero(&isert_conn->kref)) {
isert_warn("conn %p connect_release is running\n", isert_conn);
return;
}
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
if (isert_conn->state != ISER_CONN_FULL_FEATURE)
isert_conn->state = ISER_CONN_UP;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
}
static void
-isert_release_conn_kref(struct kref *kref)
+isert_release_kref(struct kref *kref)
{
struct isert_conn *isert_conn = container_of(kref,
- struct isert_conn, conn_kref);
+ struct isert_conn, kref);
isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
current->pid);
@@ -791,7 +854,7 @@ isert_release_conn_kref(struct kref *kref)
static void
isert_put_conn(struct isert_conn *isert_conn)
{
- kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+ kref_put(&isert_conn->kref, isert_release_kref);
}
/**
@@ -803,7 +866,7 @@ isert_put_conn(struct isert_conn *isert_conn)
* to TERMINATING and start teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
- * This routine must be called with conn_mutex held. Thus it is
+ * This routine must be called with mutex held. Thus it is
* safe to call multiple times.
*/
static void
@@ -819,7 +882,7 @@ isert_conn_terminate(struct isert_conn *isert_conn)
isert_info("Terminating conn %p state %d\n",
isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
- err = rdma_disconnect(isert_conn->conn_cm_id);
+ err = rdma_disconnect(isert_conn->cm_id);
if (err)
isert_warn("Failed rdma_disconnect isert_conn %p\n",
isert_conn);
@@ -868,22 +931,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
isert_conn = cma_id->qp->qp_context;
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
- isert_info("conn %p completing conn_wait\n", isert_conn);
- complete(&isert_conn->conn_wait);
+ isert_info("conn %p completing wait\n", isert_conn);
+ complete(&isert_conn->wait);
return 0;
}
-static void
+static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
+
+ return -1;
}
static int
@@ -912,7 +978,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
- isert_connect_error(cma_id);
+ ret = isert_connect_error(cma_id);
break;
default:
isert_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -927,11 +993,11 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
struct ib_recv_wr *rx_wr, *rx_wr_failed;
int i, ret;
- unsigned int rx_head = isert_conn->conn_rx_desc_head;
+ unsigned int rx_head = isert_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
- for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &isert_conn->conn_rx_descs[rx_head];
+ for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &isert_conn->rx_descs[rx_head];
rx_wr->wr_id = (uintptr_t)rx_desc;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
@@ -943,14 +1009,14 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
rx_wr->next = NULL; /* mark end of work requests list */
isert_conn->post_recv_buf_count += count;
- ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
+ ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
&rx_wr_failed);
if (ret) {
isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
} else {
isert_dbg("Posted %d RX buffers\n", count);
- isert_conn->conn_rx_desc_head = rx_head;
+ isert_conn->rx_desc_head = rx_head;
}
return ret;
}
@@ -958,7 +1024,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr, *send_wr_failed;
int ret;
@@ -972,7 +1038,7 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
+ ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
@@ -984,7 +1050,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
@@ -995,8 +1062,8 @@ isert_create_send_desc(struct isert_conn *isert_conn,
tx_desc->num_sge = 1;
tx_desc->isert_cmd = isert_cmd;
- if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
@@ -1005,7 +1072,8 @@ static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u64 dma_addr;
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
@@ -1018,7 +1086,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
- tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
@@ -1051,7 +1119,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_req_dma;
sge.length = ISER_RX_LOGIN_SIZE;
- sge.lkey = isert_conn->conn_mr->lkey;
+ sge.lkey = isert_conn->device->mr->lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
@@ -1062,7 +1130,7 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
rx_wr.num_sge = 1;
isert_conn->post_recv_buf_count++;
- ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
if (ret) {
isert_err("ib_post_recv() failed: %d\n", ret);
isert_conn->post_recv_buf_count--;
@@ -1076,8 +1144,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
u32 length)
{
struct isert_conn *isert_conn = conn->context;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
- struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
+ struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
isert_create_send_desc(isert_conn, NULL, tx_desc);
@@ -1100,13 +1169,13 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
tx_dsg->addr = isert_conn->login_rsp_dma;
tx_dsg->length = length;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = isert_conn->device->mr->lkey;
tx_desc->num_sge = 2;
}
if (!login->login_failed) {
if (login->login_complete) {
if (!conn->sess->sess_ops->SessionType &&
- isert_conn->conn_device->use_fastreg) {
+ isert_conn->device->use_fastreg) {
ret = isert_conn_create_fastreg_pool(isert_conn);
if (ret) {
isert_err("Conn: %p failed to create"
@@ -1124,9 +1193,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
return ret;
/* Now we are in FULL_FEATURE phase */
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_FULL_FEATURE;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
goto post_send;
}
@@ -1185,7 +1254,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
memcpy(login->req_buf, &rx_desc->data[0], size);
if (login->first_request) {
- complete(&isert_conn->conn_login_comp);
+ complete(&isert_conn->login_comp);
return;
}
schedule_delayed_work(&conn->login_work, 0);
@@ -1194,7 +1263,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
struct iscsi_cmd *cmd;
@@ -1379,13 +1448,12 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
{
struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
struct iscsi_conn *conn = isert_conn->conn;
- struct iscsi_session *sess = conn->sess;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
- if (sess->sess_ops->SessionType &&
+ if (conn->sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
" ignoring\n", opcode);
@@ -1497,10 +1565,11 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
}
static void
-isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
- u32 xfer_len)
+isert_rcv_completion(struct iser_rx_desc *desc,
+ struct isert_conn *isert_conn,
+ u32 xfer_len)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen, outstanding;
@@ -1532,9 +1601,9 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
if (login && !login->first_request)
isert_rx_login_req(isert_conn);
}
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
complete(&isert_conn->login_req_comp);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
} else {
isert_rx_do_work(desc, isert_conn);
}
@@ -1566,7 +1635,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct scatterlist *sg, u32 nents, u32 length, u32 offset,
enum iser_ib_op_code op, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
data->dma_dir = op == ISER_IB_RDMA_WRITE ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
@@ -1597,7 +1666,7 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
memset(data, 0, sizeof(*data));
@@ -1634,7 +1703,6 @@ static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- LIST_HEAD(unmap_list);
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1644,9 +1712,9 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
isert_unmap_data_buf(isert_conn, &wr->prot);
wr->fr_desc->ind &= ~ISERT_PROTECTED;
}
- spin_lock_bh(&isert_conn->conn_lock);
- list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_bh(&isert_conn->conn_lock);
+ spin_lock_bh(&isert_conn->pool_lock);
+ list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_bh(&isert_conn->pool_lock);
wr->fr_desc = NULL;
}
@@ -1665,7 +1733,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1815,7 +1883,7 @@ isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;
if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1841,7 +1909,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
int ret = 0;
if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
@@ -1861,11 +1929,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
- if (ret)
+ if (ret) {
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
- else
+ } else {
target_execute_cmd(se_cmd);
+ }
}
static void
@@ -1874,7 +1944,7 @@ isert_do_control_comp(struct work_struct *work)
struct isert_cmd *isert_cmd = container_of(work,
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
@@ -1922,10 +1992,10 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
+isert_snd_completion(struct iser_tx_desc *tx_desc,
struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct isert_rdma_wr *wr;
@@ -1938,10 +2008,6 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
switch (wr->iser_ib_op) {
- case ISER_IB_RECV:
- isert_err("Got ISER_IB_RECV\n");
- dump_stack();
- break;
case ISER_IB_SEND:
isert_response_completion(tx_desc, isert_cmd,
isert_conn, ib_dev);
@@ -1973,8 +2039,8 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
- void *start = isert_conn->conn_rx_descs;
- int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
+ void *start = isert_conn->rx_descs;
+ int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
if (wr_id >= start && wr_id < start + len)
return false;
@@ -1986,11 +2052,11 @@ static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
if (wc->wr_id == ISER_BEACON_WRID) {
- isert_info("conn %p completing conn_wait_comp_err\n",
+ isert_info("conn %p completing wait_comp_err\n",
isert_conn);
- complete(&isert_conn->conn_wait_comp_err);
+ complete(&isert_conn->wait_comp_err);
} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->cm_id->device;
struct isert_cmd *isert_cmd;
struct iser_tx_desc *desc;
@@ -2018,10 +2084,10 @@ isert_handle_wc(struct ib_wc *wc)
if (likely(wc->status == IB_WC_SUCCESS)) {
if (wc->opcode == IB_WC_RECV) {
rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
- isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
+ isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
} else {
tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
- isert_send_completion(tx_desc, isert_conn);
+ isert_snd_completion(tx_desc, isert_conn);
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -2070,7 +2136,7 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
- ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
+ ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
@@ -2083,7 +2149,7 @@ static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header;
@@ -2097,7 +2163,8 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
u32 padding, pdu_len;
@@ -2116,7 +2183,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
@@ -2131,8 +2198,8 @@ static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2148,8 +2215,8 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
if (conn->tpg->tpg_attrib.t10_pi) {
if (device->pi_capable) {
@@ -2170,7 +2237,7 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool nopout_response)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2189,7 +2256,7 @@ static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2207,7 +2274,7 @@ static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
@@ -2225,9 +2292,10 @@ static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
struct iscsi_reject *hdr =
(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
@@ -2243,7 +2311,7 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2257,7 +2325,7 @@ static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
@@ -2273,7 +2341,8 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
if (txt_rsp_len) {
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
void *txt_rsp_buf = cmd->buf_ptr;
@@ -2283,7 +2352,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
- tx_dsg->lkey = isert_conn->conn_mr->lkey;
+ tx_dsg->lkey = device->mr->lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
@@ -2300,7 +2369,8 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct scatterlist *sg_start, *tmp_sg;
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
u32 sg_off, page_off;
int i = 0, sg_nents;
@@ -2324,7 +2394,7 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
ib_sge->length = min_t(u32, data_left,
ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = isert_conn->conn_mr->lkey;
+ ib_sge->lkey = device->mr->lkey;
isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
ib_sge->addr, ib_sge->length, ib_sge->lkey);
@@ -2346,7 +2416,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
struct isert_data_buf *data = &wr->data;
struct ib_send_wr *send_wr;
struct ib_sge *ib_sge;
@@ -2485,7 +2555,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
enum isert_indicator ind,
struct ib_sge *sge)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_device *device = isert_conn->device;
+ struct ib_device *ib_dev = device->ib_device;
struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl;
struct ib_send_wr fr_wr, inv_wr;
@@ -2494,7 +2565,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
u32 page_off;
if (mem->dma_nents == 1) {
- sge->lkey = isert_conn->conn_mr->lkey;
+ sge->lkey = device->mr->lkey;
sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
@@ -2542,7 +2613,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
else
wr->next = &fr_wr;
- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
return ret;
@@ -2655,7 +2726,7 @@ isert_reg_sig_mr(struct isert_conn *isert_conn,
else
wr->next = &sig_wr;
- ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+ ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
if (ret) {
isert_err("fast registration failed, ret:%d\n", ret);
goto err;
@@ -2685,14 +2756,14 @@ isert_handle_prot_cmd(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct isert_rdma_wr *wr)
{
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_device *device = isert_conn->device;
struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
int ret;
if (!wr->fr_desc->pi_ctx) {
ret = isert_create_pi_ctx(wr->fr_desc,
device->ib_device,
- isert_conn->conn_pd);
+ device->pd);
if (ret) {
isert_err("conn %p failed to allocate pi_ctx\n",
isert_conn);
@@ -2763,11 +2834,11 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
return ret;
if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ fr_desc = list_first_entry(&isert_conn->fr_pool,
struct fast_reg_descriptor, list);
list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
wr->fr_desc = fr_desc;
}
@@ -2814,9 +2885,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
unmap_cmd:
if (fr_desc) {
- spin_lock_irqsave(&isert_conn->conn_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
- spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+ spin_lock_irqsave(&isert_conn->pool_lock, flags);
+ list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
+ spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
}
isert_unmap_data_buf(isert_conn, &wr->data);
@@ -2829,8 +2900,8 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;
@@ -2859,7 +2930,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
wr->send_wr_num += 1;
}
- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
@@ -2879,8 +2950,8 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
- struct isert_device *device = isert_conn->conn_device;
+ struct isert_conn *isert_conn = conn->context;
+ struct isert_device *device = isert_conn->device;
struct ib_send_wr *wr_failed;
int rc;
@@ -2893,7 +2964,7 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}
- rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+ rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
if (rc)
isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
@@ -2987,7 +3058,7 @@ isert_setup_id(struct isert_np *isert_np)
goto out_id;
}
- ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+ ret = rdma_listen(id, 0);
if (ret) {
isert_err("rdma_listen() failed: %d\n", ret);
goto out_id;
@@ -3046,7 +3117,7 @@ out:
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_conn_param cp;
int ret;
@@ -3067,7 +3138,7 @@ isert_rdma_accept(struct isert_conn *isert_conn)
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+ struct isert_conn *isert_conn = conn->context;
int ret;
isert_info("before login_req comp conn: %p\n", isert_conn);
@@ -3090,8 +3161,8 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
isert_rx_login_req(isert_conn);
- isert_info("before conn_login_comp conn: %p\n", conn);
- ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+ isert_info("before login_comp conn: %p\n", conn);
+ ret = wait_for_completion_interruptible(&isert_conn->login_comp);
if (ret)
return ret;
@@ -3104,7 +3175,7 @@ static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
struct isert_conn *isert_conn)
{
- struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+ struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_route *cm_route = &cm_id->route;
struct sockaddr_in *sock_in;
struct sockaddr_in6 *sock_in6;
@@ -3137,13 +3208,13 @@ isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn;
- int max_accept = 0, ret;
+ int ret;
accept_wait:
ret = down_interruptible(&isert_np->np_sem);
- if (ret || max_accept > 5)
+ if (ret)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
@@ -3162,17 +3233,15 @@ accept_wait:
mutex_lock(&isert_np->np_accept_mutex);
if (list_empty(&isert_np->np_accept_list)) {
mutex_unlock(&isert_np->np_accept_mutex);
- max_accept++;
goto accept_wait;
}
isert_conn = list_first_entry(&isert_np->np_accept_list,
- struct isert_conn, conn_accept_node);
- list_del_init(&isert_conn->conn_accept_node);
+ struct isert_conn, accept_node);
+ list_del_init(&isert_conn->accept_node);
mutex_unlock(&isert_np->np_accept_mutex);
conn->context = isert_conn;
isert_conn->conn = conn;
- max_accept = 0;
isert_set_conn_info(np, conn, isert_conn);
@@ -3184,7 +3253,7 @@ accept_wait:
static void
isert_free_np(struct iscsi_np *np)
{
- struct isert_np *isert_np = (struct isert_np *)np->np_context;
+ struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
if (isert_np->np_cm_id)
@@ -3202,7 +3271,7 @@ isert_free_np(struct iscsi_np *np)
isert_info("Still have isert connections, cleaning up...\n");
list_for_each_entry_safe(isert_conn, n,
&isert_np->np_accept_list,
- conn_accept_node) {
+ accept_node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
isert_connect_release(isert_conn);
@@ -3222,11 +3291,11 @@ static void isert_release_work(struct work_struct *work)
isert_info("Starting release conn %p\n", isert_conn);
- wait_for_completion(&isert_conn->conn_wait);
+ wait_for_completion(&isert_conn->wait);
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
isert_info("Destroying conn %p\n", isert_conn);
isert_put_conn(isert_conn);
@@ -3264,15 +3333,15 @@ isert_wait4flush(struct isert_conn *isert_conn)
isert_info("conn %p\n", isert_conn);
- init_completion(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->wait_comp_err);
isert_conn->beacon.wr_id = ISER_BEACON_WRID;
/* post an indication that all flush errors were consumed */
- if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+ if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
isert_err("conn %p failed to post beacon", isert_conn);
return;
}
- wait_for_completion(&isert_conn->conn_wait_comp_err);
+ wait_for_completion(&isert_conn->wait_comp_err);
}
static void isert_wait_conn(struct iscsi_conn *conn)
@@ -3281,17 +3350,17 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_info("Starting conn %p\n", isert_conn);
- mutex_lock(&isert_conn->conn_mutex);
+ mutex_lock(&isert_conn->mutex);
/*
- * Only wait for conn_wait_comp_err if the isert_conn made it
+ * Only wait for wait_comp_err if the isert_conn made it
* into full feature phase..
*/
if (isert_conn->state == ISER_CONN_INIT) {
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
return;
}
isert_conn_terminate(isert_conn);
- mutex_unlock(&isert_conn->conn_mutex);
+ mutex_unlock(&isert_conn->mutex);
isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
@@ -3370,7 +3439,7 @@ static void __exit isert_exit(void)
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
-MODULE_VERSION("0.1");
+MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 8dc8415d152d..9ec23a786c02 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -31,7 +31,6 @@
#define isert_err(fmt, arg...) \
pr_err(PFX "%s: " fmt, __func__ , ## arg)
-#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
@@ -160,27 +159,25 @@ struct isert_conn {
u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
- unsigned int conn_rx_desc_head;
- struct iser_rx_desc *conn_rx_descs;
- struct ib_recv_wr conn_rx_wr[ISERT_MIN_POSTED_RX];
+ unsigned int rx_desc_head;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX];
struct iscsi_conn *conn;
- struct list_head conn_accept_node;
- struct completion conn_login_comp;
+ struct list_head accept_node;
+ struct completion login_comp;
struct completion login_req_comp;
- struct iser_tx_desc conn_login_tx_desc;
- struct rdma_cm_id *conn_cm_id;
- struct ib_pd *conn_pd;
- struct ib_mr *conn_mr;
- struct ib_qp *conn_qp;
- struct isert_device *conn_device;
- struct mutex conn_mutex;
- struct completion conn_wait;
- struct completion conn_wait_comp_err;
- struct kref conn_kref;
- struct list_head conn_fr_pool;
- int conn_fr_pool_size;
+ struct iser_tx_desc login_tx_desc;
+ struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct isert_device *device;
+ struct mutex mutex;
+ struct completion wait;
+ struct completion wait_comp_err;
+ struct kref kref;
+ struct list_head fr_pool;
+ int fr_pool_size;
/* lock to protect fastreg pool */
- spinlock_t conn_lock;
+ spinlock_t pool_lock;
struct work_struct release_work;
struct ib_recv_wr beacon;
bool logout_posted;
@@ -211,6 +208,8 @@ struct isert_device {
bool pi_capable;
int refcount;
struct ib_device *ib_device;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0747c0595a9d..918814cd0f80 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -40,6 +40,7 @@
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
+#include <rdma/ib_cache.h>
#include <linux/atomic.h>
@@ -265,10 +266,10 @@ static int srp_init_qp(struct srp_target_port *target,
if (!attr)
return -ENOMEM;
- ret = ib_find_pkey(target->srp_host->srp_dev->dev,
- target->srp_host->port,
- be16_to_cpu(target->pkey),
- &attr->pkey_index);
+ ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
+ target->srp_host->port,
+ be16_to_cpu(target->pkey),
+ &attr->pkey_index);
if (ret)
goto out;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 6e0a477681e9..9b84b4c0a000 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -93,7 +93,7 @@ MODULE_PARM_DESC(srpt_service_guid,
" instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
-static struct target_fabric_configfs *srpt_target;
+static const struct target_core_fabric_ops srpt_template;
static void srpt_release_channel(struct srpt_rdma_ch *ch);
static int srpt_queue_status(struct se_cmd *cmd);
@@ -207,7 +207,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
}
break;
default:
- printk(KERN_ERR "received unrecognized IB event %d\n",
+ pr_err("received unrecognized IB event %d\n",
event->event);
break;
}
@@ -218,7 +218,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
- printk(KERN_INFO "SRQ event %d\n", event->event);
+ pr_info("SRQ event %d\n", event->event);
}
/**
@@ -242,8 +242,7 @@ static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
ch->sess_name, srpt_get_ch_state(ch));
break;
default:
- printk(KERN_ERR "received unrecognized IB QP event %d\n",
- event->event);
+ pr_err("received unrecognized IB QP event %d\n", event->event);
break;
}
}
@@ -602,7 +601,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev)
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
- printk(KERN_ERR "disabling MAD processing failed.\n");
+ pr_err("disabling MAD processing failed.\n");
if (sport->mad_agent) {
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
@@ -810,7 +809,7 @@ static int srpt_post_send(struct srpt_rdma_ch *ch,
ret = -ENOMEM;
if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
- printk(KERN_WARNING "IB send queue full (needed 1)\n");
+ pr_warn("IB send queue full (needed 1)\n");
goto out;
}
@@ -912,7 +911,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
if (ioctx->n_rbuf >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
- printk(KERN_ERR "received unsupported SRP_CMD request"
+ pr_err("received unsupported SRP_CMD request"
" type (%u out + %u in != %u / %zu)\n",
srp_cmd->data_out_desc_cnt,
srp_cmd->data_in_desc_cnt,
@@ -1432,7 +1431,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
- printk(KERN_ERR "IB completion has been received too late for"
+ pr_err("IB completion has been received too late for"
" wr_id = %u.\n", ioctx->ioctx.index);
}
}
@@ -1457,7 +1456,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
SRPT_STATE_DATA_IN))
target_execute_cmd(&ioctx->cmd);
else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+ pr_err("%s[%d]: wrong state = %d\n", __func__,
__LINE__, srpt_get_cmd_state(ioctx));
} else if (opcode == SRPT_RDMA_ABORT) {
ioctx->rdma_aborted = true;
@@ -1481,7 +1480,7 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
switch (opcode) {
case SRPT_RDMA_READ_LAST:
if (ioctx->n_rdma <= 0) {
- printk(KERN_ERR "Received invalid RDMA read"
+ pr_err("Received invalid RDMA read"
" error completion with idx %d\n",
ioctx->ioctx.index);
break;
@@ -1490,14 +1489,13 @@ static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
if (state == SRPT_STATE_NEED_DATA)
srpt_abort_cmd(ioctx);
else
- printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+ pr_err("%s[%d]: wrong state = %d\n",
__func__, __LINE__, state);
break;
case SRPT_RDMA_WRITE_LAST:
break;
default:
- printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
- __LINE__, opcode);
+ pr_err("%s[%d]: opcode = %u\n", __func__, __LINE__, opcode);
break;
}
}
@@ -1549,8 +1547,8 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
if (sense_data_len > max_sense_len) {
- printk(KERN_WARNING "truncated sense data from %d to %d"
- " bytes\n", sense_data_len, max_sense_len);
+ pr_warn("truncated sense data from %d to %d"
+ " bytes\n", sense_data_len, max_sense_len);
sense_data_len = max_sense_len;
}
@@ -1628,8 +1626,8 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
int addressing_method;
if (unlikely(len < 2)) {
- printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
- "more", len);
+ pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
+ len);
goto out;
}
@@ -1663,7 +1661,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
default:
- printk(KERN_ERR "Unimplemented LUN addressing method %u",
+ pr_err("Unimplemented LUN addressing method %u\n",
addressing_method);
break;
}
@@ -1672,8 +1670,7 @@ out:
return res;
out_err:
- printk(KERN_ERR "Support for multi-level LUNs has not yet been"
- " implemented");
+ pr_err("Support for multi-level LUNs has not yet been implemented\n");
goto out;
}
@@ -1723,7 +1720,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
}
if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
- printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+ pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
ret = TCM_INVALID_CDB_FIELD;
goto send_sense;
@@ -1912,7 +1909,7 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
break;
case SRP_I_LOGOUT:
- printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+ pr_err("Not yet implemented: SRP_I_LOGOUT\n");
break;
case SRP_CRED_RSP:
pr_debug("received SRP_CRED_RSP\n");
@@ -1921,10 +1918,10 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
pr_debug("received SRP_AER_RSP\n");
break;
case SRP_RSP:
- printk(KERN_ERR "Received SRP_RSP\n");
+ pr_err("Received SRP_RSP\n");
break;
default:
- printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+ pr_err("received IU with unknown opcode 0x%x\n",
srp_cmd->opcode);
break;
}
@@ -1948,12 +1945,12 @@ static void srpt_process_rcv_completion(struct ib_cq *cq,
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
- printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+ pr_err("req_lim = %d < 0\n", req_lim);
ioctx = sdev->ioctx_ring[index];
srpt_handle_new_iu(ch, ioctx, NULL);
} else {
- printk(KERN_INFO "receiving failed for idx %u with status %d\n",
- index, wc->status);
+ pr_info("receiving failed for idx %u with status %d\n",
+ index, wc->status);
}
}
@@ -1993,12 +1990,12 @@ static void srpt_process_send_completion(struct ib_cq *cq,
}
} else {
if (opcode == SRPT_SEND) {
- printk(KERN_INFO "sending response for idx %u failed"
- " with status %d\n", index, wc->status);
+ pr_info("sending response for idx %u failed"
+ " with status %d\n", index, wc->status);
srpt_handle_send_err_comp(ch, wc->wr_id);
} else if (opcode != SRPT_RDMA_MID) {
- printk(KERN_INFO "RDMA t %d for idx %u failed with"
- " status %d", opcode, index, wc->status);
+ pr_info("RDMA t %d for idx %u failed with"
+ " status %d\n", opcode, index, wc->status);
srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
}
}
@@ -2062,15 +2059,15 @@ static int srpt_compl_thread(void *arg)
ch = arg;
BUG_ON(!ch);
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
- ch->sess_name, ch->thread->comm, current->pid);
+ pr_info("Session %s: kernel thread %s (PID %d) started\n",
+ ch->sess_name, ch->thread->comm, current->pid);
while (!kthread_should_stop()) {
wait_event_interruptible(ch->wait_queue,
(srpt_process_completion(ch->cq, ch),
kthread_should_stop()));
}
- printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
- ch->sess_name, ch->thread->comm, current->pid);
+ pr_info("Session %s: kernel thread %s (PID %d) stopped\n",
+ ch->sess_name, ch->thread->comm, current->pid);
return 0;
}
@@ -2097,7 +2094,7 @@ retry:
ch->rq_size + srp_sq_size, 0);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
- printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+ pr_err("failed to create CQ cqe= %d ret= %d\n",
ch->rq_size + srp_sq_size, ret);
goto out;
}
@@ -2123,7 +2120,7 @@ retry:
goto retry;
}
}
- printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
@@ -2143,7 +2140,7 @@ retry:
ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
if (IS_ERR(ch->thread)) {
- printk(KERN_ERR "failed to create kernel thread %ld\n",
+ pr_err("failed to create kernel thread %ld\n",
PTR_ERR(ch->thread));
ch->thread = NULL;
goto err_destroy_qp;
@@ -2204,7 +2201,7 @@ static void __srpt_close_ch(struct srpt_rdma_ch *ch)
/* fall through */
case CH_LIVE:
if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "sending CM DREQ failed.\n");
+ pr_err("sending CM DREQ failed.\n");
break;
case CH_DISCONNECTING:
break;
@@ -2291,7 +2288,7 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id)
ret = srpt_ch_qp_err(ch);
if (ret < 0)
- printk(KERN_ERR "Setting queue pair in error state"
+ pr_err("Setting queue pair in error state"
" failed: %d\n", ret);
}
}
@@ -2435,17 +2432,17 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
it_iu_len = be32_to_cpu(req->req_it_iu_len);
- printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
- " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
- " (guid=0x%llx:0x%llx)\n",
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
- be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
- it_iu_len,
- param->port,
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
- be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+ pr_info("Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+ " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+ " (guid=0x%llx:0x%llx)\n",
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+ it_iu_len,
+ param->port,
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
rej = kzalloc(sizeof *rej, GFP_KERNEL);
@@ -2460,7 +2457,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+ pr_err("rejected SRP_LOGIN_REQ because its"
" length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
@@ -2470,7 +2467,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
ret = -EINVAL;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+ pr_err("rejected SRP_LOGIN_REQ because the target port"
" has not yet been enabled\n");
goto reject;
}
@@ -2516,7 +2513,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
ret = -ENOMEM;
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+ pr_err("rejected SRP_LOGIN_REQ because it"
" has an invalid target port identifier.\n");
goto reject;
}
@@ -2525,7 +2522,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (!ch) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
+ pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
ret = -ENOMEM;
goto reject;
}
@@ -2562,7 +2559,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (ret) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+ pr_err("rejected SRP_LOGIN_REQ because creating"
" a new RDMA channel failed.\n");
goto free_ring;
}
@@ -2571,7 +2568,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
if (ret) {
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
- printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+ pr_err("rejected SRP_LOGIN_REQ because enabling"
" RTR failed (error code = %d)\n", ret);
goto destroy_ib;
}
@@ -2586,8 +2583,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
nacl = srpt_lookup_acl(sport, ch->i_port_id);
if (!nacl) {
- printk(KERN_INFO "Rejected login because no ACL has been"
- " configured yet for initiator %s.\n", ch->sess_name);
+ pr_info("Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
rej->reason = __constant_cpu_to_be32(
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
@@ -2631,7 +2628,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
ret = ib_send_cm_rep(cm_id, rep_param);
if (ret) {
- printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+ pr_err("sending SRP_LOGIN_REQ response failed"
" (error code = %d)\n", ret);
goto release_channel;
}
@@ -2679,7 +2676,7 @@ out:
static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+ pr_info("Received IB REJ for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2714,13 +2711,13 @@ static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+ pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+ pr_info("Received IB REP error for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2755,9 +2752,9 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
if (send_drep) {
if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
- printk(KERN_ERR "Sending IB DREP failed.\n");
- printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
- ch->sess_name);
+ pr_err("Sending IB DREP failed.\n");
+ pr_info("Received DREQ and sent DREP for session %s.\n",
+ ch->sess_name);
}
}
@@ -2766,8 +2763,7 @@ static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
*/
static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
{
- printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
- cm_id);
+ pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
srpt_drain_channel(cm_id);
}
@@ -2811,14 +2807,13 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
srpt_cm_rep_error(cm_id);
break;
case IB_CM_DREQ_ERROR:
- printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+ pr_info("Received IB DREQ ERROR event.\n");
break;
case IB_CM_MRA_RECEIVED:
- printk(KERN_INFO "Received IB MRA event\n");
+ pr_info("Received IB MRA event\n");
break;
default:
- printk(KERN_ERR "received unrecognized IB CM event %d\n",
- event->event);
+ pr_err("received unrecognized IB CM event %d\n", event->event);
break;
}
@@ -2848,8 +2843,8 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
ret = -ENOMEM;
sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
if (sq_wr_avail < 0) {
- printk(KERN_WARNING "IB send queue full (needed %d)\n",
- n_rdma);
+ pr_warn("IB send queue full (needed %d)\n",
+ n_rdma);
goto out;
}
}
@@ -2889,7 +2884,7 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
}
if (ret)
- printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+ pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
__func__, __LINE__, ret, i, n_rdma);
if (ret && i > 0) {
wr.num_sge = 0;
@@ -2897,12 +2892,12 @@ static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
wr.send_flags = IB_SEND_SIGNALED;
while (ch->state == CH_LIVE &&
ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
- printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+ pr_info("Trying to abort failed RDMA transfer [%d]\n",
ioctx->ioctx.index);
msleep(1000);
}
while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
- printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+ pr_info("Waiting until RDMA abort finished [%d]\n",
ioctx->ioctx.index);
msleep(1000);
}
@@ -2923,17 +2918,17 @@ static int srpt_xfer_data(struct srpt_rdma_ch *ch,
ret = srpt_map_sg_to_ib_sge(ch, ioctx);
if (ret) {
- printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+ pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
goto out;
}
ret = srpt_perform_rdmas(ch, ioctx);
if (ret) {
if (ret == -EAGAIN || ret == -ENOMEM)
- printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
- __func__, __LINE__, ret);
+ pr_info("%s[%d] queue full -- ret=%d\n",
+ __func__, __LINE__, ret);
else
- printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+ pr_err("%s[%d] fatal error -- ret=%d\n",
__func__, __LINE__, ret);
goto out_unmap;
}
@@ -3058,7 +3053,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
!ioctx->queue_status_only) {
ret = srpt_xfer_data(ch, ioctx);
if (ret) {
- printk(KERN_ERR "xfer_data failed for tag %llu\n",
+ pr_err("xfer_data failed for tag %llu\n",
ioctx->tag);
return;
}
@@ -3075,7 +3070,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
ret = srpt_post_send(ch, ioctx, resp_len);
if (ret) {
- printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+ pr_err("sending cmd response failed for tag %llu\n",
ioctx->tag);
srpt_unmap_sg_to_ib_sge(ch, ioctx);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
@@ -3154,7 +3149,7 @@ static int srpt_release_sdev(struct srpt_device *sdev)
res = wait_event_interruptible(sdev->ch_releaseQ,
srpt_ch_list_empty(sdev));
if (res)
- printk(KERN_ERR "%s: interrupted.\n", __func__);
+ pr_err("%s: interrupted.\n", __func__);
return 0;
}
@@ -3293,7 +3288,7 @@ static void srpt_add_one(struct ib_device *device)
spin_lock_init(&sport->port_acl_lock);
if (srpt_refresh_port(sport)) {
- printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+ pr_err("MAD registration failed for %s-%d.\n",
srpt_sdev_name(sdev), i);
goto err_ring;
}
@@ -3330,7 +3325,7 @@ free_dev:
kfree(sdev);
err:
sdev = NULL;
- printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+ pr_info("%s(%s) failed.\n", __func__, device->name);
goto out;
}
@@ -3344,8 +3339,7 @@ static void srpt_remove_one(struct ib_device *device)
sdev = ib_get_client_data(device, &srpt_client);
if (!sdev) {
- printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
- device->name);
+ pr_info("%s(%s): nothing to do.\n", __func__, device->name);
return;
}
@@ -3464,7 +3458,7 @@ static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
if (!nacl) {
- printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+ pr_err("Unable to allocate struct srpt_node_acl\n");
return NULL;
}
@@ -3615,7 +3609,7 @@ static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
u8 i_port_id[16];
if (srpt_parse_i_port_id(i_port_id, name) < 0) {
- printk(KERN_ERR "invalid initiator port ID %s\n", name);
+ pr_err("invalid initiator port ID %s\n", name);
ret = -EINVAL;
goto err;
}
@@ -3816,12 +3810,12 @@ static ssize_t srpt_tpg_store_enable(
ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+ pr_err("Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+ pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
return -EINVAL;
}
if (tmp == 1)
@@ -3851,7 +3845,7 @@ static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
int res;
/* Initialize sport->port_wwn and sport->port_tpg_1 */
- res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+ res = core_tpg_register(&srpt_template, &sport->port_wwn,
&sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
if (res)
return ERR_PTR(res);
@@ -3919,7 +3913,9 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops srpt_template = {
+static const struct target_core_fabric_ops srpt_template = {
+ .module = THIS_MODULE,
+ .name = "srpt",
.get_fabric_name = srpt_get_fabric_name,
.get_fabric_proto_ident = srpt_get_fabric_proto_ident,
.tpg_get_wwn = srpt_get_fabric_wwn,
@@ -3964,6 +3960,10 @@ static struct target_core_fabric_ops srpt_template = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = srpt_make_nodeacl,
.fabric_drop_nodeacl = srpt_drop_nodeacl,
+
+ .tfc_wwn_attrs = srpt_wwn_attrs,
+ .tfc_tpg_base_attrs = srpt_tpg_attrs,
+ .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};
/**
@@ -3980,7 +3980,7 @@ static int __init srpt_init_module(void)
ret = -EINVAL;
if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
+ pr_err("invalid value %d for kernel module parameter"
" srp_max_req_size -- must be at least %d.\n",
srp_max_req_size, MIN_MAX_REQ_SIZE);
goto out;
@@ -3988,54 +3988,26 @@ static int __init srpt_init_module(void)
if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
|| srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
- printk(KERN_ERR "invalid value %d for kernel module parameter"
+ pr_err("invalid value %d for kernel module parameter"
" srpt_srq_size -- must be in the range [%d..%d].\n",
srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
goto out;
}
- srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
- if (IS_ERR(srpt_target)) {
- printk(KERN_ERR "couldn't register\n");
- ret = PTR_ERR(srpt_target);
+ ret = target_register_template(&srpt_template);
+ if (ret)
goto out;
- }
-
- srpt_target->tf_ops = srpt_template;
-
- /*
- * Set up default attribute lists.
- */
- srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
- srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(srpt_target);
- if (ret < 0) {
- printk(KERN_ERR "couldn't register\n");
- goto out_free_target;
- }
ret = ib_register_client(&srpt_client);
if (ret) {
- printk(KERN_ERR "couldn't register IB client\n");
+ pr_err("couldn't register IB client\n");
goto out_unregister_target;
}
return 0;
out_unregister_target:
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
-out_free_target:
- if (srpt_target)
- target_fabric_configfs_free(srpt_target);
+ target_unregister_template(&srpt_template);
out:
return ret;
}
@@ -4043,8 +4015,7 @@ out:
static void __exit srpt_cleanup_module(void)
{
ib_unregister_client(&srpt_client);
- target_fabric_configfs_deregister(srpt_target);
- srpt_target = NULL;
+ target_unregister_template(&srpt_template);
}
module_init(srpt_init_module);
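The ib_srpt changes above are almost entirely a mechanical conversion from printk(KERN_ERR/KERN_WARNING/KERN_INFO ...) to pr_err()/pr_warn()/pr_info(), which also lets every message pick up a pr_fmt() prefix. A minimal sketch of that idiom, assuming an "ib_srpt: " prefix purely for illustration (the driver's actual pr_fmt definition is not part of these hunks):

	/* pr_fmt() must be defined before <linux/printk.h> is pulled in. */
	#define pr_fmt(fmt) "ib_srpt: " fmt

	#include <linux/printk.h>

	static void report_failure(int ret)
	{
		/* Expands to printk(KERN_ERR "ib_srpt: create_qp failed: %d\n", ret). */
		pr_err("create_qp failed: %d\n", ret);
	}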
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 64b9b59ad4cb..b50c5b8b8a4d 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -148,16 +148,19 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state)
{
+ int ret;
struct cros_ec_command msg = {
- .version = 0,
.command = EC_CMD_MKBP_STATE,
- .outdata = NULL,
- .outsize = 0,
- .indata = kb_state,
.insize = ckdev->cols,
};
- return cros_ec_cmd_xfer(ckdev->ec, &msg);
+ ret = cros_ec_cmd_xfer(ckdev->ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ memcpy(kb_state, msg.indata, ckdev->cols);
+
+ return 0;
}
static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
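The cros_ec_keyb hunk adapts to a struct cros_ec_command whose receive buffer (indata) is embedded in the struct rather than caller-supplied: the caller sets .command and .insize, lets cros_ec_cmd_xfer() fill msg.indata, copies the bytes out itself, and now propagates a negative transfer error instead of handing it back as key state. A hedged sketch of the same call pattern; field and function names come from the hunk above, while the helper name, headers and bounds check are illustrative assumptions:

	#include <linux/mfd/cros_ec.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	/* Illustrative helper: read 'len' bytes from the EC into 'buf'. */
	static int ec_read_blob(struct cros_ec_device *ec, uint8_t *buf, size_t len)
	{
		struct cros_ec_command msg = {
			.command = EC_CMD_MKBP_STATE,	/* example command number from the hunk */
			.insize = len,
		};
		int ret;

		if (len > sizeof(msg.indata))	/* assumed embedded-buffer limit */
			return -EINVAL;

		ret = cros_ec_cmd_xfer(ec, &msg);
		if (ret < 0)
			return ret;		/* transport/protocol error */

		memcpy(buf, msg.indata, len);
		return 0;
	}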
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a35927cd42e5..68d43beccb7e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -50,6 +50,7 @@
#define CONTEXT_SIZE VTD_PAGE_SIZE
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -184,32 +185,11 @@ static int force_on = 0;
* 64-127: Reserved
*/
struct root_entry {
- u64 val;
- u64 rsvd1;
+ u64 lo;
+ u64 hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
- return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
- root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
- root->val &= ~VTD_PAGE_MASK;
- root->val |= value & VTD_PAGE_MASK;
-}
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
- return (struct context_entry *)
- (root_present(root)?phys_to_virt(
- root->val & VTD_PAGE_MASK) :
- NULL);
-}
/*
* low 64 bits:
@@ -682,6 +662,40 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
+static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
+ u8 bus, u8 devfn, int alloc)
+{
+ struct root_entry *root = &iommu->root_entry[bus];
+ struct context_entry *context;
+ u64 *entry;
+
+ if (ecap_ecs(iommu->ecap)) {
+ if (devfn >= 0x80) {
+ devfn -= 0x80;
+ entry = &root->hi;
+ }
+ devfn *= 2;
+ }
+ entry = &root->lo;
+ if (*entry & 1)
+ context = phys_to_virt(*entry & VTD_PAGE_MASK);
+ else {
+ unsigned long phy_addr;
+ if (!alloc)
+ return NULL;
+
+ context = alloc_pgtable_page(iommu->node);
+ if (!context)
+ return NULL;
+
+ __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
+ phy_addr = virt_to_phys((void *)context);
+ *entry = phy_addr | 1;
+ __iommu_flush_cache(iommu, entry, sizeof(*entry));
+ }
+ return &context[devfn];
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -741,75 +755,36 @@ static void domain_flush_cache(struct dmar_domain *domain,
clflush_cache_range(addr, size);
}
-/* Gets context entry for a given bus and devfn */
-static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
- u8 bus, u8 devfn)
-{
- struct root_entry *root;
- struct context_entry *context;
- unsigned long phy_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- context = (struct context_entry *)
- alloc_pgtable_page(iommu->node);
- if (!context) {
- spin_unlock_irqrestore(&iommu->lock, flags);
- return NULL;
- }
- __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
- phy_addr = virt_to_phys((void *)context);
- set_root_value(root, phy_addr);
- set_root_present(root);
- __iommu_flush_cache(iommu, root, sizeof(*root));
- }
- spin_unlock_irqrestore(&iommu->lock, flags);
- return &context[devfn];
-}
-
static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
- struct root_entry *root;
struct context_entry *context;
- int ret;
+ int ret = 0;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
- if (!context) {
- ret = 0;
- goto out;
- }
- ret = context_present(&context[devfn]);
-out:
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (context)
+ ret = context_present(context);
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
- struct root_entry *root;
struct context_entry *context;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
- root = &iommu->root_entry[bus];
- context = get_context_addr_from_root(root);
+ context = iommu_context_addr(iommu, bus, devfn, 0);
if (context) {
- context_clear_entry(&context[devfn]);
- __iommu_flush_cache(iommu, &context[devfn], \
- sizeof(*context));
+ context_clear_entry(context);
+ __iommu_flush_cache(iommu, context, sizeof(*context));
}
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void free_context_table(struct intel_iommu *iommu)
{
- struct root_entry *root;
int i;
unsigned long flags;
struct context_entry *context;
@@ -819,10 +794,17 @@ static void free_context_table(struct intel_iommu *iommu)
goto out;
}
for (i = 0; i < ROOT_ENTRY_NR; i++) {
- root = &iommu->root_entry[i];
- context = get_context_addr_from_root(root);
+ context = iommu_context_addr(iommu, i, 0, 0);
+ if (context)
+ free_pgtable_page(context);
+
+ if (!ecap_ecs(iommu->ecap))
+ continue;
+
+ context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
free_pgtable_page(context);
+
}
free_pgtable_page(iommu->root_entry);
iommu->root_entry = NULL;
@@ -1146,14 +1128,16 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
- void *addr;
+ u64 addr;
u32 sts;
unsigned long flag;
- addr = iommu->root_entry;
+ addr = virt_to_phys(iommu->root_entry);
+ if (ecap_ecs(iommu->ecap))
+ addr |= DMA_RTADDR_RTT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
- dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
+ dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
@@ -1800,7 +1784,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
translation != CONTEXT_TT_MULTI_LEVEL);
- context = device_to_context_entry(iommu, bus, devfn);
+ spin_lock_irqsave(&iommu->lock, flags);
+ context = iommu_context_addr(iommu, bus, devfn, 1);
+ spin_unlock_irqrestore(&iommu->lock, flags);
if (!context)
return -ENOMEM;
spin_lock_irqsave(&iommu->lock, flags);
@@ -2564,6 +2550,10 @@ static bool device_has_rmrr(struct device *dev)
* In both cases we assume that PCI USB devices with RMRRs have them largely
* for historical reasons and that the RMRR space is not actively used post
* boot. This exclusion may change if vendors begin to abuse it.
+ *
+ * The same exception is made for graphics devices, with the requirement that
+ * any use of the RMRR regions will be torn down before assigning the device
+ * to a guest.
*/
static bool device_is_rmrr_locked(struct device *dev)
{
@@ -2573,7 +2563,7 @@ static bool device_is_rmrr_locked(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
- if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
+ if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
return false;
}
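The intel-iommu rework above prepares for extended root/context entries: struct root_entry becomes two explicit 64-bit halves, and when ECS is advertised the low half is meant to cover device functions 0x00-0x7f while the high half covers 0x80-0xff, with the context table indexed at devfn * 2 because an extended context entry occupies two 128-bit slots. A small sketch of that index arithmetic under the assumption just stated; the helper name is illustrative and not part of the driver:

	#include <linux/types.h>

	/* Sketch of the extended-context (ECS) lookup math implied by iommu_context_addr(). */
	static unsigned int ecs_context_index(unsigned int devfn, bool *use_hi_half)
	{
		*use_hi_half = false;
		if (devfn >= 0x80) {
			devfn -= 0x80;		/* upper functions live behind root->hi */
			*use_hi_half = true;
		}
		return devfn * 2;		/* extended entries are twice as wide, so the index doubles */
	}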
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6c25b3c5b729..5709ae9c3e77 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -637,10 +637,7 @@ static int __init intel_enable_irq_remapping(void)
if (x2apic_supported()) {
eim = !dmar_x2apic_optout();
if (!eim)
- printk(KERN_WARNING
- "Your BIOS is broken and requested that x2apic be disabled.\n"
- "This will slightly decrease performance.\n"
- "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
+ pr_info("x2apic is disabled because BIOS sets x2apic opt out bit. You can use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
}
for_each_iommu(iommu, drhd) {
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a6ce3476834e..7b315e385ba3 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -33,12 +33,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <linux/irqchip/arm-gic-acpi.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -1107,3 +1109,105 @@ IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif
+
+#ifdef CONFIG_ACPI
+static phys_addr_t dist_phy_base, cpu_phy_base __initdata;
+
+static int __init
+gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *processor;
+ phys_addr_t gic_cpu_base;
+ static int cpu_base_assigned;
+
+ processor = (struct acpi_madt_generic_interrupt *)header;
+
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ /*
+ * There is no support for non-banked GICv1/2 registers in the ACPI spec.
+ * All CPU interface addresses have to be the same.
+ */
+ gic_cpu_base = processor->base_address;
+ if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
+ return -EINVAL;
+
+ cpu_phy_base = gic_cpu_base;
+ cpu_base_assigned = 1;
+ return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_distributor *dist;
+
+ dist = (struct acpi_madt_generic_distributor *)header;
+
+ if (BAD_MADT_ENTRY(dist, end))
+ return -EINVAL;
+
+ dist_phy_base = dist->base_address;
+ return 0;
+}
+
+int __init
+gic_v2_acpi_init(struct acpi_table_header *table)
+{
+ void __iomem *cpu_base, *dist_base;
+ int count;
+
+ /* Collect CPU base addresses */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_cpu, table,
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
+ if (count <= 0) {
+ pr_err("No valid GICC entries exist\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Find the distributor base address. We expect a single distributor entry,
+ * since the ACPI 5.1 spec supports neither multiple GICs nor GIC cascading.
+ */
+ count = acpi_parse_entries(ACPI_SIG_MADT,
+ sizeof(struct acpi_table_madt),
+ gic_acpi_parse_madt_distributor, table,
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
+ if (count <= 0) {
+ pr_err("No valid GICD entries exist\n");
+ return -EINVAL;
+ } else if (count > 1) {
+ pr_err("More than one GICD entry detected\n");
+ return -EINVAL;
+ }
+
+ cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!cpu_base) {
+ pr_err("Unable to map GICC registers\n");
+ return -ENOMEM;
+ }
+
+ dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
+ if (!dist_base) {
+ pr_err("Unable to map GICD registers\n");
+ iounmap(cpu_base);
+ return -ENOMEM;
+ }
+
+ /*
+ * Initialize GIC instance zero (no multi-GIC support). Also, set the GIC
+ * as the default IRQ domain to allow for GSI registration and GSI to IRQ
+ * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
+ */
+ gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
+ irq_set_default_host(gic_data[0].domain);
+
+ acpi_irq_model = ACPI_IRQ_MODEL_GIC;
+ return 0;
+}
+#endif
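gic_v2_acpi_init() above maps two MMIO regions and must unwind the first mapping if the second fails. The same unwind idiom, pulled out as a generic sketch; the function and parameter names here are illustrative and not part of the driver:

	#include <linux/io.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	/* Map a CPU-interface and a distributor region, undoing the first on failure. */
	static int __init map_two_regions(phys_addr_t cpu_pa, size_t cpu_sz,
					  phys_addr_t dist_pa, size_t dist_sz,
					  void __iomem **cpu, void __iomem **dist)
	{
		*cpu = ioremap(cpu_pa, cpu_sz);
		if (!*cpu)
			return -ENOMEM;

		*dist = ioremap(dist_pa, dist_sz);
		if (!*dist) {
			iounmap(*cpu);		/* undo the first mapping on failure */
			return -ENOMEM;
		}
		return 0;
	}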
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 0fe2f718d81c..afd1af3dfe5a 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -8,6 +8,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/acpi_irq.h>
#include <linux/init.h>
#include <linux/of_irq.h>
#include <linux/irqchip.h>
@@ -26,4 +27,6 @@ extern struct of_device_id __irqchip_of_table[];
void __init irqchip_init(void)
{
of_irq_init(__irqchip_of_table);
+
+ acpi_irq_init();
}
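irqchip_init() now probes ACPI after the DT irqchip table walk, so on !CONFIG_ACPI builds the call has to compile away. That is normally done with a stub in the header; a sketch of the assumed pattern, since the actual contents of <linux/acpi_irq.h> are not shown in this patch:

	/* Assumed fallback shape in <linux/acpi_irq.h> when no ACPI IRQ init exists. */
	#ifndef acpi_irq_init
	static inline void acpi_irq_init(void) { }
	#endif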
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 1219af493c0f..19a32280731d 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -211,10 +211,9 @@ static void initialize(struct lg_cpu *cpu)
/*
* The Guest tells us where we're not to deliver interrupts by putting
- * the range of addresses into "struct lguest_data".
+ * the instruction address into "struct lguest_data".
*/
- if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
- || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+ if (get_user(cpu->lg->noirq_iret, &cpu->lg->lguest_data->noirq_iret))
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
/*
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index 70dfcdc29f1f..5e7559be222a 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -56,21 +56,16 @@ static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
}
/*H:210
- * The set_guest_interrupt() routine actually delivers the interrupt or
- * trap. The mechanics of delivering traps and interrupts to the Guest are the
- * same, except some traps have an "error code" which gets pushed onto the
- * stack as well: the caller tells us if this is one.
- *
- * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
- * interrupt or trap. It's split into two parts for traditional reasons: gcc
- * on i386 used to be frightened by 64 bit numbers.
+ * The push_guest_interrupt_stack() routine saves Guest state on the stack for
+ * an interrupt or trap. The mechanics of delivering traps and interrupts to
+ * the Guest are the same, except some traps have an "error code" which gets
+ * pushed onto the stack as well: the caller tells us if this is one.
*
* We set up the stack just like the CPU does for a real interrupt, so it's
* identical for the Guest (and the standard "iret" instruction will undo
* it).
*/
-static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
- bool has_err)
+static void push_guest_interrupt_stack(struct lg_cpu *cpu, bool has_err)
{
unsigned long gstack, origstack;
u32 eflags, ss, irq_enable;
@@ -130,12 +125,28 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
if (has_err)
push_guest_stack(cpu, &gstack, cpu->regs->errcode);
- /*
- * Now we've pushed all the old state, we change the stack, the code
- * segment and the address to execute.
- */
+ /* Adjust the stack pointer and stack segment. */
cpu->regs->ss = ss;
cpu->regs->esp = virtstack + (gstack - origstack);
+}
+
+/*
+ * This actually makes the Guest start executing the given interrupt/trap
+ * handler.
+ *
+ * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
+ * interrupt or trap. It's split into two parts for traditional reasons: gcc
+ * on i386 used to be frightened by 64 bit numbers.
+ */
+static void guest_run_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi)
+{
+ /* If we're already in the kernel, we don't change stacks. */
+ if ((cpu->regs->ss&0x3) != GUEST_PL)
+ cpu->regs->ss = cpu->esp1;
+
+ /*
+ * Set the code segment and the address to execute.
+ */
cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
cpu->regs->eip = idt_address(lo, hi);
@@ -158,6 +169,24 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
kill_guest(cpu, "Disabling interrupts");
}
+/* This restores the eflags word which was pushed on the stack by a trap */
+static void restore_eflags(struct lg_cpu *cpu)
+{
+ /* This is the physical address of the stack. */
+ unsigned long stack_pa = guest_pa(cpu, cpu->regs->esp);
+
+ /*
+ * Stack looks like this:
+ * Address Contents
+ * esp EIP
+ * esp + 4 CS
+ * esp + 8 EFLAGS
+ */
+ cpu->regs->eflags = lgread(cpu, stack_pa + 8, u32);
+ cpu->regs->eflags &=
+ ~(X86_EFLAGS_TF|X86_EFLAGS_VM|X86_EFLAGS_RF|X86_EFLAGS_NT);
+}
+
/*H:205
* Virtual Interrupts.
*
@@ -200,14 +229,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
BUG_ON(irq >= LGUEST_IRQS);
- /*
- * They may be in the middle of an iret, where they asked us never to
- * deliver interrupts.
- */
- if (cpu->regs->eip >= cpu->lg->noirq_start &&
- (cpu->regs->eip < cpu->lg->noirq_end))
- return;
-
/* If they're halted, interrupts restart them. */
if (cpu->halted) {
/* Re-enable interrupts. */
@@ -237,12 +258,34 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
if (idt_present(idt->a, idt->b)) {
/* OK, mark it no longer pending and deliver it. */
clear_bit(irq, cpu->irqs_pending);
+
/*
- * set_guest_interrupt() takes the interrupt descriptor and a
- * flag to say whether this interrupt pushes an error code onto
- * the stack as well: virtual interrupts never do.
+ * They may be about to iret, where they asked us never to
+ * deliver interrupts. In this case, we can emulate that iret
+ * then immediately deliver the interrupt. This is basically
+ * a noop: the iret would pop the interrupt frame and restore
+ * eflags, and then we'd set it up again. So just restore the
+ * eflags word and jump straight to the handler in this case.
+ *
+ * Denys Vlasenko points out that this isn't quite right: if
+ * the iret was returning to userspace, then that interrupt
+ * would reset the stack pointer (which the Guest told us
+ * about via LHCALL_SET_STACK). But unless the Guest is being
+ * *really* weird, that will be the same as the current stack
+ * anyway.
*/
- set_guest_interrupt(cpu, idt->a, idt->b, false);
+ if (cpu->regs->eip == cpu->lg->noirq_iret) {
+ restore_eflags(cpu);
+ } else {
+ /*
+ * push_guest_interrupt_stack() takes a flag to say whether
+ * this interrupt pushes an error code onto the stack
+ * as well: virtual interrupts never do.
+ */
+ push_guest_interrupt_stack(cpu, false);
+ }
+ /* Actually make Guest cpu jump to handler. */
+ guest_run_interrupt(cpu, idt->a, idt->b);
}
/*
@@ -353,8 +396,9 @@ bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
*/
if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
return false;
- set_guest_interrupt(cpu, cpu->arch.idt[num].a,
- cpu->arch.idt[num].b, has_err(num));
+ push_guest_interrupt_stack(cpu, has_err(num));
+ guest_run_interrupt(cpu, cpu->arch.idt[num].a,
+ cpu->arch.idt[num].b);
return true;
}
@@ -395,8 +439,9 @@ static bool direct_trap(unsigned int num)
* The Guest has the ability to turn its interrupt gates into trap gates,
* if it is careful. The Host will let trap gates go directly to the
* Guest, but the Guest needs the interrupts atomically disabled for an
- * interrupt gate. It can do this by pointing the trap gate at instructions
- * within noirq_start and noirq_end, where it can safely disable interrupts.
+ * interrupt gate. The Host could provide a mechanism to register more
+ * "no-interrupt" regions, and the Guest could point the trap gate at
+ * instructions within that region, where it can safely disable interrupts.
*/
/*M:006
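The lguest rework splits interrupt delivery into two steps: push_guest_interrupt_stack() builds the Guest's trap frame, and guest_run_interrupt() switches stack segment, code segment and EIP. That split is what enables the noirq_iret fast path: if the Guest is parked exactly on its iret, the frame it was about to pop is already on its stack, so the Host only re-reads EFLAGS from it. A sketch of the 32-bit frame layout restore_eflags() relies on; the struct and macro names are illustrative:

	#include <linux/types.h>
	#include <asm/processor-flags.h>

	/* Layout of the frame iret would pop, starting at the Guest's esp. */
	struct guest_iret_frame {
		u32 eip;	/* esp + 0 */
		u32 cs;		/* esp + 4 */
		u32 eflags;	/* esp + 8, the word restore_eflags() reloads */
	};

	/* Flags stripped from the reloaded EFLAGS word, mirroring the patch. */
	#define GUEST_EFLAGS_DROP \
		(X86_EFLAGS_TF | X86_EFLAGS_VM | X86_EFLAGS_RF | X86_EFLAGS_NT)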
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 307e8b39e7d1..ac8ad0461e80 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -102,7 +102,7 @@ struct lguest {
struct pgdir pgdirs[4];
- unsigned long noirq_start, noirq_end;
+ unsigned long noirq_iret;
unsigned int stack_pages;
u32 tsc_khz;
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index c4c6113eb9a6..30c60687d277 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -339,6 +339,13 @@ static ssize_t write(struct file *file, const char __user *in,
}
}
+static int open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+
+ return 0;
+}
+
/*L:060
* The final piece of interface code is the close() routine. It reverses
* everything done in initialize(). This is usually called because the
@@ -409,6 +416,7 @@ static int close(struct inode *inode, struct file *file)
*/
static const struct file_operations lguest_fops = {
.owner = THIS_MODULE,
+ .open = open,
.release = close,
.write = write,
.read = read,
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 6ddc983417d5..edcf4ab66e00 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -175,6 +175,22 @@ config MD_FAULTY
If unsure, say N.
+
+config MD_CLUSTER
+ tristate "Cluster Support for MD (EXPERIMENTAL)"
+ depends on BLK_DEV_MD
+ depends on DLM
+ default n
+ ---help---
+ Clustering support for MD devices. This enables locking and
+ synchronization across multiple systems on the cluster, so all
+ nodes in the cluster can access the MD devices simultaneously.
+
+ This brings the redundancy (and uptime) of RAID levels across the
+ nodes of the cluster.
+
+ If unsure, say N.
+
source "drivers/md/bcache/Kconfig"
config BLK_DEV_DM_BUILTIN
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 1863feaa5846..dba4db5985fb 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_MD_RAID10) += raid10.o
obj-$(CONFIG_MD_RAID456) += raid456.o
obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
+obj-$(CONFIG_MD_CLUSTER) += md-cluster.o
obj-$(CONFIG_BCACHE) += bcache/
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3a5767968ba0..2bc56e2a3526 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -205,6 +205,10 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct block_device *bdev;
struct mddev *mddev = bitmap->mddev;
struct bitmap_storage *store = &bitmap->storage;
+ int node_offset = 0;
+
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
int size = PAGE_SIZE;
@@ -433,6 +437,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
/* This might have been changed by a reshape */
sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
+ sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
bitmap_info.space);
kunmap_atomic(sb);
@@ -544,6 +549,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
bitmap_super_t *sb;
unsigned long chunksize, daemon_sleep, write_behind;
unsigned long long events;
+ int nodes = 0;
unsigned long sectors_reserved = 0;
int err = -EINVAL;
struct page *sb_page;
@@ -562,6 +568,22 @@ static int bitmap_read_sb(struct bitmap *bitmap)
return -ENOMEM;
bitmap->storage.sb_page = sb_page;
+re_read:
+ /* If cluster_slot is set, the cluster is set up */
+ if (bitmap->cluster_slot >= 0) {
+ sector_t bm_blocks = bitmap->mddev->resync_max_sectors;
+
+ sector_div(bm_blocks,
+ bitmap->mddev->bitmap_info.chunksize >> 9);
+ /* bits to bytes */
+ bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
+ /* to 4k blocks */
+ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
+ bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+ pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
+ bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+ }
+
if (bitmap->storage.file) {
loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;
@@ -577,12 +599,15 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (err)
return err;
+ err = -EINVAL;
sb = kmap_atomic(sb_page);
chunksize = le32_to_cpu(sb->chunksize);
daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
write_behind = le32_to_cpu(sb->write_behind);
sectors_reserved = le32_to_cpu(sb->sectors_reserved);
+ nodes = le32_to_cpu(sb->nodes);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
/* verify that the bitmap-specific fields are valid */
if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -619,7 +644,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
goto out;
}
events = le64_to_cpu(sb->events);
- if (events < bitmap->mddev->events) {
+ if (!nodes && (events < bitmap->mddev->events)) {
printk(KERN_INFO
"%s: bitmap file is out of date (%llu < %llu) "
"-- forcing full recovery\n",
@@ -634,20 +659,40 @@ static int bitmap_read_sb(struct bitmap *bitmap)
if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
+ strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
err = 0;
+
out:
kunmap_atomic(sb);
+ /* Assigning chunksize is required for "re_read" */
+ bitmap->mddev->bitmap_info.chunksize = chunksize;
+ if (nodes && (bitmap->cluster_slot < 0)) {
+ err = md_setup_cluster(bitmap->mddev, nodes);
+ if (err) {
+ pr_err("%s: Could not setup cluster service (%d)\n",
+ bmname(bitmap), err);
+ goto out_no_sb;
+ }
+ bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
+ goto re_read;
+ }
+
+
out_no_sb:
if (test_bit(BITMAP_STALE, &bitmap->flags))
bitmap->events_cleared = bitmap->mddev->events;
bitmap->mddev->bitmap_info.chunksize = chunksize;
bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
bitmap->mddev->bitmap_info.max_write_behind = write_behind;
+ bitmap->mddev->bitmap_info.nodes = nodes;
if (bitmap->mddev->bitmap_info.space == 0 ||
bitmap->mddev->bitmap_info.space > sectors_reserved)
bitmap->mddev->bitmap_info.space = sectors_reserved;
- if (err)
+ if (err) {
bitmap_print_sb(bitmap);
+ if (bitmap->cluster_slot < 0)
+ md_cluster_stop(bitmap->mddev);
+ }
return err;
}
@@ -692,9 +737,10 @@ static inline struct page *filemap_get_page(struct bitmap_storage *store,
}
static int bitmap_storage_alloc(struct bitmap_storage *store,
- unsigned long chunks, int with_super)
+ unsigned long chunks, int with_super,
+ int slot_number)
{
- int pnum;
+ int pnum, offset = 0;
unsigned long num_pages;
unsigned long bytes;
@@ -703,6 +749,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
bytes += sizeof(bitmap_super_t);
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
+ offset = slot_number * (num_pages - 1);
store->filemap = kmalloc(sizeof(struct page *)
* num_pages, GFP_KERNEL);
@@ -713,20 +760,22 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (store->sb_page == NULL)
return -ENOMEM;
- store->sb_page->index = 0;
}
+
pnum = 0;
if (store->sb_page) {
store->filemap[0] = store->sb_page;
pnum = 1;
+ store->sb_page->index = offset;
}
+
for ( ; pnum < num_pages; pnum++) {
store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
if (!store->filemap[pnum]) {
store->file_pages = pnum;
return -ENOMEM;
}
- store->filemap[pnum]->index = pnum;
+ store->filemap[pnum]->index = pnum + offset;
}
store->file_pages = pnum;
@@ -885,6 +934,28 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
}
}
+static int bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
+{
+ unsigned long bit;
+ struct page *page;
+ void *paddr;
+ unsigned long chunk = block >> bitmap->counts.chunkshift;
+ int set = 0;
+
+ page = filemap_get_page(&bitmap->storage, chunk);
+ if (!page)
+ return -EINVAL;
+ bit = file_page_offset(&bitmap->storage, chunk);
+ paddr = kmap_atomic(page);
+ if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
+ set = test_bit(bit, paddr);
+ else
+ set = test_bit_le(bit, paddr);
+ kunmap_atomic(paddr);
+ return set;
+}
+
+
/* this gets called when the md device is ready to unplug its underlying
* (slave) device queues -- before we let any writes go down, we need to
* sync the dirty pages of the bitmap file to disk */
@@ -935,7 +1006,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
*/
static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
- unsigned long i, chunks, index, oldindex, bit;
+ unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
struct page *page = NULL;
unsigned long bit_cnt = 0;
struct file *file;
@@ -981,6 +1052,9 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
if (!bitmap->mddev->bitmap_info.external)
offset = sizeof(bitmap_super_t);
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));
+
for (i = 0; i < chunks; i++) {
int b;
index = file_page_index(&bitmap->storage, i);
@@ -1001,7 +1075,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
bitmap->mddev,
bitmap->mddev->bitmap_info.offset,
page,
- index, count);
+ index + node_offset, count);
if (ret)
goto err;
@@ -1207,7 +1281,6 @@ void bitmap_daemon_work(struct mddev *mddev)
j < bitmap->storage.file_pages
&& !test_bit(BITMAP_STALE, &bitmap->flags);
j++) {
-
if (test_page_attr(bitmap, j,
BITMAP_PAGE_DIRTY))
/* bitmap_unplug will handle the rest */
@@ -1530,11 +1603,13 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
return;
}
if (!*bmc) {
- *bmc = 2 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2;
bitmap_count_page(&bitmap->counts, offset, 1);
bitmap_set_pending(&bitmap->counts, offset);
bitmap->allclean = 0;
}
+ if (needed)
+ *bmc |= NEEDED_MASK;
spin_unlock_irq(&bitmap->counts.lock);
}
@@ -1591,6 +1666,10 @@ static void bitmap_free(struct bitmap *bitmap)
if (!bitmap) /* there was no bitmap */
return;
+ if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
+ bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
+ md_cluster_stop(bitmap->mddev);
+
/* Shouldn't be needed - but just in case.... */
wait_event(bitmap->write_wait,
atomic_read(&bitmap->pending_writes) == 0);
@@ -1636,7 +1715,7 @@ void bitmap_destroy(struct mddev *mddev)
* initialize the bitmap structure
* if this returns an error, bitmap_destroy must be called to do clean up
*/
-int bitmap_create(struct mddev *mddev)
+struct bitmap *bitmap_create(struct mddev *mddev, int slot)
{
struct bitmap *bitmap;
sector_t blocks = mddev->resync_max_sectors;
@@ -1650,7 +1729,7 @@ int bitmap_create(struct mddev *mddev)
bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
if (!bitmap)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&bitmap->counts.lock);
atomic_set(&bitmap->pending_writes, 0);
@@ -1659,6 +1738,7 @@ int bitmap_create(struct mddev *mddev)
init_waitqueue_head(&bitmap->behind_wait);
bitmap->mddev = mddev;
+ bitmap->cluster_slot = slot;
if (mddev->kobj.sd)
bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
@@ -1706,12 +1786,14 @@ int bitmap_create(struct mddev *mddev)
printk(KERN_INFO "created bitmap (%lu pages) for device %s\n",
bitmap->counts.pages, bmname(bitmap));
- mddev->bitmap = bitmap;
- return test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
+ if (err)
+ goto error;
+ return bitmap;
error:
bitmap_free(bitmap);
- return err;
+ return ERR_PTR(err);
}
int bitmap_load(struct mddev *mddev)
@@ -1765,6 +1847,60 @@ out:
}
EXPORT_SYMBOL_GPL(bitmap_load);
+/* Loads the bitmap associated with slot and copies the resync information
+ * to our bitmap
+ */
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *low, sector_t *high, bool clear_bits)
+{
+ int rv = 0, i, j;
+ sector_t block, lo = 0, hi = 0;
+ struct bitmap_counts *counts;
+ struct bitmap *bitmap = bitmap_create(mddev, slot);
+
+ if (IS_ERR(bitmap))
+ return PTR_ERR(bitmap);
+
+ rv = bitmap_read_sb(bitmap);
+ if (rv)
+ goto err;
+
+ rv = bitmap_init_from_disk(bitmap, 0);
+ if (rv)
+ goto err;
+
+ counts = &bitmap->counts;
+ for (j = 0; j < counts->chunks; j++) {
+ block = (sector_t)j << counts->chunkshift;
+ if (bitmap_file_test_bit(bitmap, block)) {
+ if (!lo)
+ lo = block;
+ hi = block;
+ bitmap_file_clear_bit(bitmap, block);
+ bitmap_set_memory_bits(mddev->bitmap, block, 1);
+ bitmap_file_set_bit(mddev->bitmap, block);
+ }
+ }
+
+ if (clear_bits) {
+ bitmap_update_sb(bitmap);
+ /* Setting this for the ev_page should be enough.
+ * And we do not require both write_all and PAGE_DIRTY either
+ */
+ for (i = 0; i < bitmap->storage.file_pages; i++)
+ set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
+ bitmap_write_all(bitmap);
+ bitmap_unplug(bitmap);
+ }
+ *low = lo;
+ *high = hi;
+err:
+ bitmap_free(bitmap);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(bitmap_copy_from_slot);
+
+
void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
unsigned long chunk_kb;
@@ -1849,7 +1985,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
memset(&store, 0, sizeof(store));
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
- !bitmap->mddev->bitmap_info.external);
+ !bitmap->mddev->bitmap_info.external,
+ bitmap->cluster_slot);
if (ret)
goto err;
@@ -2021,13 +2158,18 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
return -EINVAL;
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
+ struct bitmap *bitmap;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap))
+ rv = PTR_ERR(bitmap);
+ else {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
- if (rv) {
- bitmap_destroy(mddev);
- mddev->bitmap_info.offset = 0;
+ if (rv) {
+ bitmap_destroy(mddev);
+ mddev->bitmap_info.offset = 0;
+ }
}
mddev->pers->quiesce(mddev, 0);
if (rv)
@@ -2186,6 +2328,8 @@ __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
+ if (mddev_is_clustered(mddev))
+ return sprintf(page, "clustered\n");
return sprintf(page, "%s\n", (mddev->bitmap_info.external
? "external" : "internal"));
}
@@ -2198,7 +2342,8 @@ static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
return -EBUSY;
if (strncmp(buf, "external", 8) == 0)
mddev->bitmap_info.external = 1;
- else if (strncmp(buf, "internal", 8) == 0)
+ else if ((strncmp(buf, "internal", 8) == 0) ||
+ (strncmp(buf, "clustered", 9) == 0))
mddev->bitmap_info.external = 0;
else
return -EINVAL;
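For clustered MD, each node keeps its own copy of the bitmap at a per-slot offset inside the shared bitmap area: bitmap_storage_alloc() shifts every page index by slot * (num_pages - 1), and the read/write paths add a node_offset of cluster_slot * file_pages (or the byte-derived equivalent) on top of the base offset. A small arithmetic sketch of that indexing; the helper name is illustrative:

	/* Page index of bitmap page 'pnum' for cluster slot 'slot',
	 * mirroring bitmap_storage_alloc() above (page 0 is the superblock page). */
	static unsigned long clustered_bitmap_page_index(int slot,
							 unsigned long num_pages,
							 unsigned long pnum)
	{
		unsigned long offset = slot * (num_pages - 1);

		return pnum + offset;
	}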
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 30210b9c4ef9..f1f4dd01090d 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -130,8 +130,9 @@ typedef struct bitmap_super_s {
__le32 write_behind; /* 60 number of outstanding write-behind writes */
__le32 sectors_reserved; /* 64 number of 512-byte sectors that are
* reserved for the bitmap. */
-
- __u8 pad[256 - 68]; /* set to zero */
+ __le32 nodes; /* 68 the maximum number of nodes in cluster. */
+ __u8 cluster_name[64]; /* 72 cluster name to which this md belongs */
+ __u8 pad[256 - 136]; /* set to zero */
} bitmap_super_t;
/* notes:
@@ -226,12 +227,13 @@ struct bitmap {
wait_queue_head_t behind_wait;
struct kernfs_node *sysfs_can_clear;
+ int cluster_slot; /* Slot offset for clustered env */
};
/* the bitmap API */
/* these are used only by md/bitmap */
-int bitmap_create(struct mddev *mddev);
+struct bitmap *bitmap_create(struct mddev *mddev, int slot);
int bitmap_load(struct mddev *mddev);
void bitmap_flush(struct mddev *mddev);
void bitmap_destroy(struct mddev *mddev);
@@ -260,6 +262,8 @@ void bitmap_daemon_work(struct mddev *mddev);
int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, int init);
+int bitmap_copy_from_slot(struct mddev *mddev, int slot,
+ sector_t *lo, sector_t *hi, bool clear_bits);
#endif
#endif
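The on-disk superblock grows a 32-bit node count at byte offset 68 and a 64-byte cluster name at offset 72, with the pad shrunk so bitmap_super_t stays 256 bytes. A purely illustrative compile-time check of those offsets, assuming bitmap.h is in scope; the driver itself does not carry these assertions:

	#include <linux/bug.h>
	#include <linux/stddef.h>
	#include "bitmap.h"		/* for bitmap_super_t */

	static inline void bitmap_sb_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(bitmap_super_t, nodes) != 68);
		BUILD_BUG_ON(offsetof(bitmap_super_t, cluster_name) != 72);
		BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);
	}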
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
new file mode 100644
index 000000000000..fcfc4b9b2672
--- /dev/null
+++ b/drivers/md/md-cluster.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (C) 2015, SUSE
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/dlm.h>
+#include <linux/sched.h>
+#include <linux/raid/md_p.h>
+#include "md.h"
+#include "bitmap.h"
+#include "md-cluster.h"
+
+#define LVB_SIZE 64
+#define NEW_DEV_TIMEOUT 5000
+
+struct dlm_lock_resource {
+ dlm_lockspace_t *ls;
+ struct dlm_lksb lksb;
+ char *name; /* lock name. */
+ uint32_t flags; /* flags to pass to dlm_lock() */
+ struct completion completion; /* completion for synchronized locking */
+ void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
+ struct mddev *mddev; /* pointing back to mddev. */
+};
+
+struct suspend_info {
+ int slot;
+ sector_t lo;
+ sector_t hi;
+ struct list_head list;
+};
+
+struct resync_info {
+ __le64 lo;
+ __le64 hi;
+};
+
+/* md_cluster_info flags */
+#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
+
+
+struct md_cluster_info {
+ /* dlm lock space and resources for clustered raid. */
+ dlm_lockspace_t *lockspace;
+ int slot_number;
+ struct completion completion;
+ struct dlm_lock_resource *sb_lock;
+ struct mutex sb_mutex;
+ struct dlm_lock_resource *bitmap_lockres;
+ struct list_head suspend_list;
+ spinlock_t suspend_lock;
+ struct md_thread *recovery_thread;
+ unsigned long recovery_map;
+ /* communication lock resources */
+ struct dlm_lock_resource *ack_lockres;
+ struct dlm_lock_resource *message_lockres;
+ struct dlm_lock_resource *token_lockres;
+ struct dlm_lock_resource *no_new_dev_lockres;
+ struct md_thread *recv_thread;
+ struct completion newdisk_completion;
+ unsigned long state;
+};
+
+enum msg_type {
+ METADATA_UPDATED = 0,
+ RESYNCING,
+ NEWDISK,
+ REMOVE,
+ RE_ADD,
+};
+
+struct cluster_msg {
+ int type;
+ int slot;
+ /* TODO: Unionize this for smaller footprint */
+ sector_t low;
+ sector_t high;
+ char uuid[16];
+ int raid_slot;
+};
+
+static void sync_ast(void *arg)
+{
+ struct dlm_lock_resource *res;
+
+ res = (struct dlm_lock_resource *) arg;
+ complete(&res->completion);
+}
+
+static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
+{
+ int ret = 0;
+
+ init_completion(&res->completion);
+ ret = dlm_lock(res->ls, mode, &res->lksb,
+ res->flags, res->name, strlen(res->name),
+ 0, sync_ast, res, res->bast);
+ if (ret)
+ return ret;
+ wait_for_completion(&res->completion);
+ return res->lksb.sb_status;
+}
+
+static int dlm_unlock_sync(struct dlm_lock_resource *res)
+{
+ return dlm_lock_sync(res, DLM_LOCK_NL);
+}
+
+static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
+ char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
+{
+ struct dlm_lock_resource *res = NULL;
+ int ret, namelen;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
+ if (!res)
+ return NULL;
+ res->ls = cinfo->lockspace;
+ res->mddev = mddev;
+ namelen = strlen(name);
+ res->name = kzalloc(namelen + 1, GFP_KERNEL);
+ if (!res->name) {
+ pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
+ goto out_err;
+ }
+ strlcpy(res->name, name, namelen + 1);
+ if (with_lvb) {
+ res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
+ if (!res->lksb.sb_lvbptr) {
+ pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
+ goto out_err;
+ }
+ res->flags = DLM_LKF_VALBLK;
+ }
+
+ if (bastfn)
+ res->bast = bastfn;
+
+ res->flags |= DLM_LKF_EXPEDITE;
+
+ ret = dlm_lock_sync(res, DLM_LOCK_NL);
+ if (ret) {
+ pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
+ goto out_err;
+ }
+ res->flags &= ~DLM_LKF_EXPEDITE;
+ res->flags |= DLM_LKF_CONVERT;
+
+ return res;
+out_err:
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res->name);
+ kfree(res);
+ return NULL;
+}
+
+static void lockres_free(struct dlm_lock_resource *res)
+{
+ if (!res)
+ return;
+
+ init_completion(&res->completion);
+ dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
+ wait_for_completion(&res->completion);
+
+ kfree(res->name);
+ kfree(res->lksb.sb_lvbptr);
+ kfree(res);
+}
+
+static char *pretty_uuid(char *dest, char *src)
+{
+ int i, len = 0;
+
+ for (i = 0; i < 16; i++) {
+ if (i == 4 || i == 6 || i == 8 || i == 10)
+ len += sprintf(dest + len, "-");
+ len += sprintf(dest + len, "%02x", (__u8)src[i]);
+ }
+ return dest;
+}
+
+static void add_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres,
+ sector_t lo, sector_t hi)
+{
+ struct resync_info *ri;
+
+ ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
+ ri->lo = cpu_to_le64(lo);
+ ri->hi = cpu_to_le64(hi);
+}
+
+static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+{
+ struct resync_info ri;
+ struct suspend_info *s = NULL;
+ sector_t hi = 0;
+
+ dlm_lock_sync(lockres, DLM_LOCK_CR);
+ memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
+ hi = le64_to_cpu(ri.hi);
+ if (hi > 0) {
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ goto out;
+ s->hi = hi;
+ s->lo = le64_to_cpu(ri.lo);
+ }
+ dlm_unlock_sync(lockres);
+out:
+ return s;
+}
+
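+/*
+ * Recovery thread: for every slot set in recovery_map, drop any suspend
+ * range recorded for that slot, take PW on the slot's bitmap lock and fold
+ * the dead node's dirty bits into our bitmap so they get resynced.
+ */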
+static void recover_bitmaps(struct md_thread *thread)
+{
+ struct mddev *mddev = thread->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct dlm_lock_resource *bm_lockres;
+ char str[64];
+ int slot, ret;
+ struct suspend_info *s, *tmp;
+ sector_t lo, hi;
+
+ while (cinfo->recovery_map) {
+ slot = fls64((u64)cinfo->recovery_map) - 1;
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
+ snprintf(str, 64, "bitmap%04d", slot);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("md-cluster: Cannot initialize bitmaps\n");
+ goto clear_bit;
+ }
+
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret) {
+ pr_err("md-cluster: Could not DLM lock %s: %d\n",
+ str, ret);
+ goto clear_bit;
+ }
+ ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
+ if (ret) {
+ pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
+ goto dlm_unlock;
+ }
+ if (hi > 0) {
+ /* TODO: Wait for the current resync to finish */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ if (lo < mddev->recovery_cp)
+ mddev->recovery_cp = lo;
+ md_check_recovery(mddev);
+ }
+dlm_unlock:
+ dlm_unlock_sync(bm_lockres);
+clear_bit:
+ clear_bit(slot, &cinfo->recovery_map);
+ }
+}
+
+static void recover_prep(void *arg)
+{
+}
+
+static void recover_slot(void *arg, struct dlm_slot *slot)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
+ mddev->bitmap_info.cluster_name,
+ slot->nodeid, slot->slot,
+ cinfo->slot_number);
+ set_bit(slot->slot - 1, &cinfo->recovery_map);
+ if (!cinfo->recovery_thread) {
+ cinfo->recovery_thread = md_register_thread(recover_bitmaps,
+ mddev, "recover");
+ if (!cinfo->recovery_thread) {
+ pr_warn("md-cluster: Could not create recovery thread\n");
+ return;
+ }
+ }
+ md_wakeup_thread(cinfo->recovery_thread);
+}
+
+static void recover_done(void *arg, struct dlm_slot *slots,
+ int num_slots, int our_slot,
+ uint32_t generation)
+{
+ struct mddev *mddev = arg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ cinfo->slot_number = our_slot;
+ complete(&cinfo->completion);
+}
+
+static const struct dlm_lockspace_ops md_ls_ops = {
+ .recover_prep = recover_prep,
+ .recover_slot = recover_slot,
+ .recover_done = recover_done,
+};
+
+/*
+ * The BAST function for the ack lock resource
+ * This function wakes up the receive thread in
+ * order to receive and process the message.
+ */
+static void ack_bast(void *arg, int mode)
+{
+ struct dlm_lock_resource *res = (struct dlm_lock_resource *)arg;
+ struct md_cluster_info *cinfo = res->mddev->cluster_info;
+
+ if (mode == DLM_LOCK_EX)
+ md_wakeup_thread(cinfo->recv_thread);
+}
+
+static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ struct suspend_info *s, *tmp;
+
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ pr_info("%s:%d Deleting suspend_info: %d\n",
+ __func__, __LINE__, slot);
+ list_del(&s->list);
+ kfree(s);
+ break;
+ }
+}
+
+static void remove_suspend_info(struct md_cluster_info *cinfo, int slot)
+{
+ spin_lock_irq(&cinfo->suspend_lock);
+ __remove_suspend_info(cinfo, slot);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
+
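+/*
+ * A RESYNCING message with hi == 0 means the sender finished its resync, so
+ * drop its suspend range; otherwise record (or replace) the range the sender
+ * is currently resyncing.
+ */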
+static void process_suspend_info(struct md_cluster_info *cinfo,
+ int slot, sector_t lo, sector_t hi)
+{
+ struct suspend_info *s;
+
+ if (!hi) {
+ remove_suspend_info(cinfo, slot);
+ return;
+ }
+ s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
+ if (!s)
+ return;
+ s->slot = slot;
+ s->lo = lo;
+ s->hi = hi;
+ spin_lock_irq(&cinfo->suspend_lock);
+ /* Remove the existing entry (if any) before adding */
+ __remove_suspend_info(cinfo, slot);
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
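+/*
+ * A NEWDISK message arrived: tell userspace about the candidate device via a
+ * uevent carrying its UUID and raid slot, then wait (bounded by
+ * NEW_DEV_TIMEOUT) for new_disk_ack() to confirm or deny it on this node.
+ */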
+static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
+{
+ char disk_uuid[64];
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ char event_name[] = "EVENT=ADD_DEVICE";
+ char raid_slot[16];
+ char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
+ int len;
+
+ len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
+ pretty_uuid(disk_uuid + len, cmsg->uuid);
+ snprintf(raid_slot, 16, "RAID_DISK=%d", cmsg->raid_slot);
+ pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
+ init_completion(&cinfo->newdisk_completion);
+ set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+ kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
+ wait_for_completion_timeout(&cinfo->newdisk_completion,
+ NEW_DEV_TIMEOUT);
+ clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
+}
+
+
+static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ md_reload_sb(mddev);
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+}
+
+static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev)
+ md_kick_rdev_from_array(rdev);
+ else
+ pr_warn("%s: %d Could not find disk(%d) to REMOVE\n", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
+{
+ struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev, msg->raid_slot);
+
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ clear_bit(Faulty, &rdev->flags);
+ else
+ pr_warn("%s: %d Could not find disk(%d) which is faulty", __func__, __LINE__, msg->raid_slot);
+}
+
+static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
+{
+ switch (msg->type) {
+ case METADATA_UPDATED:
+ pr_info("%s: %d Received message: METADATA_UPDATE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_metadata_update(mddev, msg);
+ break;
+ case RESYNCING:
+ pr_info("%s: %d Received message: RESYNCING from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_suspend_info(mddev->cluster_info, msg->slot,
+ msg->low, msg->high);
+ break;
+ case NEWDISK:
+ pr_info("%s: %d Received message: NEWDISK from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_add_new_disk(mddev, msg);
+ break;
+ case REMOVE:
+ pr_info("%s: %d Received REMOVE from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_remove_disk(mddev, msg);
+ break;
+ case RE_ADD:
+ pr_info("%s: %d Received RE_ADD from %d\n",
+ __func__, __LINE__, msg->slot);
+ process_readd_disk(mddev, msg);
+ break;
+ default:
+ pr_warn("%s:%d Received unknown message from %d\n",
+ __func__, __LINE__, msg->slot);
+ }
+}
+
+/*
+ * Thread for receiving messages: read the message from the MESSAGE LVB,
+ * process it, and then cycle the ACK lock so the sender knows we are done.
+ */
+static void recv_daemon(struct md_thread *thread)
+{
+ struct md_cluster_info *cinfo = thread->mddev->cluster_info;
+ struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
+ struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
+ struct cluster_msg msg;
+
+ /*get CR on Message*/
+ if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
+ pr_err("md/raid1:failed to get CR on MESSAGE\n");
+ return;
+ }
+
+ /* read lvb and wake up thread to process this message_lockres */
+ memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
+ process_recvd_msg(thread->mddev, &msg);
+
+ /*release CR on ack_lockres*/
+ dlm_unlock_sync(ack_lockres);
+ /*up-convert to EX on message_lockres*/
+ dlm_lock_sync(message_lockres, DLM_LOCK_EX);
+ /*get CR on ack_lockres again*/
+ dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
+ /*release CR on message_lockres*/
+ dlm_unlock_sync(message_lockres);
+}
+
+/* lock_comm()
+ * Takes the lock on the TOKEN lock resource so no other
+ * node can communicate while the operation is underway.
+ */
+static int lock_comm(struct md_cluster_info *cinfo)
+{
+ int error;
+
+ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (error)
+ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+ __func__, __LINE__, error);
+ return error;
+}
+
+static void unlock_comm(struct md_cluster_info *cinfo)
+{
+ dlm_unlock_sync(cinfo->token_lockres);
+}
+
+/* __sendmsg()
+ * This function performs the actual sending of the message. It is usually
+ * called after performing the encompassing operation, while the TOKEN lock
+ * is still held (see lock_comm()).
+ * The function:
+ * 1. Grabs the MESSAGE lock resource in EX mode
+ * 2. Copies the message into the MESSAGE LVB
+ * 3. Down-converts the MESSAGE lock resource to CR
+ * 4. Up-converts the ACK lock resource from CR to EX. This forces the BAST on the
+ * other nodes, which then read the message. The thread waits here until all
+ * other nodes have released their ACK lock resource.
+ * 5. Down-converts the ACK lock resource to CR
+ */
+static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int error;
+ int slot = cinfo->slot_number - 1;
+
+ cmsg->slot = cpu_to_le32(slot);
+ /*get EX on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
+ goto failed_message;
+ }
+
+ memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
+ sizeof(struct cluster_msg));
+ /*down-convert EX to CR on Message*/
+ error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on MESSAGE(%d)\n",
+ error);
+ goto failed_message;
+ }
+
+ /*up-convert CR to EX on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
+ if (error) {
+ pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+ /*down-convert EX to CR on Ack*/
+ error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
+ if (error) {
+ pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
+ error);
+ goto failed_ack;
+ }
+
+failed_ack:
+ dlm_unlock_sync(cinfo->message_lockres);
+failed_message:
+ return error;
+}
+
+static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
+{
+ int ret;
+
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
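+/*
+ * Called at join time: try to take PW (without queueing) on every other
+ * slot's bitmap lock.  -EAGAIN means that node is resyncing, so read its
+ * resync range from the LVB and add it to the suspend list.
+ */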
+static int gather_all_resync_info(struct mddev *mddev, int total_slots)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int i, ret = 0;
+ struct dlm_lock_resource *bm_lockres;
+ struct suspend_info *s;
+ char str[64];
+
+
+ for (i = 0; i < total_slots; i++) {
+ memset(str, '\0', 64);
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres)
+ return -ENOMEM;
+ if (i == (cinfo->slot_number - 1))
+ continue;
+
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (ret == -EAGAIN) {
+ memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
+ s = read_resync_info(mddev, bm_lockres);
+ if (s) {
+ pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
+ __func__, __LINE__,
+ (unsigned long long) s->lo,
+ (unsigned long long) s->hi, i);
+ spin_lock_irq(&cinfo->suspend_lock);
+ s->slot = i;
+ list_add(&s->list, &cinfo->suspend_list);
+ spin_unlock_irq(&cinfo->suspend_lock);
+ }
+ ret = 0;
+ lockres_free(bm_lockres);
+ continue;
+ }
+ if (ret)
+ goto out;
+ /* TODO: Read the disk bitmap sb and check if it needs recovery */
+ dlm_unlock_sync(bm_lockres);
+ lockres_free(bm_lockres);
+ }
+out:
+ return ret;
+}
+
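+/*
+ * Join the cluster: create (or join) a DLM lockspace named after the array
+ * UUID, set up the communication lock resources and the receive thread, take
+ * PW on our own bitmap lock, and gather resync state from the other slots.
+ */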
+static int join(struct mddev *mddev, int nodes)
+{
+ struct md_cluster_info *cinfo;
+ int ret, ops_rv;
+ char str[64];
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
+ cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
+ if (!cinfo) {
+ module_put(THIS_MODULE);
+ return -ENOMEM;
+ }
+
+ init_completion(&cinfo->completion);
+
+ mutex_init(&cinfo->sb_mutex);
+ mddev->cluster_info = cinfo;
+
+ memset(str, 0, 64);
+ pretty_uuid(str, mddev->uuid);
+ ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
+ DLM_LSFL_FS, LVB_SIZE,
+ &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
+ if (ret)
+ goto err;
+ wait_for_completion(&cinfo->completion);
+ if (nodes < cinfo->slot_number) {
+ pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).",
+ cinfo->slot_number, nodes);
+ ret = -ERANGE;
+ goto err;
+ }
+ cinfo->sb_lock = lockres_init(mddev, "cmd-super",
+ NULL, 0);
+ if (!cinfo->sb_lock) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /* Initiate the communication resources */
+ ret = -ENOMEM;
+ cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
+ if (!cinfo->recv_thread) {
+ pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
+ goto err;
+ }
+ cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
+ if (!cinfo->message_lockres)
+ goto err;
+ cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
+ if (!cinfo->token_lockres)
+ goto err;
+ cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
+ if (!cinfo->ack_lockres)
+ goto err;
+ cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
+ if (!cinfo->no_new_dev_lockres)
+ goto err;
+
+ /* get sync CR lock on ACK. */
+ if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
+ pr_err("md-cluster: failed to get a sync CR lock on ACK!\n");
+ /* get sync CR lock on no-new-dev. */
+ if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
+ pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!\n");
+
+
+ pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
+ snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
+ cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!cinfo->bitmap_lockres)
+ goto err;
+ if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
+ pr_err("Failed to get bitmap lock\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&cinfo->suspend_list);
+ spin_lock_init(&cinfo->suspend_lock);
+
+ ret = gather_all_resync_info(mddev, nodes);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->bitmap_lockres);
+ lockres_free(cinfo->sb_lock);
+ if (cinfo->lockspace)
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ mddev->cluster_info = NULL;
+ kfree(cinfo);
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static int leave(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!cinfo)
+ return 0;
+ md_unregister_thread(&cinfo->recovery_thread);
+ md_unregister_thread(&cinfo->recv_thread);
+ lockres_free(cinfo->message_lockres);
+ lockres_free(cinfo->token_lockres);
+ lockres_free(cinfo->ack_lockres);
+ lockres_free(cinfo->no_new_dev_lockres);
+ lockres_free(cinfo->sb_lock);
+ lockres_free(cinfo->bitmap_lockres);
+ dlm_release_lockspace(cinfo->lockspace, 2);
+ return 0;
+}
+
+/* slot_number(): Returns the MD slot number to use
+ * DLM starts the slot numbers from 1, whereas cluster-md
+ * wants the number to be from zero, so we deduct one
+ */
+static int slot_number(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return cinfo->slot_number - 1;
+}
+
+static void resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ add_resync_info(mddev, cinfo->bitmap_lockres, lo, hi);
+ /* Re-acquire the lock to refresh LVB */
+ dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
+}
+
+static int metadata_update_start(struct mddev *mddev)
+{
+ return lock_comm(mddev->cluster_info);
+}
+
+static int metadata_update_finish(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(METADATA_UPDATED);
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
+static int metadata_update_cancel(struct mddev *mddev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ return dlm_unlock_sync(cinfo->token_lockres);
+}
+
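+/*
+ * Record our current resync window in the bitmap LVB and broadcast it to the
+ * other nodes; resync_finish() sends a 0..0 window to signal completion.
+ */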
+static int resync_send(struct mddev *mddev, enum msg_type type,
+ sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int slot = cinfo->slot_number - 1;
+
+ pr_info("%s:%d lo: %llu hi: %llu\n", __func__, __LINE__,
+ (unsigned long long)lo,
+ (unsigned long long)hi);
+ resync_info_update(mddev, lo, hi);
+ cmsg.type = cpu_to_le32(type);
+ cmsg.slot = cpu_to_le32(slot);
+ cmsg.low = cpu_to_le64(lo);
+ cmsg.high = cpu_to_le64(hi);
+ return sendmsg(cinfo, &cmsg);
+}
+
+static int resync_start(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ return resync_send(mddev, RESYNCING, lo, hi);
+}
+
+static void resync_finish(struct mddev *mddev)
+{
+ pr_info("%s:%d\n", __func__, __LINE__);
+ resync_send(mddev, RESYNCING, 0, 0);
+}
+
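+/*
+ * Return 1 if [lo, hi) overlaps a range that another node is currently
+ * resyncing, so the caller can hold off I/O to that area.
+ */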
+static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
+ struct suspend_info *s;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ if (list_empty(&cinfo->suspend_list))
+ goto out;
+ list_for_each_entry(s, &cinfo->suspend_list, list)
+ if (hi > s->lo && lo < s->hi) {
+ ret = 1;
+ break;
+ }
+out:
+ spin_unlock_irq(&cinfo->suspend_lock);
+ return ret;
+}
+
+static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg;
+ int ret = 0;
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ char *uuid = sb->device_uuid;
+
+ memset(&cmsg, 0, sizeof(cmsg));
+ cmsg.type = cpu_to_le32(NEWDISK);
+ memcpy(cmsg.uuid, uuid, 16);
+ cmsg.raid_slot = rdev->desc_nr;
+ lock_comm(cinfo);
+ ret = __sendmsg(cinfo, &cmsg);
+ if (ret)
+ return ret;
+ cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
+ ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
+ cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+ /* Some node does not "see" the device */
+ if (ret == -EAGAIN)
+ ret = -ENOENT;
+ else
+ dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
+ return ret;
+}
+
+static int add_new_disk_finish(struct mddev *mddev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret;
+ /* Write sb and inform others */
+ md_update_sb(mddev, 1);
+ cmsg.type = METADATA_UPDATED;
+ ret = __sendmsg(cinfo, &cmsg);
+ unlock_comm(cinfo);
+ return ret;
+}
+
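+/*
+ * Local confirmation (or denial) of a NEWDISK request: on ack we release our
+ * hold on the no-new-dev lock, and either way we complete newdisk_completion
+ * so process_add_new_disk() stops waiting.
+ */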
+static int new_disk_ack(struct mddev *mddev, bool ack)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
+ pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
+ return -EINVAL;
+ }
+
+ if (ack)
+ dlm_unlock_sync(cinfo->no_new_dev_lockres);
+ complete(&cinfo->newdisk_completion);
+ return 0;
+}
+
+static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
+{
+ struct cluster_msg cmsg;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ cmsg.type = REMOVE;
+ cmsg.raid_slot = rdev->desc_nr;
+ return __sendmsg(cinfo, &cmsg);
+}
+
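+/*
+ * Re-add path: ask the other nodes to RE_ADD the disk, then merge every other
+ * slot's bitmap into ours so the recovery also covers writes made while the
+ * device was missing.
+ */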
+static int gather_bitmaps(struct md_rdev *rdev)
+{
+ int sn, err;
+ sector_t lo, hi;
+ struct cluster_msg cmsg;
+ struct mddev *mddev = rdev->mddev;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ cmsg.type = RE_ADD;
+ cmsg.raid_slot = rdev->desc_nr;
+ err = sendmsg(cinfo, &cmsg);
+ if (err)
+ goto out;
+
+ for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
+ if (sn == (cinfo->slot_number - 1))
+ continue;
+ err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
+ if (err) {
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
+ goto out;
+ }
+ if ((hi > 0) && (lo < mddev->recovery_cp))
+ mddev->recovery_cp = lo;
+ }
+out:
+ return err;
+}
+
+static struct md_cluster_operations cluster_ops = {
+ .join = join,
+ .leave = leave,
+ .slot_number = slot_number,
+ .resync_info_update = resync_info_update,
+ .resync_start = resync_start,
+ .resync_finish = resync_finish,
+ .metadata_update_start = metadata_update_start,
+ .metadata_update_finish = metadata_update_finish,
+ .metadata_update_cancel = metadata_update_cancel,
+ .area_resyncing = area_resyncing,
+ .add_new_disk_start = add_new_disk_start,
+ .add_new_disk_finish = add_new_disk_finish,
+ .new_disk_ack = new_disk_ack,
+ .remove_disk = remove_disk,
+ .gather_bitmaps = gather_bitmaps,
+};
+
+static int __init cluster_init(void)
+{
+ pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
+ pr_info("Registering Cluster MD functions\n");
+ register_md_cluster_operations(&cluster_ops, THIS_MODULE);
+ return 0;
+}
+
+static void cluster_exit(void)
+{
+ unregister_md_cluster_operations();
+}
+
+module_init(cluster_init);
+module_exit(cluster_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Clustering support for MD");
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
new file mode 100644
index 000000000000..6817ee00e053
--- /dev/null
+++ b/drivers/md/md-cluster.h
@@ -0,0 +1,29 @@
+
+
+#ifndef _MD_CLUSTER_H
+#define _MD_CLUSTER_H
+
+#include "md.h"
+
+struct mddev;
+struct md_rdev;
+
+struct md_cluster_operations {
+ int (*join)(struct mddev *mddev, int nodes);
+ int (*leave)(struct mddev *mddev);
+ int (*slot_number)(struct mddev *mddev);
+ void (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*resync_start)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_finish)(struct mddev *mddev);
+ int (*metadata_update_start)(struct mddev *mddev);
+ int (*metadata_update_finish)(struct mddev *mddev);
+ int (*metadata_update_cancel)(struct mddev *mddev);
+ int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+ int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*add_new_disk_finish)(struct mddev *mddev);
+ int (*new_disk_ack)(struct mddev *mddev, bool ack);
+ int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
+ int (*gather_bitmaps)(struct md_rdev *rdev);
+};
+
+#endif /* _MD_CLUSTER_H */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e6178787ce3d..d4f31e195e26 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -53,6 +53,7 @@
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"
+#include "md-cluster.h"
#ifndef MODULE
static void autostart_arrays(int part);
@@ -66,6 +67,11 @@ static void autostart_arrays(int part);
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
+struct md_cluster_operations *md_cluster_ops;
+EXPORT_SYMBOL(md_cluster_ops);
+struct module *md_cluster_mod;
+EXPORT_SYMBOL(md_cluster_mod);
+
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;
@@ -640,7 +646,7 @@ void mddev_unlock(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(mddev_unlock);
-static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
struct md_rdev *rdev;
@@ -650,6 +656,7 @@ static struct md_rdev *find_rdev_nr_rcu(struct mddev *mddev, int nr)
return NULL;
}
+EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
@@ -2047,11 +2054,11 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
int choice = 0;
if (mddev->pers)
choice = mddev->raid_disks;
- while (find_rdev_nr_rcu(mddev, choice))
+ while (md_find_rdev_nr_rcu(mddev, choice))
choice++;
rdev->desc_nr = choice;
} else {
- if (find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
+ if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
rcu_read_unlock();
return -EBUSY;
}
@@ -2166,11 +2173,12 @@ static void export_rdev(struct md_rdev *rdev)
kobject_put(&rdev->kobj);
}
-static void kick_rdev_from_array(struct md_rdev *rdev)
+void md_kick_rdev_from_array(struct md_rdev *rdev)
{
unbind_rdev_from_array(rdev);
export_rdev(rdev);
}
+EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
static void export_array(struct mddev *mddev)
{
@@ -2179,7 +2187,7 @@ static void export_array(struct mddev *mddev)
while (!list_empty(&mddev->disks)) {
rdev = list_first_entry(&mddev->disks, struct md_rdev,
same_set);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
mddev->raid_disks = 0;
mddev->major_version = 0;
@@ -2208,7 +2216,7 @@ static void sync_sbs(struct mddev *mddev, int nospares)
}
}
-static void md_update_sb(struct mddev *mddev, int force_change)
+void md_update_sb(struct mddev *mddev, int force_change)
{
struct md_rdev *rdev;
int sync_req;
@@ -2369,6 +2377,37 @@ repeat:
wake_up(&rdev->blocked_wait);
}
}
+EXPORT_SYMBOL(md_update_sb);
+
+static int add_bound_rdev(struct md_rdev *rdev)
+{
+ struct mddev *mddev = rdev->mddev;
+ int err = 0;
+
+ if (!mddev->pers->hot_remove_disk) {
+ /* If there is hot_add_disk but no hot_remove_disk
+ * then added disks are for geometry changes,
+ * and should be added immediately.
+ */
+ super_types[mddev->major_version].
+ validate_super(mddev, rdev);
+ err = mddev->pers->hot_add_disk(mddev, rdev);
+ if (err) {
+ unbind_rdev_from_array(rdev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ if (mddev->degraded)
+ set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_new_event(mddev);
+ md_wakeup_thread(mddev->thread);
+ return 0;
+}
/* words written to sysfs files may, or may not, be \n terminated.
* We want to accept with case. For this we use cmd_match.
@@ -2471,10 +2510,16 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = -EBUSY;
else {
struct mddev *mddev = rdev->mddev;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+ md_kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (mddev->pers)
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
err = 0;
}
} else if (cmd_match(buf, "writemostly")) {
@@ -2553,6 +2598,21 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(Replacement, &rdev->flags);
err = 0;
}
+ } else if (cmd_match(buf, "re-add")) {
+ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
+ /* clear_bit is performed _after_ all the devices
+ * have their local Faulty bit cleared. If any writes
+ * happen in the meantime in the local node, they
+ * will land in the local bitmap, which will be synced
+ * by this node eventually
+ */
+ if (!mddev_is_clustered(rdev->mddev) ||
+ (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
+ clear_bit(Faulty, &rdev->flags);
+ err = add_bound_rdev(rdev);
+ }
+ } else
+ err = -EBUSY;
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -3127,7 +3187,7 @@ static void analyze_sbs(struct mddev *mddev)
"md: fatal superblock inconsistency in %s"
" -- removing from array\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
}
super_types[mddev->major_version].
@@ -3142,18 +3202,27 @@ static void analyze_sbs(struct mddev *mddev)
"md: %s: %s: only %d devices permitted\n",
mdname(mddev), bdevname(rdev->bdev, b),
mddev->max_disks);
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
- if (rdev != freshest)
+ if (rdev != freshest) {
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
printk(KERN_WARNING "md: kicking non-fresh %s"
" from array!\n",
bdevname(rdev->bdev,b));
- kick_rdev_from_array(rdev);
+ md_kick_rdev_from_array(rdev);
continue;
}
+ /* No device should have a Candidate flag
+ * when reading devices
+ */
+ if (test_bit(Candidate, &rdev->flags)) {
+ pr_info("md: kicking Cluster Candidate %s from array!\n",
+ bdevname(rdev->bdev, b));
+ md_kick_rdev_from_array(rdev);
+ }
+ }
if (mddev->level == LEVEL_MULTIPATH) {
rdev->desc_nr = i++;
rdev->raid_disk = rdev->desc_nr;
@@ -4008,8 +4077,12 @@ size_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
if (mddev->pers) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
err = update_size(mddev, sectors);
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
} else {
if (mddev->dev_sectors == 0 ||
mddev->dev_sectors > sectors)
@@ -4354,7 +4427,6 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
unsigned long long min;
int err;
- int chunk;
if (kstrtoull(buf, 10, &min))
return -EINVAL;
@@ -4368,16 +4440,8 @@ min_sync_store(struct mddev *mddev, const char *buf, size_t len)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
goto out_unlock;
- /* Must be a multiple of chunk_size */
- chunk = mddev->chunk_sectors;
- if (chunk) {
- sector_t temp = min;
-
- err = -EINVAL;
- if (sector_div(temp, chunk))
- goto out_unlock;
- }
- mddev->resync_min = min;
+ /* Round down to multiple of 4K for safety */
+ mddev->resync_min = round_down(min, 8);
err = 0;
out_unlock:
@@ -5077,10 +5141,16 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = bitmap_create(mddev);
- if (err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (IS_ERR(bitmap)) {
+ err = PTR_ERR(bitmap);
printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
+ } else
+ mddev->bitmap = bitmap;
+
}
if (err) {
mddev_detach(mddev);
@@ -5232,6 +5302,8 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
flush_workqueue(md_misc_wq);
if (mddev->sync_thread) {
@@ -5250,6 +5322,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->in_sync = 1;
md_update_sb(mddev, 1);
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
}
void md_stop_writes(struct mddev *mddev)
@@ -5636,6 +5710,8 @@ static int get_array_info(struct mddev *mddev, void __user *arg)
info.state = (1<<MD_SB_CLEAN);
if (mddev->bitmap && mddev->bitmap_info.offset)
info.state |= (1<<MD_SB_BITMAP_PRESENT);
+ if (mddev_is_clustered(mddev))
+ info.state |= (1<<MD_SB_CLUSTERED);
info.active_disks = insync;
info.working_disks = working;
info.failed_disks = failed;
@@ -5691,7 +5767,7 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
return -EFAULT;
rcu_read_lock();
- rdev = find_rdev_nr_rcu(mddev, info.number);
+ rdev = md_find_rdev_nr_rcu(mddev, info.number);
if (rdev) {
info.major = MAJOR(rdev->bdev->bd_dev);
info.minor = MINOR(rdev->bdev->bd_dev);
@@ -5724,6 +5800,13 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
struct md_rdev *rdev;
dev_t dev = MKDEV(info->major,info->minor);
+ if (mddev_is_clustered(mddev) &&
+ !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
+ pr_err("%s: Cannot add to clustered mddev.\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
return -EOVERFLOW;
@@ -5810,31 +5893,38 @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
else
clear_bit(WriteMostly, &rdev->flags);
+ /*
+ * check whether the device shows up in other nodes
+ */
+ if (mddev_is_clustered(mddev)) {
+ if (info->state & (1 << MD_DISK_CANDIDATE)) {
+ /* Through --cluster-confirm */
+ set_bit(Candidate, &rdev->flags);
+ err = md_cluster_ops->new_disk_ack(mddev, true);
+ if (err) {
+ export_rdev(rdev);
+ return err;
+ }
+ } else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
+ /* --add initiated by this node */
+ err = md_cluster_ops->add_new_disk_start(mddev, rdev);
+ if (err) {
+ md_cluster_ops->add_new_disk_finish(mddev);
+ export_rdev(rdev);
+ return err;
+ }
+ }
+ }
+
rdev->raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
- if (!err && !mddev->pers->hot_remove_disk) {
- /* If there is hot_add_disk but no hot_remove_disk
- * then added disks for geometry changes,
- * and should be added immediately.
- */
- super_types[mddev->major_version].
- validate_super(mddev, rdev);
- err = mddev->pers->hot_add_disk(mddev, rdev);
- if (err)
- unbind_rdev_from_array(rdev);
- }
if (err)
export_rdev(rdev);
else
- sysfs_notify_dirent_safe(rdev->sysfs_state);
-
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- if (mddev->degraded)
- set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- if (!err)
- md_new_event(mddev);
- md_wakeup_thread(mddev->thread);
+ err = add_bound_rdev(rdev);
+ if (mddev_is_clustered(mddev) &&
+ (info->state & (1 << MD_DISK_CLUSTER_ADD)))
+ md_cluster_ops->add_new_disk_finish(mddev);
return err;
}
@@ -5895,18 +5985,29 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
if (!rdev)
return -ENXIO;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
+
clear_bit(Blocked, &rdev->flags);
remove_and_add_spares(mddev, rdev);
if (rdev->raid_disk >= 0)
goto busy;
- kick_rdev_from_array(rdev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->remove_disk(mddev, rdev);
+
+ md_kick_rdev_from_array(rdev);
md_update_sb(mddev, 1);
md_new_event(mddev);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+
return 0;
busy:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n",
bdevname(rdev->bdev,b), mdname(mddev));
return -EBUSY;
@@ -5956,12 +6057,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
err = -EINVAL;
goto abort_export;
}
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
clear_bit(In_sync, &rdev->flags);
rdev->desc_nr = -1;
rdev->saved_raid_disk = -1;
err = bind_rdev_to_array(rdev, mddev);
if (err)
- goto abort_export;
+ goto abort_clustered;
/*
* The rest should better be atomic, we can have disk failures
@@ -5972,6 +6076,8 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
/*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
@@ -5981,6 +6087,9 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
md_new_event(mddev);
return 0;
+abort_clustered:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
abort_export:
export_rdev(rdev);
return err;
@@ -6038,9 +6147,14 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
if (mddev->pers) {
mddev->pers->quiesce(mddev, 1);
if (fd >= 0) {
- err = bitmap_create(mddev);
- if (!err)
+ struct bitmap *bitmap;
+
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
err = bitmap_load(mddev);
+ } else
+ err = PTR_ERR(bitmap);
}
if (fd < 0 || err) {
bitmap_destroy(mddev);
@@ -6293,6 +6407,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
return rv;
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2);
@@ -6300,33 +6416,49 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
- if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
- return -EINVAL;
- if (mddev->recovery || mddev->sync_thread)
- return -EBUSY;
+ if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
+ rv = -EINVAL;
+ goto err;
+ }
+ if (mddev->recovery || mddev->sync_thread) {
+ rv = -EBUSY;
+ goto err;
+ }
if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
+ struct bitmap *bitmap;
/* add the bitmap */
- if (mddev->bitmap)
- return -EEXIST;
- if (mddev->bitmap_info.default_offset == 0)
- return -EINVAL;
+ if (mddev->bitmap) {
+ rv = -EEXIST;
+ goto err;
+ }
+ if (mddev->bitmap_info.default_offset == 0) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->bitmap_info.offset =
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
mddev->pers->quiesce(mddev, 1);
- rv = bitmap_create(mddev);
- if (!rv)
+ bitmap = bitmap_create(mddev, -1);
+ if (!IS_ERR(bitmap)) {
+ mddev->bitmap = bitmap;
rv = bitmap_load(mddev);
+ } else
+ rv = PTR_ERR(bitmap);
if (rv)
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
} else {
/* remove the bitmap */
- if (!mddev->bitmap)
- return -ENOENT;
- if (mddev->bitmap->storage.file)
- return -EINVAL;
+ if (!mddev->bitmap) {
+ rv = -ENOENT;
+ goto err;
+ }
+ if (mddev->bitmap->storage.file) {
+ rv = -EINVAL;
+ goto err;
+ }
mddev->pers->quiesce(mddev, 1);
bitmap_destroy(mddev);
mddev->pers->quiesce(mddev, 0);
@@ -6334,6 +6466,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
}
}
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ return rv;
+err:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_cancel(mddev);
return rv;
}
@@ -6393,6 +6531,7 @@ static inline bool md_ioctl_valid(unsigned int cmd)
case SET_DISK_FAULTY:
case STOP_ARRAY:
case STOP_ARRAY_RO:
+ case CLUSTERED_DISK_NACK:
return true;
default:
return false;
@@ -6665,6 +6804,13 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto unlock;
}
+ case CLUSTERED_DISK_NACK:
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->new_disk_ack(mddev, false);
+ else
+ err = -EINVAL;
+ goto unlock;
+
case HOT_ADD_DISK:
err = hot_add_disk(mddev, new_decode_dev(arg));
goto unlock;
@@ -7238,6 +7384,55 @@ int unregister_md_personality(struct md_personality *p)
}
EXPORT_SYMBOL(unregister_md_personality);
+int register_md_cluster_operations(struct md_cluster_operations *ops, struct module *module)
+{
+ if (md_cluster_ops != NULL)
+ return -EALREADY;
+ spin_lock(&pers_lock);
+ md_cluster_ops = ops;
+ md_cluster_mod = module;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(register_md_cluster_operations);
+
+int unregister_md_cluster_operations(void)
+{
+ spin_lock(&pers_lock);
+ md_cluster_ops = NULL;
+ spin_unlock(&pers_lock);
+ return 0;
+}
+EXPORT_SYMBOL(unregister_md_cluster_operations);
+
+int md_setup_cluster(struct mddev *mddev, int nodes)
+{
+ int err;
+
+ err = request_module("md-cluster");
+ if (err) {
+ pr_err("md-cluster module not found.\n");
+ return err;
+ }
+
+ spin_lock(&pers_lock);
+ if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
+ spin_unlock(&pers_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&pers_lock);
+
+ return md_cluster_ops->join(mddev, nodes);
+}
+
+void md_cluster_stop(struct mddev *mddev)
+{
+ if (!md_cluster_ops)
+ return;
+ md_cluster_ops->leave(mddev);
+ module_put(md_cluster_mod);
+}
+
static int is_mddev_idle(struct mddev *mddev, int init)
{
struct md_rdev *rdev;
@@ -7375,7 +7570,11 @@ int md_allow_write(struct mddev *mddev)
mddev->safemode == 0)
mddev->safemode = 1;
spin_unlock(&mddev->lock);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
sysfs_notify_dirent_safe(mddev->sysfs_state);
} else
spin_unlock(&mddev->lock);
@@ -7576,6 +7775,9 @@ void md_do_sync(struct md_thread *thread)
md_new_event(mddev);
update_time = jiffies;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_start(mddev, j, max_sectors);
+
blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7618,8 +7820,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
- sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ sectors = mddev->pers->sync_request(mddev, j, &skipped);
if (sectors == 0) {
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
break;
@@ -7636,6 +7837,8 @@ void md_do_sync(struct md_thread *thread)
j += sectors;
if (j > 2)
mddev->curr_resync = j;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_info_update(mddev, j, max_sectors);
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
/* this is the earliest that rebuild will be
@@ -7677,11 +7880,18 @@ void md_do_sync(struct md_thread *thread)
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
- if ((currspeed > speed_max(mddev)) ||
- !is_mddev_idle(mddev, 0)) {
+ if (currspeed > speed_max(mddev)) {
msleep(500);
goto repeat;
}
+ if (!is_mddev_idle(mddev, 0)) {
+ /*
+ * Give other IO more of a chance.
+ * The faster the devices, the less we wait.
+ */
+ wait_event(mddev->recovery_wait,
+ !atomic_read(&mddev->recovery_active));
+ }
}
}
printk(KERN_INFO "md: %s: %s %s.\n",mdname(mddev), desc,
@@ -7694,7 +7904,10 @@ void md_do_sync(struct md_thread *thread)
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
- mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
+ mddev->pers->sync_request(mddev, max_sectors, &skipped);
+
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->resync_finish(mddev);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
mddev->curr_resync > 2) {
@@ -7925,8 +8138,13 @@ void md_check_recovery(struct mddev *mddev)
sysfs_notify_dirent_safe(mddev->sysfs_state);
}
- if (mddev->flags & MD_UPDATE_SB_FLAGS)
+ if (mddev->flags & MD_UPDATE_SB_FLAGS) {
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
md_update_sb(mddev, 0);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
+ }
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
@@ -8024,6 +8242,8 @@ void md_reap_sync_thread(struct mddev *mddev)
set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_start(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
mddev->pers->finish_reshape)
mddev->pers->finish_reshape(mddev);
@@ -8036,6 +8256,8 @@ void md_reap_sync_thread(struct mddev *mddev)
rdev->saved_raid_disk = -1;
md_update_sb(mddev, 1);
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->metadata_update_finish(mddev);
clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -8656,6 +8878,28 @@ err_wq:
return ret;
}
+void md_reload_sb(struct mddev *mddev)
+{
+ struct md_rdev *rdev, *tmp;
+
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ rdev->sb_loaded = 0;
+ ClearPageUptodate(rdev->sb_page);
+ }
+ mddev->raid_disks = 0;
+ analyze_sbs(mddev);
+ rdev_for_each_safe(rdev, tmp, mddev) {
+ struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
+ /* since we don't write to faulty devices, we figure out if the
+ * disk is faulty by comparing events
+ */
+ if (mddev->events > sb->events)
+ set_bit(Faulty, &rdev->flags);
+ }
+
+}
+EXPORT_SYMBOL(md_reload_sb);
+
#ifndef MODULE
/*
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 318ca8fd430f..4046a6c6f223 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -23,6 +23,7 @@
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include "md-cluster.h"
#define MaxSector (~(sector_t)0)
@@ -170,6 +171,10 @@ enum flag_bits {
* a want_replacement device with same
* raid_disk number.
*/
+ Candidate, /* For clustered environments only:
+ * This device is seen locally but not
+ * by the whole cluster
+ */
};
#define BB_LEN_MASK (0x00000000000001FFULL)
@@ -202,6 +207,8 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
+struct md_cluster_info;
+
struct mddev {
void *private;
struct md_personality *pers;
@@ -430,6 +437,8 @@ struct mddev {
unsigned long daemon_sleep; /* how many jiffies between updates? */
unsigned long max_write_behind; /* write-behind mode */
int external;
+ int nodes; /* Maximum number of nodes in the cluster */
+ char cluster_name[64]; /* Name of the cluster */
} bitmap_info;
atomic_t max_corr_read_errors; /* max read retries */
@@ -448,6 +457,7 @@ struct mddev {
struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
+ struct md_cluster_info *cluster_info;
};
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -496,7 +506,7 @@ struct md_personality
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
- sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
+ sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
int (*resize) (struct mddev *mddev, sector_t sectors);
sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
int (*check_reshape) (struct mddev *mddev);
@@ -608,6 +618,11 @@ static inline void safe_put_page(struct page *p)
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
+extern int register_md_cluster_operations(struct md_cluster_operations *ops,
+ struct module *module);
+extern int unregister_md_cluster_operations(void);
+extern int md_setup_cluster(struct mddev *mddev, int nodes);
+extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
void (*run)(struct md_thread *thread),
struct mddev *mddev,
@@ -654,6 +669,10 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
+extern void md_reload_sb(struct mddev *mddev);
+extern void md_update_sb(struct mddev *mddev, int force);
+extern void md_kick_rdev_from_array(struct md_rdev *rdev);
+struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
return !!blk_check_plugged(md_unplug, mddev,
@@ -669,4 +688,9 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}
+extern struct md_cluster_operations *md_cluster_ops;
+static inline int mddev_is_clustered(struct mddev *mddev)
+{
+ return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
+}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 3b5d7f704aa3..2cb59a641cd2 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -271,14 +271,16 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
goto abort;
}
- blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
- blk_queue_io_opt(mddev->queue,
- (mddev->chunk_sectors << 9) * mddev->raid_disks);
-
- if (!discard_supported)
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ if (mddev->queue) {
+ blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+ blk_queue_io_opt(mddev->queue,
+ (mddev->chunk_sectors << 9) * mddev->raid_disks);
+
+ if (!discard_supported)
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ else
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+ }
pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -429,9 +431,12 @@ static int raid0_run(struct mddev *mddev)
}
if (md_check_no_bitmap(mddev))
return -EINVAL;
- blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
- blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+
+ if (mddev->queue) {
+ blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+ }
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -448,16 +453,17 @@ static int raid0_run(struct mddev *mddev)
printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
mdname(mddev),
(unsigned long long)mddev->array_sectors);
- /* calculate the max read-ahead size.
- * For read-ahead of large files to be effective, we need to
- * readahead at least twice a whole stripe. i.e. number of devices
- * multiplied by chunk size times 2.
- * If an individual device has an ra_pages greater than the
- * chunk size, then we will not drive that device as hard as it
- * wants. We consider this a configuration error: a larger
- * chunksize should be used in that case.
- */
- {
+
+ if (mddev->queue) {
+ /* calculate the max read-ahead size.
+ * For read-ahead of large files to be effective, we need to
+ * readahead at least twice a whole stripe. i.e. number of devices
+ * multiplied by chunk size times 2.
+ * If an individual device has an ra_pages greater than the
+ * chunk size, then we will not drive that device as hard as it
+ * wants. We consider this a configuration error: a larger
+ * chunksize should be used in that case.
+ */
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d34e238afa54..9157a29c8dbf 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -539,7 +539,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
has_nonrot_disk = 0;
choose_next_idle = 0;
- choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
+ if ((conf->mddev->recovery_cp < this_sector + sectors) ||
+ (mddev_is_clustered(conf->mddev) &&
+ md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+ this_sector + sectors)))
+ choose_first = 1;
+ else
+ choose_first = 0;
for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
sector_t dist;
@@ -1102,8 +1108,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
if (bio_data_dir(bio) == WRITE &&
- bio_end_sector(bio) > mddev->suspend_lo &&
- bio->bi_iter.bi_sector < mddev->suspend_hi) {
+ ((bio_end_sector(bio) > mddev->suspend_lo &&
+ bio->bi_iter.bi_sector < mddev->suspend_hi) ||
+ (mddev_is_clustered(mddev) &&
+ md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
/* As the suspend_* range is controlled by
* userspace, we want an interruptible
* wait.
@@ -1114,7 +1122,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
- bio->bi_iter.bi_sector >= mddev->suspend_hi)
+ bio->bi_iter.bi_sector >= mddev->suspend_hi ||
+ (mddev_is_clustered(mddev) &&
+ !md_cluster_ops->area_resyncing(mddev,
+ bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
schedule();
}
@@ -1561,6 +1572,7 @@ static int raid1_spare_active(struct mddev *mddev)
struct md_rdev *rdev = conf->mirrors[i].rdev;
struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
if (repl
+ && !test_bit(Candidate, &repl->flags)
&& repl->recovery_offset == MaxSector
&& !test_bit(Faulty, &repl->flags)
&& !test_and_set_bit(In_sync, &repl->flags)) {
@@ -2468,7 +2480,7 @@ static int init_resync(struct r1conf *conf)
* that can be installed to exclude normal IO requests.
*/
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
@@ -2521,13 +2533,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*skipped = 1;
return sync_blocks;
}
- /*
- * If there is non-resync activity waiting for a turn,
- * and resync is going fast enough,
- * then let it though before starting on this new sync request.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a7196c49d15d..e793ab6b3570 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2889,7 +2889,7 @@ static int init_resync(struct r10conf *conf)
*/
static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
- int *skipped, int go_faster)
+ int *skipped)
{
struct r10conf *conf = mddev->private;
struct r10bio *r10_bio;
@@ -2994,12 +2994,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (conf->geo.near_copies < conf->geo.raid_disks &&
max_sector > (sector_nr | chunk_mask))
max_sector = (sector_nr | chunk_mask) + 1;
- /*
- * If there is non-resync activity waiting for us then
- * put in a delay to throttle resync.
- */
- if (!go_faster && conf->nr_waiting)
- msleep_interruptible(1000);
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cd2f96b2c572..77dfd720aaa0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -54,6 +54,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/nodemask.h>
+#include <linux/flex_array.h>
#include <trace/events/block.h>
#include "md.h"
@@ -496,7 +497,7 @@ static void shrink_buffers(struct stripe_head *sh)
}
}
-static int grow_buffers(struct stripe_head *sh)
+static int grow_buffers(struct stripe_head *sh, gfp_t gfp)
{
int i;
int num = sh->raid_conf->pool_size;
@@ -504,7 +505,7 @@ static int grow_buffers(struct stripe_head *sh)
for (i = 0; i < num; i++) {
struct page *page;
- if (!(page = alloc_page(GFP_KERNEL))) {
+ if (!(page = alloc_page(gfp))) {
return 1;
}
sh->dev[i].page = page;
@@ -525,6 +526,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
BUG_ON(atomic_read(&sh->count) != 0);
BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
BUG_ON(stripe_operations_active(sh));
+ BUG_ON(sh->batch_head);
pr_debug("init_stripe called, stripe %llu\n",
(unsigned long long)sector);
@@ -552,8 +554,10 @@ retry:
}
if (read_seqcount_retry(&conf->gen_lock, seq))
goto retry;
+ sh->overwrite_disks = 0;
insert_hash(conf, sh);
sh->cpu = smp_processor_id();
+ set_bit(STRIPE_BATCH_READY, &sh->state);
}
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
@@ -668,20 +672,28 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
*(conf->hash_locks + hash));
sh = __find_stripe(conf, sector, conf->generation - previous);
if (!sh) {
- if (!conf->inactive_blocked)
+ if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
sh = get_free_stripe(conf, hash);
+ if (!sh && llist_empty(&conf->released_stripes) &&
+ !test_bit(R5_DID_ALLOC, &conf->cache_state))
+ set_bit(R5_ALLOC_MORE,
+ &conf->cache_state);
+ }
if (noblock && sh == NULL)
break;
if (!sh) {
- conf->inactive_blocked = 1;
+ set_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
wait_event_lock_irq(
conf->wait_for_stripe,
!list_empty(conf->inactive_list + hash) &&
(atomic_read(&conf->active_stripes)
< (conf->max_nr_stripes * 3 / 4)
- || !conf->inactive_blocked),
+ || !test_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state)),
*(conf->hash_locks + hash));
- conf->inactive_blocked = 0;
+ clear_bit(R5_INACTIVE_BLOCKED,
+ &conf->cache_state);
} else {
init_stripe(sh, sector, previous);
atomic_inc(&sh->count);
@@ -708,6 +720,130 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
return sh;
}
+static bool is_full_stripe_write(struct stripe_head *sh)
+{
+ BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded));
+ return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded);
+}
+
+static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ local_irq_disable();
+ if (sh1 > sh2) {
+ spin_lock(&sh2->stripe_lock);
+ spin_lock_nested(&sh1->stripe_lock, 1);
+ } else {
+ spin_lock(&sh1->stripe_lock);
+ spin_lock_nested(&sh2->stripe_lock, 1);
+ }
+}
+
+static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+{
+ spin_unlock(&sh1->stripe_lock);
+ spin_unlock(&sh2->stripe_lock);
+ local_irq_enable();
+}
+
+/* Only a freshly created, full-stripe, normal write stripe can be added to a batch list */
+static bool stripe_can_batch(struct stripe_head *sh)
+{
+ return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+ is_full_stripe_write(sh);
+}
+
+/* we only search backwards (for the preceding stripe) */
+static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh)
+{
+ struct stripe_head *head;
+ sector_t head_sector, tmp_sec;
+ int hash;
+ int dd_idx;
+
+ if (!stripe_can_batch(sh))
+ return;
+ /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */
+ tmp_sec = sh->sector;
+ if (!sector_div(tmp_sec, conf->chunk_sectors))
+ return;
+ head_sector = sh->sector - STRIPE_SECTORS;
+
+ hash = stripe_hash_locks_hash(head_sector);
+ spin_lock_irq(conf->hash_locks + hash);
+ head = __find_stripe(conf, head_sector, conf->generation);
+ if (head && !atomic_inc_not_zero(&head->count)) {
+ spin_lock(&conf->device_lock);
+ if (!atomic_read(&head->count)) {
+ if (!test_bit(STRIPE_HANDLE, &head->state))
+ atomic_inc(&conf->active_stripes);
+ BUG_ON(list_empty(&head->lru) &&
+ !test_bit(STRIPE_EXPANDING, &head->state));
+ list_del_init(&head->lru);
+ if (head->group) {
+ head->group->stripes_cnt--;
+ head->group = NULL;
+ }
+ }
+ atomic_inc(&head->count);
+ spin_unlock(&conf->device_lock);
+ }
+ spin_unlock_irq(conf->hash_locks + hash);
+
+ if (!head)
+ return;
+ if (!stripe_can_batch(head))
+ goto out;
+
+ lock_two_stripes(head, sh);
+ /* clear_batch_ready() clears the flag */
+ if (!stripe_can_batch(head) || !stripe_can_batch(sh))
+ goto unlock_out;
+
+ if (sh->batch_head)
+ goto unlock_out;
+
+ dd_idx = 0;
+ while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
+ dd_idx++;
+ if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw)
+ goto unlock_out;
+
+ if (head->batch_head) {
+ spin_lock(&head->batch_head->batch_lock);
+ /* This batch list is already running */
+ if (!stripe_can_batch(head)) {
+ spin_unlock(&head->batch_head->batch_lock);
+ goto unlock_out;
+ }
+
+ /*
+ * at this point, head's BATCH_READY could be cleared, but we
+ * can still add the stripe to batch list
+ */
+ list_add(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_head->batch_lock);
+
+ sh->batch_head = head->batch_head;
+ } else {
+ head->batch_head = head;
+ sh->batch_head = head->batch_head;
+ spin_lock(&head->batch_lock);
+ list_add_tail(&sh->batch_list, &head->batch_list);
+ spin_unlock(&head->batch_lock);
+ }
+
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ if (atomic_dec_return(&conf->preread_active_stripes)
+ < IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+
+ atomic_inc(&sh->count);
+unlock_out:
+ unlock_two_stripes(head, sh);
+out:
+ release_stripe(head);
+}
+
/* Determine if 'data_offset' or 'new_data_offset' should be used
* in this stripe_head.
*/
@@ -738,6 +874,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
struct r5conf *conf = sh->raid_conf;
int i, disks = sh->disks;
+ struct stripe_head *head_sh = sh;
might_sleep();
@@ -746,6 +883,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
int replace_only = 0;
struct bio *bi, *rbi;
struct md_rdev *rdev, *rrdev = NULL;
+
+ sh = head_sh;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -764,6 +903,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags))
rw |= REQ_SYNC;
+again:
bi = &sh->dev[i].req;
rbi = &sh->dev[i].rreq; /* For writing to replacement */
@@ -782,7 +922,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
/* We raced and saw duplicates */
rrdev = NULL;
} else {
- if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev)
rdev = rrdev;
rrdev = NULL;
}
@@ -853,13 +993,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
bi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
bi->bi_iter.bi_sector = (sh->sector
+ rdev->new_data_offset);
else
bi->bi_iter.bi_sector = (sh->sector
+ rdev->data_offset);
- if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
+ if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
bi->bi_rw |= REQ_NOMERGE;
if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
@@ -903,6 +1045,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
__func__, (unsigned long long)sh->sector,
rbi->bi_rw, i);
atomic_inc(&sh->count);
+ if (sh != head_sh)
+ atomic_inc(&head_sh->count);
if (use_new_offset(conf, sh))
rbi->bi_iter.bi_sector = (sh->sector
+ rrdev->new_data_offset);
@@ -934,8 +1078,18 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
pr_debug("skip op %ld on disc %d for sector %llu\n",
bi->bi_rw, i, (unsigned long long)sh->sector);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ if (sh->batch_head)
+ set_bit(STRIPE_BATCH_ERR,
+ &sh->batch_head->state);
set_bit(STRIPE_HANDLE, &sh->state);
}
+
+ if (!head_sh->batch_head)
+ continue;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ if (sh != head_sh)
+ goto again;
}
}
@@ -1051,6 +1205,7 @@ static void ops_run_biofill(struct stripe_head *sh)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1109,16 +1264,28 @@ static void ops_complete_compute(void *stripe_head_ref)
/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
- struct raid5_percpu *percpu)
+ struct raid5_percpu *percpu, int i)
{
- return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr + sizeof(struct page *) * (sh->disks + 2);
+}
+
+/* return a pointer to the address conversion region of the scribble buffer */
+static struct page **to_addr_page(struct raid5_percpu *percpu, int i)
+{
+ void *addr;
+
+ addr = flex_array_get(percpu->scribble, i);
+ return addr;
}
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int target = sh->ops.target;
struct r5dev *tgt = &sh->dev[target];
struct page *xor_dest = tgt->page;
@@ -1127,6 +1294,8 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
struct async_submit_ctl submit;
int i;
+ BUG_ON(sh->batch_head);
+
pr_debug("%s: stripe %llu block: %d\n",
__func__, (unsigned long long)sh->sector, target);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
@@ -1138,7 +1307,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
- ops_complete_compute, sh, to_addr_conv(sh, percpu));
+ ops_complete_compute, sh, to_addr_conv(sh, percpu, 0));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
@@ -1156,7 +1325,9 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
* destination buffer is recorded in srcs[count] and the Q destination
* is recorded in srcs[count+1]].
*/
-static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
+static int set_syndrome_sources(struct page **srcs,
+ struct stripe_head *sh,
+ int srctype)
{
int disks = sh->disks;
int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
@@ -1171,8 +1342,15 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
i = d0_idx;
do {
int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
+ struct r5dev *dev = &sh->dev[i];
- srcs[slot] = sh->dev[i].page;
+ if (i == sh->qd_idx || i == sh->pd_idx ||
+ (srctype == SYNDROME_SRC_ALL) ||
+ (srctype == SYNDROME_SRC_WANT_DRAIN &&
+ test_bit(R5_Wantdrain, &dev->flags)) ||
+ (srctype == SYNDROME_SRC_WRITTEN &&
+ dev->written))
+ srcs[slot] = sh->dev[i].page;
i = raid6_next_disk(i, disks);
} while (i != d0_idx);
@@ -1183,7 +1361,7 @@ static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
int disks = sh->disks;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
int target;
int qd_idx = sh->qd_idx;
struct dma_async_tx_descriptor *tx;
@@ -1193,6 +1371,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
int i;
int count;
+ BUG_ON(sh->batch_head);
if (sh->ops.target < 0)
target = sh->ops.target2;
else if (sh->ops.target2 < 0)
@@ -1211,12 +1390,12 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
if (target == qd_idx) {
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
blocks[count] = NULL; /* regenerating p is not necessary */
BUG_ON(blocks[count+1] != dest); /* q should already be set */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
} else {
/* Compute any data- or p-drive using XOR */
@@ -1229,7 +1408,7 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
}
@@ -1248,9 +1427,10 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
struct r5dev *tgt = &sh->dev[target];
struct r5dev *tgt2 = &sh->dev[target2];
struct dma_async_tx_descriptor *tx;
- struct page **blocks = percpu->scribble;
+ struct page **blocks = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu block1: %d block2: %d\n",
__func__, (unsigned long long)sh->sector, target, target2);
BUG_ON(target < 0 || target2 < 0);
@@ -1290,7 +1470,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
/* Missing P+Q, just recompute */
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, syndrome_disks+2,
STRIPE_SIZE, &submit);
} else {
@@ -1314,21 +1494,21 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
init_async_submit(&submit,
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
&submit);
- count = set_syndrome_sources(blocks, sh);
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL);
init_async_submit(&submit, ASYNC_TX_FENCE, tx,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
return async_gen_syndrome(blocks, 0, count+2,
STRIPE_SIZE, &submit);
}
} else {
init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
ops_complete_compute, sh,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
if (failb == syndrome_disks) {
/* We're missing D+P. */
return async_raid6_datap_recov(syndrome_disks+2,
@@ -1352,17 +1532,18 @@ static void ops_complete_prexor(void *stripe_head_ref)
}
static struct dma_async_tx_descriptor *
-ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
- struct dma_async_tx_descriptor *tx)
+ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
+ BUG_ON(sh->batch_head);
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1374,31 +1555,56 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
}
init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
- ops_complete_prexor, sh, to_addr_conv(sh, percpu));
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
return tx;
}
static struct dma_async_tx_descriptor *
+ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu,
+ struct dma_async_tx_descriptor *tx)
+{
+ struct page **blocks = to_addr_page(percpu, 0);
+ int count;
+ struct async_submit_ctl submit;
+
+ pr_debug("%s: stripe %llu\n", __func__,
+ (unsigned long long)sh->sector);
+
+ count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN);
+
+ init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx,
+ ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0));
+ tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+
+ return tx;
+}
+
+static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
int i;
+ struct stripe_head *head_sh = sh;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
for (i = disks; i--; ) {
- struct r5dev *dev = &sh->dev[i];
+ struct r5dev *dev;
struct bio *chosen;
- if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
+ sh = head_sh;
+ if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) {
struct bio *wbi;
+again:
+ dev = &sh->dev[i];
spin_lock_irq(&sh->stripe_lock);
chosen = dev->towrite;
dev->towrite = NULL;
+ sh->overwrite_disks = 0;
BUG_ON(dev->written);
wbi = dev->written = chosen;
spin_unlock_irq(&sh->stripe_lock);
@@ -1423,6 +1629,15 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
wbi = r5_next_bio(wbi, dev->sector);
}
+
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh == head_sh)
+ continue;
+ goto again;
+ }
}
}
@@ -1478,12 +1693,15 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
int disks = sh->disks;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs;
struct async_submit_ctl submit;
- int count = 0, pd_idx = sh->pd_idx, i;
+ int count, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
int prexor = 0;
unsigned long flags;
+ int j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
@@ -1500,15 +1718,18 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
ops_complete_reconstruct(sh);
return;
}
+again:
+ count = 0;
+ xor_srcs = to_addr_page(percpu, j);
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (written)
*/
- if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
prexor = 1;
xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->written)
+ if (head_sh->dev[i].written)
xor_srcs[count++] = dev->page;
}
} else {
@@ -1525,17 +1746,32 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
* set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
* for the synchronous xor case
*/
- flags = ASYNC_TX_ACK |
- (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
-
- atomic_inc(&sh->count);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
+ if (last_stripe) {
+ flags = ASYNC_TX_ACK |
+ (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
+
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh,
+ to_addr_conv(sh, percpu, j));
+ } else {
+ flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST;
+ init_async_submit(&submit, flags, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
+ }
- init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
- to_addr_conv(sh, percpu));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
else
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
static void
@@ -1543,8 +1779,12 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
struct dma_async_tx_descriptor *tx)
{
struct async_submit_ctl submit;
- struct page **blocks = percpu->scribble;
- int count, i;
+ struct page **blocks;
+ int count, i, j = 0;
+ struct stripe_head *head_sh = sh;
+ int last_stripe;
+ int synflags;
+ unsigned long txflags;
pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
@@ -1562,13 +1802,36 @@ ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
return;
}
- count = set_syndrome_sources(blocks, sh);
+again:
+ blocks = to_addr_page(percpu, j);
- atomic_inc(&sh->count);
+ if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
+ synflags = SYNDROME_SRC_WRITTEN;
+ txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST;
+ } else {
+ synflags = SYNDROME_SRC_ALL;
+ txflags = ASYNC_TX_ACK;
+ }
+
+ count = set_syndrome_sources(blocks, sh, synflags);
+ last_stripe = !head_sh->batch_head ||
+ list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list) == head_sh;
- init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
- sh, to_addr_conv(sh, percpu));
+ if (last_stripe) {
+ atomic_inc(&head_sh->count);
+ init_async_submit(&submit, txflags, tx, ops_complete_reconstruct,
+ head_sh, to_addr_conv(sh, percpu, j));
+ } else
+ init_async_submit(&submit, 0, tx, NULL, NULL,
+ to_addr_conv(sh, percpu, j));
async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
+ if (!last_stripe) {
+ j++;
+ sh = list_first_entry(&sh->batch_list, struct stripe_head,
+ batch_list);
+ goto again;
+ }
}
static void ops_complete_check(void *stripe_head_ref)
@@ -1589,7 +1852,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
int pd_idx = sh->pd_idx;
int qd_idx = sh->qd_idx;
struct page *xor_dest;
- struct page **xor_srcs = percpu->scribble;
+ struct page **xor_srcs = to_addr_page(percpu, 0);
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
int count;
@@ -1598,6 +1861,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+ BUG_ON(sh->batch_head);
count = 0;
xor_dest = sh->dev[pd_idx].page;
xor_srcs[count++] = xor_dest;
@@ -1608,7 +1872,7 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
}
init_async_submit(&submit, 0, NULL, NULL, NULL,
- to_addr_conv(sh, percpu));
+ to_addr_conv(sh, percpu, 0));
tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
&sh->ops.zero_sum_result, &submit);
@@ -1619,20 +1883,21 @@ static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
- struct page **srcs = percpu->scribble;
+ struct page **srcs = to_addr_page(percpu, 0);
struct async_submit_ctl submit;
int count;
pr_debug("%s: stripe %llu checkp: %d\n", __func__,
(unsigned long long)sh->sector, checkp);
- count = set_syndrome_sources(srcs, sh);
+ BUG_ON(sh->batch_head);
+ count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL);
if (!checkp)
srcs[count] = NULL;
atomic_inc(&sh->count);
init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
- sh, to_addr_conv(sh, percpu));
+ sh, to_addr_conv(sh, percpu, 0));
async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
&sh->ops.zero_sum_result, percpu->spare_page, &submit);
}
@@ -1667,8 +1932,12 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
async_tx_ack(tx);
}
- if (test_bit(STRIPE_OP_PREXOR, &ops_request))
- tx = ops_run_prexor(sh, percpu, tx);
+ if (test_bit(STRIPE_OP_PREXOR, &ops_request)) {
+ if (level < 6)
+ tx = ops_run_prexor5(sh, percpu, tx);
+ else
+ tx = ops_run_prexor6(sh, percpu, tx);
+ }
if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
tx = ops_run_biodrain(sh, tx);
@@ -1693,7 +1962,7 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
BUG();
}
- if (overlap_clear)
+ if (overlap_clear && !sh->batch_head)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
@@ -1702,10 +1971,10 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
put_cpu();
}
-static int grow_one_stripe(struct r5conf *conf, int hash)
+static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
{
struct stripe_head *sh;
- sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
+ sh = kmem_cache_zalloc(conf->slab_cache, gfp);
if (!sh)
return 0;
@@ -1713,17 +1982,23 @@ static int grow_one_stripe(struct r5conf *conf, int hash)
spin_lock_init(&sh->stripe_lock);
- if (grow_buffers(sh)) {
+ if (grow_buffers(sh, gfp)) {
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
return 0;
}
- sh->hash_lock_index = hash;
+ sh->hash_lock_index =
+ conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
/* we just created an active stripe so... */
atomic_set(&sh->count, 1);
atomic_inc(&conf->active_stripes);
INIT_LIST_HEAD(&sh->lru);
+
+ spin_lock_init(&sh->batch_lock);
+ INIT_LIST_HEAD(&sh->batch_list);
+ sh->batch_head = NULL;
release_stripe(sh);
+ conf->max_nr_stripes++;
return 1;
}
@@ -1731,7 +2006,6 @@ static int grow_stripes(struct r5conf *conf, int num)
{
struct kmem_cache *sc;
int devs = max(conf->raid_disks, conf->previous_raid_disks);
- int hash;
if (conf->mddev->gendisk)
sprintf(conf->cache_name[0],
@@ -1749,13 +2023,10 @@ static int grow_stripes(struct r5conf *conf, int num)
return 1;
conf->slab_cache = sc;
conf->pool_size = devs;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (num--) {
- if (!grow_one_stripe(conf, hash))
+ while (num--)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
return 1;
- conf->max_nr_stripes++;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
return 0;
}
@@ -1772,13 +2043,21 @@ static int grow_stripes(struct r5conf *conf, int num)
* calculate over all devices (not just the data blocks), using zeros in place
* of the P and Q blocks.
*/
-static size_t scribble_len(int num)
+static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
{
+ struct flex_array *ret;
size_t len;
len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
-
- return len;
+ ret = flex_array_alloc(len, cnt, flags);
+ if (!ret)
+ return NULL;
+ /* always prealloc all elements, so no locking is required */
+ if (flex_array_prealloc(ret, 0, cnt, flags)) {
+ flex_array_free(ret);
+ return NULL;
+ }
+ return ret;
}
static int resize_stripes(struct r5conf *conf, int newsize)
@@ -1896,16 +2175,16 @@ static int resize_stripes(struct r5conf *conf, int newsize)
err = -ENOMEM;
get_online_cpus();
- conf->scribble_len = scribble_len(newsize);
for_each_present_cpu(cpu) {
struct raid5_percpu *percpu;
- void *scribble;
+ struct flex_array *scribble;
percpu = per_cpu_ptr(conf->percpu, cpu);
- scribble = kmalloc(conf->scribble_len, GFP_NOIO);
+ scribble = scribble_alloc(newsize, conf->chunk_sectors /
+ STRIPE_SECTORS, GFP_NOIO);
if (scribble) {
- kfree(percpu->scribble);
+ flex_array_free(percpu->scribble);
percpu->scribble = scribble;
} else {
err = -ENOMEM;
@@ -1937,9 +2216,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
return err;
}
-static int drop_one_stripe(struct r5conf *conf, int hash)
+static int drop_one_stripe(struct r5conf *conf)
{
struct stripe_head *sh;
+ int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
spin_lock_irq(conf->hash_locks + hash);
sh = get_free_stripe(conf, hash);
@@ -1950,15 +2230,15 @@ static int drop_one_stripe(struct r5conf *conf, int hash)
shrink_buffers(sh);
kmem_cache_free(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
+ conf->max_nr_stripes--;
return 1;
}
static void shrink_stripes(struct r5conf *conf)
{
- int hash;
- for (hash = 0; hash < NR_STRIPE_HASH_LOCKS; hash++)
- while (drop_one_stripe(conf, hash))
- ;
+ while (conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
if (conf->slab_cache)
kmem_cache_destroy(conf->slab_cache);
@@ -2154,10 +2434,16 @@ static void raid5_end_write_request(struct bio *bi, int error)
}
rdev_dec_pending(rdev, conf->mddev);
+ if (sh->batch_head && !uptodate)
+ set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
+
if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
+
+ if (sh->batch_head && sh != sh->batch_head)
+ release_stripe(sh->batch_head);
}
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
@@ -2535,7 +2821,7 @@ static void
schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int rcw, int expand)
{
- int i, pd_idx = sh->pd_idx, disks = sh->disks;
+ int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
@@ -2571,13 +2857,15 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);
} else {
- BUG_ON(level == 6);
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
+ BUG_ON(level == 6 &&
+ (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) ||
+ test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags))));
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (i == pd_idx)
+ if (i == pd_idx || i == qd_idx)
continue;
if (dev->towrite &&
@@ -2624,7 +2912,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
* toread/towrite point to the first in a chain.
* The bi_next chain must be in order.
*/
-static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
+static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
+ int forwrite, int previous)
{
struct bio **bip;
struct r5conf *conf = sh->raid_conf;
@@ -2643,6 +2932,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
* protect it.
*/
spin_lock_irq(&sh->stripe_lock);
+ /* Don't allow new IO to be added to stripes in a batch list */
+ if (sh->batch_head)
+ goto overlap;
if (forwrite) {
bip = &sh->dev[dd_idx].towrite;
if (*bip == NULL)
@@ -2657,6 +2949,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap;
+ if (!forwrite || previous)
+ clear_bit(STRIPE_BATCH_READY, &sh->state);
+
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip)
bi->bi_next = *bip;
@@ -2674,7 +2969,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sector = bio_end_sector(bi);
}
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
- set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
+ if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags))
+ sh->overwrite_disks++;
}
pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
@@ -2688,6 +2984,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
sh->bm_seq = conf->seq_flush+1;
set_bit(STRIPE_BIT_DELAY, &sh->state);
}
+
+ if (stripe_can_batch(sh))
+ stripe_add_to_batch_list(conf, sh);
return 1;
overlap:
@@ -2720,6 +3019,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio **return_bi)
{
int i;
+ BUG_ON(sh->batch_head);
for (i = disks; i--; ) {
struct bio *bi;
int bitmap_end = 0;
@@ -2746,6 +3046,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
/* fail all writes first */
bi = sh->dev[i].towrite;
sh->dev[i].towrite = NULL;
+ sh->overwrite_disks = 0;
spin_unlock_irq(&sh->stripe_lock);
if (bi)
bitmap_end = 1;
@@ -2834,6 +3135,7 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int abort = 0;
int i;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
@@ -3064,6 +3366,7 @@ static void handle_stripe_fill(struct stripe_head *sh,
{
int i;
+ BUG_ON(sh->batch_head);
/* look for blocks to read/compute, skip this if a compute
* is already in flight, or if the stripe contents are in the
* midst of changing due to a write
@@ -3087,6 +3390,9 @@ static void handle_stripe_clean_event(struct r5conf *conf,
int i;
struct r5dev *dev;
int discard_pending = 0;
+ struct stripe_head *head_sh = sh;
+ bool do_endio = false;
+ int wakeup_nr = 0;
for (i = disks; i--; )
if (sh->dev[i].written) {
@@ -3102,8 +3408,11 @@ static void handle_stripe_clean_event(struct r5conf *conf,
clear_bit(R5_UPTODATE, &dev->flags);
if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) {
WARN_ON(test_bit(R5_UPTODATE, &dev->flags));
- dev->page = dev->orig_page;
}
+ do_endio = true;
+
+returnbi:
+ dev->page = dev->orig_page;
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_iter.bi_sector <
@@ -3120,6 +3429,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head,
+ batch_list);
+ if (sh != head_sh) {
+ dev = &sh->dev[i];
+ goto returnbi;
+ }
+ }
+ sh = head_sh;
+ dev = &sh->dev[i];
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
@@ -3141,8 +3461,17 @@ static void handle_stripe_clean_event(struct r5conf *conf,
* will be reinitialized
*/
spin_lock_irq(&conf->device_lock);
+unhash:
remove_hash(sh);
+ if (head_sh->batch_head) {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ if (sh != head_sh)
+ goto unhash;
+ }
spin_unlock_irq(&conf->device_lock);
+ sh = head_sh;
+
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);
@@ -3151,6 +3480,45 @@ static void handle_stripe_clean_event(struct r5conf *conf,
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
md_wakeup_thread(conf->mddev->thread);
+
+ if (!head_sh->batch_head || !do_endio)
+ return;
+ for (i = 0; i < head_sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+ wakeup_nr++;
+ }
+ while (!list_empty(&head_sh->batch_list)) {
+ int i;
+ sh = list_first_entry(&head_sh->batch_list,
+ struct stripe_head, batch_list);
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++) {
+ if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+ wakeup_nr++;
+ sh->dev[i].flags = head_sh->dev[i].flags;
+ }
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+ if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+ }
+
+ spin_lock_irq(&head_sh->stripe_lock);
+ head_sh->batch_head = NULL;
+ spin_unlock_irq(&head_sh->stripe_lock);
+ wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
+ if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
+ set_bit(STRIPE_HANDLE, &head_sh->state);
}
static void handle_stripe_dirtying(struct r5conf *conf,
@@ -3161,28 +3529,27 @@ static void handle_stripe_dirtying(struct r5conf *conf,
int rmw = 0, rcw = 0, i;
sector_t recovery_cp = conf->mddev->recovery_cp;
- /* RAID6 requires 'rcw' in current implementation.
- * Otherwise, check whether resync is now happening or should start.
+ /* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
* initial creation), so parity in some stripes might be inconsistent.
* In this case, we need to always do reconstruct-write, to ensure
* that in case of drive failure or read-error correction, we
* generate correct data from the parity.
*/
- if (conf->max_degraded == 2 ||
+ if (conf->rmw_level == PARITY_DISABLE_RMW ||
(recovery_cp < MaxSector && sh->sector >= recovery_cp &&
s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
rcw = 1; rmw = 2;
- pr_debug("force RCW max_degraded=%u, recovery_cp=%llu sh->sector=%llu\n",
- conf->max_degraded, (unsigned long long)recovery_cp,
+ pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
+ conf->rmw_level, (unsigned long long)recovery_cp,
(unsigned long long)sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3192,7 +3559,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
rmw += 2*disks; /* cannot read it */
}
/* Would I have to read this buffer for reconstruct_write */
- if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
+ if (!test_bit(R5_OVERWRITE, &dev->flags) &&
+ i != sh->pd_idx && i != sh->qd_idx &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags))) {
@@ -3205,7 +3573,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
pr_debug("for sector %llu, rmw=%d rcw=%d\n",
(unsigned long long)sh->sector, rmw, rcw);
set_bit(STRIPE_HANDLE, &sh->state);
- if (rmw < rcw && rmw > 0) {
+ if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
@@ -3213,7 +3581,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if ((dev->towrite || i == sh->pd_idx) &&
+ if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) &&
!test_bit(R5_LOCKED, &dev->flags) &&
!(test_bit(R5_UPTODATE, &dev->flags) ||
test_bit(R5_Wantcompute, &dev->flags)) &&
@@ -3232,7 +3600,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
- if (rcw <= rmw && rcw > 0) {
+ if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) {
/* want reconstruct write, but need to get some data */
int qread =0;
rcw = 0;
@@ -3290,6 +3658,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
{
struct r5dev *dev = NULL;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
switch (sh->check_state) {
@@ -3380,6 +3749,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
int qd_idx = sh->qd_idx;
struct r5dev *dev;
+ BUG_ON(sh->batch_head);
set_bit(STRIPE_HANDLE, &sh->state);
BUG_ON(s->failed > 2);
@@ -3543,6 +3913,7 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
* copy some of them into a target stripe for expand.
*/
struct dma_async_tx_descriptor *tx = NULL;
+ BUG_ON(sh->batch_head);
clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
for (i = 0; i < sh->disks; i++)
if (i != sh->pd_idx && i != sh->qd_idx) {
@@ -3615,8 +3986,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
memset(s, 0, sizeof(*s));
- s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
- s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
+ s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head;
+ s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head;
s->failed_num[0] = -1;
s->failed_num[1] = -1;
@@ -3786,6 +4157,80 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
rcu_read_unlock();
}
+static int clear_batch_ready(struct stripe_head *sh)
+{
+ struct stripe_head *tmp;
+ if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
+ return 0;
+ spin_lock(&sh->stripe_lock);
+ if (!sh->batch_head) {
+ spin_unlock(&sh->stripe_lock);
+ return 0;
+ }
+
+ /*
+ * this stripe could be added to a batch list before we check
+ * BATCH_READY; skip it
+ */
+ if (sh->batch_head != sh) {
+ spin_unlock(&sh->stripe_lock);
+ return 1;
+ }
+ spin_lock(&sh->batch_lock);
+ list_for_each_entry(tmp, &sh->batch_list, batch_list)
+ clear_bit(STRIPE_BATCH_READY, &tmp->state);
+ spin_unlock(&sh->batch_lock);
+ spin_unlock(&sh->stripe_lock);
+
+ /*
+ * BATCH_READY is cleared, no new stripes can be added.
+ * batch_list can be accessed without lock
+ */
+ return 0;
+}
+
+static void check_break_stripe_batch_list(struct stripe_head *sh)
+{
+ struct stripe_head *head_sh, *next;
+ int i;
+
+ if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+ return;
+
+ head_sh = sh;
+ do {
+ sh = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ BUG_ON(sh == head_sh);
+ } while (!test_bit(STRIPE_DEGRADED, &sh->state));
+
+ while (sh != head_sh) {
+ next = list_first_entry(&sh->batch_list,
+ struct stripe_head, batch_list);
+ list_del_init(&sh->batch_list);
+
+ set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
+ head_sh->state & ~((1 << STRIPE_ACTIVE) |
+ (1 << STRIPE_PREREAD_ACTIVE) |
+ (1 << STRIPE_DEGRADED) |
+ STRIPE_EXPAND_SYNC_FLAG));
+ sh->check_state = head_sh->check_state;
+ sh->reconstruct_state = head_sh->reconstruct_state;
+ for (i = 0; i < sh->disks; i++)
+ sh->dev[i].flags = head_sh->dev[i].flags &
+ (~((1 << R5_WriteError) | (1 << R5_Overlap)));
+
+ spin_lock_irq(&sh->stripe_lock);
+ sh->batch_head = NULL;
+ spin_unlock_irq(&sh->stripe_lock);
+
+ set_bit(STRIPE_HANDLE, &sh->state);
+ release_stripe(sh);
+
+ sh = next;
+ }
+}
+
static void handle_stripe(struct stripe_head *sh)
{
struct stripe_head_state s;
@@ -3803,7 +4248,14 @@ static void handle_stripe(struct stripe_head *sh)
return;
}
- if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+ if (clear_batch_ready(sh)) {
+ clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
+ return;
+ }
+
+ check_break_stripe_batch_list(sh);
+
+ if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */
if (!test_bit(STRIPE_DISCARD, &sh->state) &&
@@ -4158,7 +4610,7 @@ static int raid5_congested(struct mddev *mddev, int bits)
* how busy the stripe_cache is
*/
- if (conf->inactive_blocked)
+ if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
return 1;
if (conf->quiesce)
return 1;
@@ -4180,8 +4632,12 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9;
- if ((bvm->bi_rw & 1) == WRITE)
- return biovec->bv_len; /* always allow writes to be mergeable */
+ /*
+ * always allow writes to be mergeable, reads as well if the array
+ * is degraded as we'll go through stripe cache anyway.
+ */
+ if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
+ return biovec->bv_len;
if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk_sectors;
@@ -4603,12 +5059,14 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
}
set_bit(STRIPE_DISCARD, &sh->state);
finish_wait(&conf->wait_for_overlap, &w);
+ sh->overwrite_disks = 0;
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
continue;
sh->dev[d].towrite = bi;
set_bit(R5_OVERWRITE, &sh->dev[d].flags);
raid5_inc_bi_active_stripes(bi);
+ sh->overwrite_disks++;
}
spin_unlock_irq(&sh->stripe_lock);
if (conf->mddev->bitmap) {
@@ -4656,7 +5114,12 @@ static void make_request(struct mddev *mddev, struct bio * bi)
md_write_start(mddev, bi);
- if (rw == READ &&
+ /*
+ * If array is degraded, better not do chunk aligned read because
+ * later we might have to read it again in order to reconstruct
+ * data on failed drives.
+ */
+ if (rw == READ && mddev->degraded == 0 &&
mddev->reshape_position == MaxSector &&
chunk_aligned_read(mddev,bi))
return;
@@ -4772,7 +5235,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
if (test_bit(STRIPE_EXPANDING, &sh->state) ||
- !add_stripe_bio(sh, bi, dd_idx, rw)) {
+ !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or
* add failed due to overlap. Flush everything
* and wait a while
@@ -4785,7 +5248,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
}
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
- if ((bi->bi_rw & REQ_SYNC) &&
+ if ((!sh->batch_head || sh == sh->batch_head) &&
+ (bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
release_stripe_plug(mddev, sh);
@@ -5050,8 +5514,7 @@ ret:
return reshape_sectors;
}
-/* FIXME go_faster isn't used */
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
+static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
{
struct r5conf *conf = mddev->private;
struct stripe_head *sh;
@@ -5186,7 +5649,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+ if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio;
@@ -5312,6 +5775,8 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
released = release_stripe_list(conf, conf->temp_inactive_list);
+ if (released)
+ clear_bit(R5_DID_ALLOC, &conf->cache_state);
if (
!list_empty(&conf->bitmap_list)) {
@@ -5350,6 +5815,13 @@ static void raid5d(struct md_thread *thread)
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+ grow_one_stripe(conf, __GFP_NOWARN);
+ /* Set flag even if allocation failed. This helps
+ * slow down allocation requests when mem is short
+ */
+ set_bit(R5_DID_ALLOC, &conf->cache_state);
+ }
async_tx_issue_pending_all();
blk_finish_plug(&plug);
@@ -5365,7 +5837,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
spin_lock(&mddev->lock);
conf = mddev->private;
if (conf)
- ret = sprintf(page, "%d\n", conf->max_nr_stripes);
+ ret = sprintf(page, "%d\n", conf->min_nr_stripes);
spin_unlock(&mddev->lock);
return ret;
}
@@ -5375,30 +5847,24 @@ raid5_set_cache_size(struct mddev *mddev, int size)
{
struct r5conf *conf = mddev->private;
int err;
- int hash;
if (size <= 16 || size > 32768)
return -EINVAL;
- hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
- while (size < conf->max_nr_stripes) {
- if (drop_one_stripe(conf, hash))
- conf->max_nr_stripes--;
- else
- break;
- hash--;
- if (hash < 0)
- hash = NR_STRIPE_HASH_LOCKS - 1;
- }
+
+ conf->min_nr_stripes = size;
+ while (size < conf->max_nr_stripes &&
+ drop_one_stripe(conf))
+ ;
+
+
err = md_allow_write(mddev);
if (err)
return err;
- hash = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
- while (size > conf->max_nr_stripes) {
- if (grow_one_stripe(conf, hash))
- conf->max_nr_stripes++;
- else break;
- hash = (hash + 1) % NR_STRIPE_HASH_LOCKS;
- }
+
+ while (size > conf->max_nr_stripes)
+ if (!grow_one_stripe(conf, GFP_KERNEL))
+ break;
+
return 0;
}
EXPORT_SYMBOL(raid5_set_cache_size);
@@ -5433,6 +5899,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
raid5_store_stripe_cache_size);
static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+ struct r5conf *conf = mddev->private;
+ if (conf)
+ return sprintf(page, "%d\n", conf->rmw_level);
+ else
+ return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+ struct r5conf *conf = mddev->private;
+ unsigned long new;
+
+ if (!conf)
+ return -ENODEV;
+
+ if (len >= PAGE_SIZE)
+ return -EINVAL;
+
+ if (kstrtoul(page, 10, &new))
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+ return -EINVAL;
+
+ if (new != PARITY_DISABLE_RMW &&
+ new != PARITY_ENABLE_RMW &&
+ new != PARITY_PREFER_RMW)
+ return -EINVAL;
+
+ conf->rmw_level = new;
+ return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+ raid5_show_rmw_level,
+ raid5_store_rmw_level);
+
+
+static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
struct r5conf *conf;
@@ -5463,7 +5972,7 @@ raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
conf = mddev->private;
if (!conf)
err = -ENODEV;
- else if (new > conf->max_nr_stripes)
+ else if (new > conf->min_nr_stripes)
err = -EINVAL;
else
conf->bypass_threshold = new;
@@ -5618,6 +6127,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_preread_bypass_threshold.attr,
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
+ &raid5_rmw_level.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
@@ -5699,7 +6209,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
{
safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (percpu->scribble)
+ flex_array_free(percpu->scribble);
percpu->spare_page = NULL;
percpu->scribble = NULL;
}
@@ -5709,7 +6220,9 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
if (conf->level == 6 && !percpu->spare_page)
percpu->spare_page = alloc_page(GFP_KERNEL);
if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+ percpu->scribble = scribble_alloc(max(conf->raid_disks,
+ conf->previous_raid_disks), conf->chunk_sectors /
+ STRIPE_SECTORS, GFP_KERNEL);
if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
free_scratch_buffer(conf, percpu);
@@ -5740,6 +6253,8 @@ static void raid5_free_percpu(struct r5conf *conf)
static void free_conf(struct r5conf *conf)
{
+ if (conf->shrinker.seeks)
+ unregister_shrinker(&conf->shrinker);
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -5807,6 +6322,30 @@ static int raid5_alloc_percpu(struct r5conf *conf)
return err;
}
+static unsigned long raid5_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+ int ret = 0;
+ while (ret < sc->nr_to_scan) {
+ if (drop_one_stripe(conf) == 0)
+ return SHRINK_STOP;
+ ret++;
+ }
+ return ret;
+}
+
+static unsigned long raid5_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+
+ if (conf->max_nr_stripes < conf->min_nr_stripes)
+ /* unlikely, but not impossible */
+ return 0;
+ return conf->max_nr_stripes - conf->min_nr_stripes;
+}
+
static struct r5conf *setup_conf(struct mddev *mddev)
{
struct r5conf *conf;
@@ -5879,7 +6418,6 @@ static struct r5conf *setup_conf(struct mddev *mddev)
else
conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
max_disks = max(conf->raid_disks, conf->previous_raid_disks);
- conf->scribble_len = scribble_len(max_disks);
conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
GFP_KERNEL);
@@ -5907,6 +6445,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
INIT_LIST_HEAD(conf->temp_inactive_list + i);
conf->level = mddev->new_level;
+ conf->chunk_sectors = mddev->new_chunk_sectors;
if (raid5_alloc_percpu(conf) != 0)
goto abort;
@@ -5939,12 +6478,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->fullsync = 1;
}
- conf->chunk_sectors = mddev->new_chunk_sectors;
conf->level = mddev->new_level;
- if (conf->level == 6)
+ if (conf->level == 6) {
conf->max_degraded = 2;
- else
+ if (raid6_call.xor_syndrome)
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ else
+ conf->rmw_level = PARITY_DISABLE_RMW;
+ } else {
conf->max_degraded = 1;
+ conf->rmw_level = PARITY_ENABLE_RMW;
+ }
conf->algorithm = mddev->new_layout;
conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) {
@@ -5952,10 +6496,11 @@ static struct r5conf *setup_conf(struct mddev *mddev)
conf->prev_algo = mddev->layout;
}
- memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
+ conf->min_nr_stripes = NR_STRIPES;
+ memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
- if (grow_stripes(conf, NR_STRIPES)) {
+ if (grow_stripes(conf, conf->min_nr_stripes)) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate %dkB for buffers\n",
mdname(mddev), memory);
@@ -5963,6 +6508,17 @@ static struct r5conf *setup_conf(struct mddev *mddev)
} else
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
+ /*
+ * Losing a stripe head costs more than the time to refill it,
+ * it reduces the queue depth and so can hurt throughput.
+ * So set it rather large, scaled by number of devices.
+ */
+ conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
+ conf->shrinker.scan_objects = raid5_cache_scan;
+ conf->shrinker.count_objects = raid5_cache_count;
+ conf->shrinker.batch = 128;
+ conf->shrinker.flags = 0;
+ register_shrinker(&conf->shrinker);
sprintf(pers_name, "raid%d", mddev->new_level);
conf->thread = md_register_thread(raid5d, mddev, pers_name);
@@ -6604,9 +7160,9 @@ static int check_stripe_cache(struct mddev *mddev)
*/
struct r5conf *conf = mddev->private;
if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes ||
+ > conf->min_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
- > conf->max_nr_stripes) {
+ > conf->min_nr_stripes) {
printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n",
mdname(mddev),
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
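
The raid5.c hunks above replace the single kmalloc'ed per-CPU scribble region with a flex_array holding one element per stripe of a chunk. As a rough, illustrative sketch of that allocation pattern (mirroring scribble_alloc()/to_addr_page(), not the patch's literal code), assuming the same element layout of page pointers followed by address-conversion slots:

#include <linux/async_tx.h>
#include <linux/flex_array.h>

/*
 * Allocate 'cnt' elements, each holding (num + 2) page pointers plus
 * (num + 2) addr_conv_t slots, and prealloc them all up front so that
 * flex_array_get() never has to allocate in the I/O path.
 */
static struct flex_array *scribble_alloc_sketch(int num, int cnt, gfp_t flags)
{
	size_t len = sizeof(struct page *) * (num + 2) +
		     sizeof(addr_conv_t) * (num + 2);
	struct flex_array *fa = flex_array_alloc(len, cnt, flags);

	if (!fa)
		return NULL;
	if (flex_array_prealloc(fa, 0, cnt, flags)) {
		flex_array_free(fa);
		return NULL;
	}
	return fa;
}

/*
 * Element i is later fetched with flex_array_get(fa, i); the page-pointer
 * array sits at the start of the element and the addr_conv_t region
 * immediately after it, which is what to_addr_page()/to_addr_conv() rely on.
 */
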
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 983e18a83db1..7dc0dd86074b 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -210,11 +210,19 @@ struct stripe_head {
atomic_t count; /* nr of active thread/requests */
int bm_seq; /* sequence number for bitmap flushes */
int disks; /* disks in stripe */
+ int overwrite_disks; /* total overwrite disks in stripe,
+ * this is only checked when stripe
+ * has STRIPE_BATCH_READY
+ */
enum check_states check_state;
enum reconstruct_states reconstruct_state;
spinlock_t stripe_lock;
int cpu;
struct r5worker_group *group;
+
+ struct stripe_head *batch_head; /* protected by stripe lock */
+ spinlock_t batch_lock; /* only header's lock is useful */
+ struct list_head batch_list; /* protected by head's batch lock*/
/**
* struct stripe_operations
* @target - STRIPE_OP_COMPUTE_BLK target
@@ -327,8 +335,15 @@ enum {
STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD,
STRIPE_ON_RELEASE_LIST,
+ STRIPE_BATCH_READY,
+ STRIPE_BATCH_ERR,
};
+#define STRIPE_EXPAND_SYNC_FLAG \
+ ((1 << STRIPE_EXPAND_SOURCE) |\
+ (1 << STRIPE_EXPAND_READY) |\
+ (1 << STRIPE_EXPANDING) |\
+ (1 << STRIPE_SYNC_REQUESTED))
/*
* Operation request flags
*/
@@ -340,6 +355,24 @@ enum {
STRIPE_OP_RECONSTRUCT,
STRIPE_OP_CHECK,
};
+
+/*
+ * RAID parity calculation preferences
+ */
+enum {
+ PARITY_DISABLE_RMW = 0,
+ PARITY_ENABLE_RMW,
+ PARITY_PREFER_RMW,
+};
+
+/*
+ * Pages requested from set_syndrome_sources()
+ */
+enum {
+ SYNDROME_SRC_ALL,
+ SYNDROME_SRC_WANT_DRAIN,
+ SYNDROME_SRC_WRITTEN,
+};
/*
* Plugging:
*
@@ -396,10 +429,11 @@ struct r5conf {
spinlock_t hash_locks[NR_STRIPE_HASH_LOCKS];
struct mddev *mddev;
int chunk_sectors;
- int level, algorithm;
+ int level, algorithm, rmw_level;
int max_degraded;
int raid_disks;
int max_nr_stripes;
+ int min_nr_stripes;
/* reshape_progress is the leading edge of a 'reshape'
* It has value MaxSector when no reshape is happening
@@ -458,15 +492,11 @@ struct r5conf {
/* per cpu variables */
struct raid5_percpu {
struct page *spare_page; /* Used when checking P/Q in raid6 */
- void *scribble; /* space for constructing buffer
+ struct flex_array *scribble; /* space for constructing buffer
* lists and performing address
* conversions
*/
} __percpu *percpu;
- size_t scribble_len; /* size of scribble region must be
- * associated with conf to handle
- * cpu hotplug while reshaping
- */
#ifdef CONFIG_HOTPLUG_CPU
struct notifier_block cpu_notify;
#endif
@@ -480,9 +510,19 @@ struct r5conf {
struct llist_head released_stripes;
wait_queue_head_t wait_for_stripe;
wait_queue_head_t wait_for_overlap;
- int inactive_blocked; /* release of inactive stripes blocked,
- * waiting for 25% to be free
- */
+ unsigned long cache_state;
+#define R5_INACTIVE_BLOCKED 1 /* release of inactive stripes blocked,
+ * waiting for 25% to be free
+ */
+#define R5_ALLOC_MORE 2 /* It might help to allocate another
+ * stripe.
+ */
+#define R5_DID_ALLOC 4 /* A stripe was allocated, don't allocate
+ * more until at least one has been
+ * released. This avoids flooding
+ * the cache.
+ */
+ struct shrinker shrinker;
int pool_size; /* number of disks in stripeheads in pool */
spinlock_t device_lock;
struct disk_info *disks;
@@ -497,6 +537,7 @@ struct r5conf {
int worker_cnt_per_group;
};
+
/*
* Our supported algorithms
*/
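
For a reader unfamiliar with the shrinker interface that setup_conf()/free_conf() now wire up, a minimal, self-contained sketch of the same pattern may help; the names (my_cache, my_cache_*) are hypothetical, and only the shrinker API calls themselves are real:

#include <linux/kernel.h>
#include <linux/shrinker.h>

struct my_cache {
	unsigned int nr_entries;	/* current size, analogous to max_nr_stripes */
	unsigned int min_entries;	/* floor, analogous to min_nr_stripes */
	struct shrinker shrinker;
};

/* hypothetical helper: free one entry, return 1 on success, 0 if at the floor */
static int my_cache_drop_one(struct my_cache *c)
{
	if (c->nr_entries <= c->min_entries)
		return 0;
	c->nr_entries--;
	return 1;
}

static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct my_cache *c = container_of(shrink, struct my_cache, shrinker);

	return c->nr_entries > c->min_entries ?
		c->nr_entries - c->min_entries : 0;
}

static unsigned long my_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct my_cache *c = container_of(shrink, struct my_cache, shrinker);
	unsigned long freed = 0;

	while (freed < sc->nr_to_scan && my_cache_drop_one(c))
		freed++;
	return freed ? freed : SHRINK_STOP;
}

static int my_cache_register(struct my_cache *c)
{
	c->shrinker.count_objects = my_cache_count;
	c->shrinker.scan_objects  = my_cache_scan;
	c->shrinker.seeks         = DEFAULT_SEEKS;
	c->shrinker.batch         = 128;
	return register_shrinker(&c->shrinker);	/* paired with unregister_shrinker() on teardown */
}
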
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 10209c294168..efde88adf624 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -12,7 +12,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/amba/xilinx_dma.h>
+#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 69e0483adfee..644dec73d220 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -402,6 +402,12 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (!buf->sgt_base)
buf->sgt_base = vb2_dc_get_base_sgt(buf);
@@ -409,7 +415,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
if (WARN_ON(!buf->sgt_base))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index b1838abb6d00..45c708e463b9 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -583,11 +583,17 @@ static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dma_sg_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (WARN_ON(!buf->dma_sgt))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dma_sg_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c
index bcde88572429..657ab302a5cf 100644
--- a/drivers/media/v4l2-core/videobuf2-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c
@@ -368,11 +368,17 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_vmalloc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
if (WARN_ON(!buf->vaddr))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_vmalloc_dmabuf_ops, buf->size, flags, NULL);
+ dbuf = dma_buf_export(&exp_info);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index fc0c81ef04ff..c4aecc6f8373 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -74,15 +74,11 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
ret = ec_dev->cmd_xfer(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
- struct cros_ec_command status_msg;
- struct ec_response_get_comms_status status;
+ struct cros_ec_command status_msg = { };
+ struct ec_response_get_comms_status *status;
- status_msg.version = 0;
status_msg.command = EC_CMD_GET_COMMS_STATUS;
- status_msg.outdata = NULL;
- status_msg.outsize = 0;
- status_msg.indata = (uint8_t *)&status;
- status_msg.insize = sizeof(status);
+ status_msg.insize = sizeof(*status);
/*
* Query the EC's status until it's no longer busy or
@@ -98,7 +94,10 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
msg->result = status_msg.result;
if (status_msg.result != EC_RES_SUCCESS)
break;
- if (!(status.flags & EC_COMMS_STATUS_PROCESSING))
+
+ status = (struct ec_response_get_comms_status *)
+ status_msg.indata;
+ if (!(status->flags & EC_COMMS_STATUS_PROCESSING))
break;
}
}
@@ -119,6 +118,10 @@ static const struct mfd_cell cros_devs[] = {
.id = 2,
.of_compatible = "google,cros-ec-i2c-tunnel",
},
+ {
+ .name = "cros-ec-ctl",
+ .id = 3,
+ },
};
int cros_ec_register(struct cros_ec_device *ec_dev)
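
In the cros_ec hunk above, the separate on-stack response struct goes away: the status is read back through the command's own in-buffer and cast to the response type. A minimal sketch of the resulting query, assuming indata is an in-struct buffer as the cast in the hunk implies; the wrapper name is illustrative.

static int ec_done_processing(struct cros_ec_device *ec_dev)
{
	struct cros_ec_command status_msg = {
		.command = EC_CMD_GET_COMMS_STATUS,
		.insize  = sizeof(struct ec_response_get_comms_status),
	};
	struct ec_response_get_comms_status *status;
	int ret;

	ret = ec_dev->cmd_xfer(ec_dev, &status_msg);
	if (ret < 0)
		return ret;

	/* the response lands in status_msg.indata */
	status = (struct ec_response_get_comms_status *)status_msg.indata;
	return !(status->flags & EC_COMMS_STATUS_PROCESSING);
}
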
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 072f67066df3..2b6ef6bd5d5f 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -388,7 +388,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
{
struct dma_slave_config cfg = { 0, };
struct dma_chan *chan;
- unsigned int slave_id;
+ void *slave_data = NULL;
struct resource *res;
dma_cap_mask_t mask;
int ret;
@@ -397,13 +397,12 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
dma_cap_set(DMA_SLAVE, mask);
if (pdata)
- slave_id = direction == DMA_MEM_TO_DEV
- ? pdata->slave_id_tx : pdata->slave_id_rx;
- else
- slave_id = 0;
+ slave_data = direction == DMA_MEM_TO_DEV ?
+ (void *)pdata->slave_id_tx :
+ (void *)pdata->slave_id_rx;
chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
- (void *)(unsigned long)slave_id, &host->pd->dev,
+ slave_data, &host->pd->dev,
direction == DMA_MEM_TO_DEV ? "tx" : "rx");
dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__,
@@ -414,8 +413,6 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host,
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
- /* In the OF case the driver will get the slave ID from the DT */
- cfg.slave_id = slave_id;
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
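
The sh_mmcif hunk above folds the platform slave ID into a single void * cookie handed to the filter function. A sketch of the request path on its own; the wrapper is illustrative, the filter and channel naming follow the hunk.

#include <linux/dmaengine.h>

static struct dma_chan *request_tx_chan(struct device *dev, void *slave_data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * DT systems resolve the channel by its "tx" name; legacy platform
	 * data goes through shdma_chan_filter() with the slave_data cookie.
	 */
	return dma_request_slave_channel_compat(mask, shdma_chan_filter,
						slave_data, dev, "tx");
}
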
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 6906a905cd54..354f4f335ed5 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -201,7 +201,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
of_match_device(sh_mobile_sdhi_of_match, &pdev->dev);
struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data;
- struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
+ struct tmio_mmc_data *mmd = pdev->dev.platform_data;
struct tmio_mmc_host *host;
struct resource *res;
int irq, ret, i = 0;
@@ -245,30 +245,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
else
host->bus_shift = 0;
- mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
- if (p) {
- mmc_data->flags = p->tmio_flags;
- mmc_data->ocr_mask = p->tmio_ocr_mask;
- mmc_data->capabilities |= p->tmio_caps;
- mmc_data->capabilities2 |= p->tmio_caps2;
- mmc_data->cd_gpio = p->cd_gpio;
-
- if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
- /*
- * Yes, we have to provide slave IDs twice to TMIO:
- * once as a filter parameter and once for channel
- * configuration as an explicit slave ID
- */
- dma_priv->chan_priv_tx = (void *)p->dma_slave_tx;
- dma_priv->chan_priv_rx = (void *)p->dma_slave_rx;
- dma_priv->slave_id_tx = p->dma_slave_tx;
- dma_priv->slave_id_rx = p->dma_slave_rx;
- }
- }
+ if (mmd)
+ *mmc_data = *mmd;
+
dma_priv->filter = shdma_chan_filter;
dma_priv->enable = sh_mobile_sdhi_enable_dma;
mmc_data->alignment_shift = 1; /* 2-byte alignment */
+ mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED;
/*
* All SDHI blocks support 2-byte and larger block sizes in 4-bit
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index fc3805ed69d1..4a597f5a53e2 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -43,10 +43,6 @@ struct tmio_mmc_data;
struct tmio_mmc_host;
struct tmio_mmc_dma {
- void *chan_priv_tx;
- void *chan_priv_rx;
- int slave_id_tx;
- int slave_id_rx;
enum dma_slave_buswidth dma_buswidth;
bool (*filter)(struct dma_chan *chan, void *arg);
void (*enable)(struct tmio_mmc_host *host, bool enable);
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 331bb618e398..e4b05dbb9ca8 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -261,7 +261,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (!host->dma || (!host->pdev->dev.of_node &&
- (!host->dma->chan_priv_tx || !host->dma->chan_priv_rx)))
+ (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
return;
if (!host->chan_tx && !host->chan_rx) {
@@ -278,7 +278,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_slave_channel_compat(mask,
- host->dma->filter, host->dma->chan_priv_tx,
+ host->dma->filter, pdata->chan_priv_tx,
&host->pdev->dev, "tx");
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
@@ -286,8 +286,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_tx)
return;
- if (host->dma->chan_priv_tx)
- cfg.slave_id = host->dma->slave_id_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
cfg.dst_addr_width = host->dma->dma_buswidth;
@@ -299,7 +297,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
goto ecfgtx;
host->chan_rx = dma_request_slave_channel_compat(mask,
- host->dma->filter, host->dma->chan_priv_rx,
+ host->dma->filter, pdata->chan_priv_rx,
&host->pdev->dev, "rx");
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
@@ -307,8 +305,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->chan_rx)
goto ereqrx;
- if (host->dma->chan_priv_rx)
- cfg.slave_id = host->dma->slave_id_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
cfg.src_addr_width = host->dma->dma_buswidth;
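
With the slave ID now carried by the channel request itself, the tmio hunks above only describe the transfer in dma_slave_config. A condensed sketch of the remaining configuration step; the FIFO address and bus width are placeholders.

#include <linux/dmaengine.h>

static int cfg_tx(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = { };

	cfg.direction      = DMA_MEM_TO_DEV;
	cfg.dst_addr       = fifo;	/* device data port */
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.src_addr       = 0;

	return dmaengine_slave_config(chan, &cfg);
}
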
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 71fea895ce38..a03ad2951c7b 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -309,6 +309,19 @@ config MTD_SWAP
The driver provides wear leveling by storing erase counter into the
OOB.
+config MTD_PARTITIONED_MASTER
+ bool "Retain master device when partitioned"
+ default n
+ depends on MTD
+ help
+ For historical reasons, by default, either a master is present or
+ several partitions are present, but not both. The concern was that
+ data listed in multiple partitions was dangerous; however, SCSI does
+ this and it is frequently useful for applications. This config option
+ leaves the master in even if the device is partitioned. It also makes
+ the parent of the partition device be the master device, rather than
+ what lies behind the master.
+
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 423666b51efb..9a1a6ffd16b8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -206,23 +206,23 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
offset += (ersize * ernum);
- }
+ }
- if (offset != devsize) {
- /* Argh */
- printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
- kfree(mtd->eraseregions);
- kfree(cfi->cmdset_priv);
- kfree(mtd);
- return NULL;
- }
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ kfree(mtd->eraseregions);
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
- for (i=0; i<mtd->numeraseregions;i++){
- printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
- i, (unsigned long long)mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
- }
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
+ i, (unsigned long long)mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
/* Also select the correct geometry setup too */
mtd->_erase = cfi_staa_erase_varsize;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 66f0405f7e53..b16f3cda97ff 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -9,7 +9,15 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*
+ * When the first attempt at device initialization fails, we may need to
+ * wait a little bit and retry. This timeout, by default 3 seconds, gives
+ * the device time to start up. Required on BCM2708 and a few other chipsets.
+ */
+#define MTD_DEFAULT_TIMEOUT 3
+
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
@@ -209,10 +217,14 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
}
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size,
+ int timeout)
{
+#ifndef MODULE
+ int i;
+#endif
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
- struct block_device *bdev;
+ struct block_device *bdev = ERR_PTR(-ENODEV);
struct block2mtd_dev *dev;
char *name;
@@ -225,15 +237,28 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Get a handle on the device */
bdev = blkdev_get_by_path(devname, mode, dev);
-#ifndef MODULE
- if (IS_ERR(bdev)) {
-
- /* We might not have rootfs mounted at this point. Try
- to resolve the device name by other means. */
- dev_t devt = name_to_dev_t(devname);
- if (devt)
- bdev = blkdev_get_by_dev(devt, mode, dev);
+#ifndef MODULE
+ /*
+ * We might not have the root device mounted at this point.
+ * Try to resolve the device name by other means.
+ */
+ for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
+ dev_t devt;
+
+ if (i)
+ /*
+ * Calling wait_for_device_probe in the first loop
+ * was not enough, sleep for a bit in subsequent
+ * go-arounds.
+ */
+ msleep(1000);
+ wait_for_device_probe();
+
+ devt = name_to_dev_t(devname);
+ if (!devt)
+ continue;
+ bdev = blkdev_get_by_dev(devt, mode, dev);
}
#endif
@@ -280,6 +305,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
/* Device didn't get added, so free the entry */
goto err_destroy_mutex;
}
+
list_add(&dev->list, &blkmtd_device_list);
pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
dev->mtd.index,
@@ -348,16 +374,19 @@ static inline void kill_final_newline(char *str)
#ifndef MODULE
static int block2mtd_init_called = 0;
-static char block2mtd_paramline[80 + 12]; /* 80 for device, 12 for erase size */
+/* 80 for device, 12 for erase size */
+static char block2mtd_paramline[80 + 12];
#endif
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ /* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
+ char buf[80 + 12 + 80 + 8];
char *str = buf;
char *token[2];
char *name;
size_t erase_size = PAGE_SIZE;
+ unsigned long timeout = MTD_DEFAULT_TIMEOUT;
int i, ret;
if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
@@ -395,7 +424,7 @@ static int block2mtd_setup2(const char *val)
}
}
- add_device(name, erase_size);
+ add_device(name, erase_size, timeout);
return 0;
}
@@ -463,8 +492,7 @@ static void block2mtd_exit(void)
}
}
-
-module_init(block2mtd_init);
+late_initcall(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
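
The block2mtd change above exists because the backing block device may probe later than this driver; the helper below condenses the same retry logic (timeout is in seconds, matching MTD_DEFAULT_TIMEOUT).

static struct block_device *open_by_name(const char *devname, fmode_t mode,
					 void *holder, int timeout)
{
	struct block_device *bdev = ERR_PTR(-ENODEV);
	int i;

	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
		dev_t devt;

		if (i)
			msleep(1000);	/* give late probes another second */
		wait_for_device_probe();

		devt = name_to_dev_t(devname);
		if (devt)
			bdev = blkdev_get_by_dev(devt, mode, holder);
	}
	return bdev;
}
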
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 448ce42f951e..866d31904475 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1805,7 +1805,7 @@ static int __init doc_dbg_register(struct docg3 *docg3)
}
}
-static void __exit doc_dbg_unregister(struct docg3 *docg3)
+static void doc_dbg_unregister(struct docg3 *docg3)
{
debugfs_remove_recursive(docg3->debugfs_root);
}
@@ -2033,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct resource *ress;
void __iomem *base;
- int ret, floor, found = 0;
+ int ret, floor;
struct docg3_cascade *cascade;
ret = -ENXIO;
@@ -2073,14 +2073,11 @@ static int __init docg3_probe(struct platform_device *pdev)
0);
if (ret)
goto err_probe;
- found++;
}
ret = doc_register_sysfs(pdev, cascade);
if (ret)
goto err_probe;
- if (!found)
- goto notfound;
platform_set_drvdata(pdev, cascade);
doc_dbg_register(cascade->floors[0]->priv);
@@ -2103,7 +2100,7 @@ err_probe:
*
* Returns 0
*/
-static int __exit docg3_release(struct platform_device *pdev)
+static int docg3_release(struct platform_device *pdev)
{
struct docg3_cascade *cascade = platform_get_drvdata(pdev);
struct docg3 *docg3 = cascade->floors[0]->priv;
@@ -2134,7 +2131,7 @@ static struct platform_driver g3_driver = {
},
.suspend = docg3_suspend,
.resume = docg3_resume,
- .remove = __exit_p(docg3_release),
+ .remove = docg3_release,
};
module_platform_driver_probe(g3_driver, docg3_probe);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 85e35467fba6..7c8b1694a134 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -223,6 +223,8 @@ static int m25p_probe(struct spi_device *spi)
*/
if (data && data->type)
flash_name = data->type;
+ else if (!strcmp(spi->modalias, "nor-jedec"))
+ flash_name = NULL; /* auto-detect */
else
flash_name = spi->modalias;
@@ -247,9 +249,16 @@ static int m25p_remove(struct spi_device *spi)
}
/*
- * XXX This needs to be kept in sync with spi_nor_ids. We can't share
- * it with spi-nor, because if this is built as a module then modpost
- * won't be able to read it and add appropriate aliases.
+ * Do NOT add to this array without reading the following:
+ *
+ * Historically, many flash devices are bound to this driver by their name. But
+ * since most of these flashes are compatible to some extent, and can often
+ * be distinguished by the JEDEC read-ID command, we
+ * encourage new users to add support to the spi-nor library, and simply bind
+ * against a generic string here (e.g., "nor-jedec").
+ *
+ * Many flash names are kept here in this list (as well as in spi-nor.c) to
+ * keep them available as module aliases for existing platforms.
*/
static const struct spi_device_id m25p_ids[] = {
{"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
@@ -291,6 +300,12 @@ static const struct spi_device_id m25p_ids[] = {
{"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"},
{"w25q128"}, {"w25q256"}, {"cat25c11"},
{"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
+
+ /*
+ * Generic support for SPI NOR that can be identified by the JEDEC READ
+ * ID opcode (0x9F). Use this, if possible.
+ */
+ {"nor-jedec"},
{ },
};
MODULE_DEVICE_TABLE(spi, m25p_ids);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index ba801d2c6dcc..e715ae90632f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -242,7 +242,7 @@ config MTD_L440GX
config MTD_CFI_FLAGADM
tristate "CFI Flash device mapping on FlagaDM"
- depends on 8xx && MTD_CFI
+ depends on PPC_8xx && MTD_CFI
help
Mapping for the Flaga digital module. If you don't have one, ignore
this setting.
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index ea697202935a..892ad6ac63f2 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -274,7 +274,7 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
return err;
}
-static int __exit sa1100_mtd_remove(struct platform_device *pdev)
+static int sa1100_mtd_remove(struct platform_device *pdev)
{
struct sa_info *info = platform_get_drvdata(pdev);
struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
@@ -286,7 +286,7 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
- .remove = __exit_p(sa1100_mtd_remove),
+ .remove = sa1100_mtd_remove,
.driver = {
.name = "sa1100-mtd",
},
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index d1d671daf235..9969fedb1f13 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -117,5 +117,5 @@ module_exit(cleanup_ts5500_map);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
-MODULE_DESCRIPTION("MTD map driver for Techology Systems TS-5500 board");
+MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index d08229eb44d8..2b0c52870999 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -171,9 +171,6 @@ static void mtd_blktrans_work(struct work_struct *work)
background_done = 0;
}
- if (req)
- __blk_end_request_all(req, -EIO);
-
spin_unlock_irq(rq->queue_lock);
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 11883bd26d9d..d172195fbd15 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -38,6 +38,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
+#include <linux/kconfig.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -501,6 +502,29 @@ out_error:
return ret;
}
+static int mtd_add_device_partitions(struct mtd_info *mtd,
+ struct mtd_partition *real_parts,
+ int nbparts)
+{
+ int ret;
+
+ if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
+ ret = add_mtd_device(mtd);
+ if (ret == 1)
+ return -ENODEV;
+ }
+
+ if (nbparts > 0) {
+ ret = add_mtd_partitions(mtd, real_parts, nbparts);
+ if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ del_mtd_device(mtd);
+ return ret;
+ }
+
+ return 0;
+}
+
+
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
*
@@ -523,7 +547,8 @@ out_error:
* found this functions tries to fallback to information specified in
* @parts/@nr_parts.
* * If any partitioning info was found, this function registers the found
- * partitions.
+ * partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
+ * as a whole is registered first.
* * If no partitions were found this function just registers the MTD device
* @mtd and exits.
*
@@ -534,27 +559,21 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
const struct mtd_partition *parts,
int nr_parts)
{
- int err;
- struct mtd_partition *real_parts;
+ int ret;
+ struct mtd_partition *real_parts = NULL;
- err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
- if (err <= 0 && nr_parts && parts) {
+ ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+ if (ret <= 0 && nr_parts && parts) {
real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
GFP_KERNEL);
if (!real_parts)
- err = -ENOMEM;
+ ret = -ENOMEM;
else
- err = nr_parts;
+ ret = nr_parts;
}
- if (err > 0) {
- err = add_mtd_partitions(mtd, real_parts, err);
- kfree(real_parts);
- } else if (err == 0) {
- err = add_mtd_device(mtd);
- if (err == 1)
- err = -ENODEV;
- }
+ if (ret >= 0)
+ ret = mtd_add_device_partitions(mtd, real_parts, ret);
/*
* FIXME: some drivers unfortunately call this function more than once.
@@ -569,7 +588,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
register_reboot_notifier(&mtd->reboot_notifier);
}
- return err;
+ kfree(real_parts);
+ return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
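
From a flash driver's point of view the registration call is unchanged; whether the whole-device ("master") node also appears now depends only on CONFIG_MTD_PARTITIONED_MASTER. A sketch of the usual caller; the wrapper and the OF node argument are illustrative.

static int my_flash_register(struct mtd_info *mtd, struct device_node *np)
{
	struct mtd_part_parser_data ppdata = { .of_node = np };

	/*
	 * Runs the partition parsers first; with MTD_PARTITIONED_MASTER=y
	 * the master is registered as well and becomes the parent device
	 * of every partition.
	 */
	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}
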
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index e779de315ade..cafdb8855a79 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -30,6 +30,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
+#include <linux/kconfig.h>
#include "mtdcore.h"
@@ -379,10 +380,17 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.name = name;
slave->mtd.owner = master->owner;
- /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
- * to have the same data be in two different partitions.
+ /* NOTE: Historically, we didn't arrange MTDs as a tree out of
+ * concern for showing the same data in multiple partitions.
+ * However, it is very useful to have the master node present,
+ * so the MTD_PARTITIONED_MASTER option allows that. The master
+ * will have device nodes etc only if this is set, so make the
+ * parent conditional on that option. Note that this is a way to
+ * distinguish between the master and the partition in sysfs.
*/
- slave->mtd.dev.parent = master->dev.parent;
+ slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
+ &master->dev :
+ master->dev.parent;
slave->mtd._read = part_read;
slave->mtd._write = part_write;
@@ -546,12 +554,35 @@ out_register:
return slave;
}
+static ssize_t mtd_partition_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_part *part = PART(mtd);
+ return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+}
+
+static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
+
+static const struct attribute *mtd_partition_attrs[] = {
+ &dev_attr_offset.attr,
+ NULL
+};
+
+static int mtd_add_partition_attrs(struct mtd_part *new)
+{
+ int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
+ if (ret)
+ printk(KERN_WARNING
+ "mtd: failed to create partition attrs, err=%d\n", ret);
+ return ret;
+}
+
int mtd_add_partition(struct mtd_info *master, const char *name,
long long offset, long long length)
{
struct mtd_partition part;
- struct mtd_part *p, *new;
- uint64_t start, end;
+ struct mtd_part *new;
int ret = 0;
/* the direct offset is expected */
@@ -575,31 +606,15 @@ int mtd_add_partition(struct mtd_info *master, const char *name,
if (IS_ERR(new))
return PTR_ERR(new);
- start = offset;
- end = offset + length;
-
mutex_lock(&mtd_partitions_mutex);
- list_for_each_entry(p, &mtd_partitions, list)
- if (p->master == master) {
- if ((start >= p->offset) &&
- (start < (p->offset + p->mtd.size)))
- goto err_inv;
-
- if ((end >= p->offset) &&
- (end < (p->offset + p->mtd.size)))
- goto err_inv;
- }
-
list_add(&new->list, &mtd_partitions);
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&new->mtd);
+ mtd_add_partition_attrs(new);
+
return ret;
-err_inv:
- mutex_unlock(&mtd_partitions_mutex);
- free_partition(new);
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -612,6 +627,8 @@ int mtd_del_partition(struct mtd_info *master, int partno)
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if ((slave->master == master) &&
(slave->mtd.index == partno)) {
+ sysfs_remove_files(&slave->mtd.dev.kobj,
+ mtd_partition_attrs);
ret = del_mtd_device(&slave->mtd);
if (ret < 0)
break;
@@ -631,8 +648,8 @@ EXPORT_SYMBOL_GPL(mtd_del_partition);
* and registers slave MTD objects which are bound to the master according to
* the partition definitions.
*
- * We don't register the master, or expect the caller to have done so,
- * for reasons of data integrity.
+ * For historical reasons, this function's caller only registers the master
+ * if the MTD_PARTITIONED_MASTER config option is set.
*/
int add_mtd_partitions(struct mtd_info *master,
@@ -655,6 +672,7 @@ int add_mtd_partitions(struct mtd_info *master,
mutex_unlock(&mtd_partitions_mutex);
add_mtd_device(&slave->mtd);
+ mtd_add_partition_attrs(slave);
cur_offset = slave->offset + slave->mtd.size;
}
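
Runtime-created partitions get the same sysfs "offset" attribute via mtd_add_partition_attrs(). A sketch of adding one; the partition name and size are placeholders.

#include <linux/sizes.h>

static int add_boot_part(struct mtd_info *master)
{
	/* carve a 1 MiB "boot" partition at offset 0 of the master */
	return mtd_add_partition(master, "boot", 0, SZ_1M);
}
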
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index d93c849b70b5..46010bd895b1 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -485,7 +485,7 @@ static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
for (i = 0; i < ecc_len; i++)
layout->eccpos[i] = oobsize - ecc_len + i;
- layout->oobfree[0].offset = 2;
+ layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES;
layout->oobfree[0].length =
oobsize - ecc_len - layout->oobfree[0].offset;
}
@@ -1204,14 +1204,14 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err;
}
- regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev, regs_rom);
- if (IS_ERR(host->pmecc_rom_base)) {
- if (!host->has_no_lookup_table)
- /* Don't display the information again */
+ if (!host->has_no_lookup_table) {
+ regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
+ regs_rom);
+ if (IS_ERR(host->pmecc_rom_base)) {
dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
-
- host->has_no_lookup_table = true;
+ host->has_no_lookup_table = true;
+ }
}
if (host->has_no_lookup_table) {
@@ -1254,7 +1254,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
nand_chip->ecc.steps = mtd->writesize / sector_size;
nand_chip->ecc.total = nand_chip->ecc.bytes *
nand_chip->ecc.steps;
- if (nand_chip->ecc.total > mtd->oobsize - 2) {
+ if (nand_chip->ecc.total >
+ mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
dev_err(host->dev, "No room for ECC bytes\n");
err_no = -EINVAL;
goto err;
@@ -1719,7 +1720,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
comp[index++] = &host->nfc->comp_cmd_done;
if (index == 0) {
- dev_err(host->dev, "Unkown interrupt flag: 0x%08x\n", flag);
+ dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
return -EINVAL;
}
@@ -1752,11 +1753,10 @@ static int nfc_send_command(struct atmel_nand_host *host,
cmd, addr, cycle0);
timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
- while (nfc_cmd_readl(NFCADDR_CMD_NFCBUSY, host->nfc->base_cmd_regs)
- & NFCADDR_CMD_NFCBUSY) {
+ while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
if (time_after(jiffies, timeout)) {
dev_err(host->dev,
- "Time out to wait CMD_NFCBUSY ready!\n");
+ "Time out to wait for NFC ready!\n");
return -ETIMEDOUT;
}
}
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
index d4035e335ad8..668e7358f19b 100644
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -152,4 +152,7 @@
/* Time out value for reading PMECC status register */
#define PMECC_MAX_TIMEOUT_MS 100
+/* Reserved bytes in oob area */
+#define PMECC_OOB_RESERVED_BYTES 2
+
#endif
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 85b8ca6af7d2..4d5d26221a7e 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -35,6 +35,7 @@
#define NFC_CTRL_DISABLE (1 << 1)
#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
+#define NFC_SR_BUSY (1 << 8)
#define NFC_SR_XFR_DONE (1 << 16)
#define NFC_SR_CMD_DONE (1 << 17)
#define NFC_SR_DTOE (1 << 20)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index f44c6061536a..870c7fc0f759 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -225,7 +225,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
- uint16_t TclsRising = 1;
uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
uint16_t dv_window = 0;
uint16_t en_lo, en_hi;
@@ -276,8 +275,6 @@ static void nand_onfi_timing_set(struct denali_nand_info *denali,
re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (!TclsRising)
- cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
if (cs_cnt == 0)
cs_cnt = 1;
@@ -1536,6 +1533,9 @@ int denali_init(struct denali_nand_info *denali)
denali->nand.options |= NAND_SKIP_BBTSCAN;
denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+ /* no subpage writes on denali */
+ denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
+
/*
* Denali Controller only support 15bit and 8bit ECC in MRST,
* so just let controller do 15bit ECC for MLC and 8bit ECC for
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4c05f4f6a5c6..51394e59901b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -317,7 +317,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
- IFC_TIMEOUT_MSECS * HZ/1000);
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
/* ctrl->nand_stat will be updated from IRQ context */
if (!ctrl->nand_stat)
@@ -860,7 +860,7 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
- IFC_TIMEOUT_MSECS * HZ/1000);
+ msecs_to_jiffies(IFC_TIMEOUT_MSECS));
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
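
The two fsl_ifc hunks replace the open-coded jiffies conversion with msecs_to_jiffies(), which rounds up instead of truncating and is correct for any HZ. A small comparison, assuming a 5 ms timeout for illustration:

#include <linux/jiffies.h>

static void timeout_example(void)
{
	/* open-coded form truncates: 5 * 100 / 1000 == 0 jiffies at HZ=100 */
	unsigned long t_open  = 5 * HZ / 1000;
	/* msecs_to_jiffies() rounds up: never 0 for a non-zero timeout */
	unsigned long t_round = msecs_to_jiffies(5);

	(void)t_open;
	(void)t_round;
}
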
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index edfaa21b1817..e58af4bfa8c8 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -873,6 +873,7 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
{
struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
u32 val;
+ int ret;
/* Set default NAND width to 8 bits */
pdata->width = 8;
@@ -891,8 +892,12 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
sizeof(*pdata->nand_timings), GFP_KERNEL);
if (!pdata->nand_timings)
return -ENOMEM;
- of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+ ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
sizeof(*pdata->nand_timings));
+ if (ret) {
+ dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
+ pdata->nand_timings = NULL;
+ }
/* Set default NAND bank to 0 */
pdata->bank = 0;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 33f3c3c54dbc..1b8f3500e6d2 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -446,7 +446,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
struct dma_async_tx_descriptor *desc)
{
struct completion *dma_c = &this->dma_done;
- int err;
+ unsigned long timeout;
init_completion(dma_c);
@@ -456,8 +456,8 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
dma_async_issue_pending(get_dma_chan(this));
/* Wait for the interrupt from the DMA block. */
- err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
- if (!err) {
+ timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+ if (!timeout) {
dev_err(this->dev, "DMA timeout, last DMA :%d\n",
this->last_dma_type);
gpmi_dump_info(this);
@@ -477,7 +477,7 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
struct dma_async_tx_descriptor *desc)
{
struct completion *bch_c = &this->bch_done;
- int err;
+ unsigned long timeout;
/* Prepare to receive an interrupt from the BCH block. */
init_completion(bch_c);
@@ -486,8 +486,8 @@ int start_dma_with_bch_irq(struct gpmi_nand_data *this,
start_dma_without_bch_irq(this, desc);
/* Wait for the interrupt from the BCH block. */
- err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
- if (!err) {
+ timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+ if (!timeout) {
dev_err(this->dev, "BCH timeout, last DMA :%d\n",
this->last_dma_type);
gpmi_dump_info(this);
@@ -1950,7 +1950,9 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
ret = nand_boot_init(this);
if (ret)
goto err_out;
- chip->scan_bbt(mtd);
+ ret = chip->scan_bbt(mtd);
+ if (ret)
+ goto err_out;
ppdata.of_node = this->pdev->dev.of_node;
ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
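
The gpmi hunks change the local variable to unsigned long because wait_for_completion_timeout() returns the remaining jiffies, with 0 meaning the wait expired; it never returns a negative error. A sketch of the idiom:

#include <linux/completion.h>

static int wait_dma_done(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!left)
		return -ETIMEDOUT;	/* 0 jiffies left: timed out */

	return 0;			/* completed with time to spare */
}
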
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index a8f550fec35e..372e0e38f59b 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -386,26 +386,51 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
-static void wait_op_done(struct mxc_nand_host *host, int useirq)
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
{
- int max_retries = 8000;
+ int ret = 0;
+
+ /*
+ * If operation is already complete, don't bother to setup an irq or a
+ * loop.
+ */
+ if (host->devtype_data->check_int(host))
+ return 0;
if (useirq) {
- if (!host->devtype_data->check_int(host)) {
- reinit_completion(&host->op_completion);
- irq_control(host, 1);
- wait_for_completion(&host->op_completion);
+ unsigned long timeout;
+
+ reinit_completion(&host->op_completion);
+
+ irq_control(host, 1);
+
+ timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+ if (!timeout && !host->devtype_data->check_int(host)) {
+ dev_dbg(host->dev, "timeout waiting for irq\n");
+ ret = -ETIMEDOUT;
}
} else {
- while (max_retries-- > 0) {
- if (host->devtype_data->check_int(host))
- break;
+ int max_retries = 8000;
+ int done;
+ do {
udelay(1);
+
+ done = host->devtype_data->check_int(host);
+ if (done)
+ break;
+
+ } while (--max_retries);
+
+ if (!done) {
+ dev_dbg(host->dev, "timeout polling for completion\n");
+ ret = -ETIMEDOUT;
}
- if (max_retries < 0)
- pr_debug("%s: INT not set\n", __func__);
}
+
+ WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+ return ret;
}
static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
@@ -527,30 +552,17 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
- struct nand_chip *this = &host->nand;
-
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
-
- if (this->options & NAND_BUSWIDTH_16) {
- /* compress the ID info */
- host->data_buf[1] = host->data_buf[2];
- host->data_buf[2] = host->data_buf[4];
- host->data_buf[3] = host->data_buf[6];
- host->data_buf[4] = host->data_buf[8];
- host->data_buf[5] = host->data_buf[10];
- }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
static void send_read_id_v1_v2(struct mxc_nand_host *host)
{
- struct nand_chip *this = &host->nand;
-
/* NANDFC buffer 0 is used for device ID output */
writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
@@ -560,15 +572,6 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
-
- if (this->options & NAND_BUSWIDTH_16) {
- /* compress the ID info */
- host->data_buf[1] = host->data_buf[2];
- host->data_buf[2] = host->data_buf[4];
- host->data_buf[3] = host->data_buf[6];
- host->data_buf[4] = host->data_buf[8];
- host->data_buf[5] = host->data_buf[10];
- }
}
static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
@@ -694,9 +697,17 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
if (host->status_request)
return host->devtype_data->get_dev_status(host) & 0xFF;
- ret = *(uint8_t *)(host->data_buf + host->buf_start);
- host->buf_start++;
+ if (nand_chip->options & NAND_BUSWIDTH_16) {
+ /* only take the lower byte of each word */
+ ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+ host->buf_start += 2;
+ } else {
+ ret = *(uint8_t *)(host->data_buf + host->buf_start);
+ host->buf_start++;
+ }
+ pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret;
}
@@ -825,6 +836,12 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
}
}
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write. When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So usually this function is called with
+ * column == 0 (unless no column cycle is needed, as indicated by column == -1)
+ */
static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
{
struct nand_chip *nand_chip = mtd->priv;
@@ -832,16 +849,13 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
/* Write out column address, if necessary */
if (column != -1) {
- /*
- * MXC NANDFC can only perform full page+spare or
- * spare-only read/write. When the upper layers
- * perform a read/write buf operation, the saved column
- * address is used to index into the full page.
- */
- host->devtype_data->send_addr(host, 0, page_addr == -1);
+ host->devtype_data->send_addr(host, column & 0xff,
+ page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- host->devtype_data->send_addr(host, 0, false);
+ host->devtype_data->send_addr(host,
+ (column >> 8) & 0xff,
+ false);
}
/* Write out page address, if necessary */
@@ -903,7 +917,7 @@ static void preset_v1(struct mtd_info *mtd)
struct mxc_nand_host *host = nand_chip->priv;
uint16_t config1 = 0;
- if (nand_chip->ecc.mode == NAND_ECC_HW)
+ if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
if (!host->devtype_data->irqpending_quirk)
@@ -931,9 +945,6 @@ static void preset_v2(struct mtd_info *mtd)
struct mxc_nand_host *host = nand_chip->priv;
uint16_t config1 = 0;
- if (nand_chip->ecc.mode == NAND_ECC_HW)
- config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
-
config1 |= NFC_V2_CONFIG1_FP_INT;
if (!host->devtype_data->irqpending_quirk)
@@ -942,6 +953,9 @@ static void preset_v2(struct mtd_info *mtd)
if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
host->eccsize = get_eccsize(mtd);
if (host->eccsize == 4)
config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
@@ -999,9 +1013,6 @@ static void preset_v3(struct mtd_info *mtd)
NFC_V3_CONFIG2_INT_MSK |
NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
- if (chip->ecc.mode == NAND_ECC_HW)
- config2 |= NFC_V3_CONFIG2_ECC_EN;
-
addr_phases = fls(chip->pagemask) >> 3;
if (mtd->writesize == 2048) {
@@ -1016,6 +1027,9 @@ static void preset_v3(struct mtd_info *mtd)
}
if (mtd->writesize) {
+ if (chip->ecc.mode == NAND_ECC_HW)
+ config2 |= NFC_V3_CONFIG2_ECC_EN;
+
config2 |= NFC_V3_CONFIG2_PPB(
ffs(mtd->erasesize / mtd->writesize) - 6,
host->devtype_data->ppb_shift);
@@ -1066,6 +1080,9 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->status_request = true;
host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1079,7 +1096,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
+ WARN_ONCE(column < 0,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
if (mtd->writesize > 512)
host->devtype_data->send_cmd(host,
@@ -1100,7 +1120,10 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
host->devtype_data->send_cmd(host, command, false);
- mxc_do_addr_cycle(mtd, column, page_addr);
+ WARN_ONCE(column < -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
+ mxc_do_addr_cycle(mtd, 0, page_addr);
break;
case NAND_CMD_PAGEPROG:
@@ -1108,6 +1131,9 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
copy_spare(mtd, false);
host->devtype_data->send_page(mtd, NFC_INPUT);
host->devtype_data->send_cmd(host, command, true);
+ WARN_ONCE(column != -1 || page_addr != -1,
+ "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+ command, column, page_addr);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1115,15 +1141,29 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
host->devtype_data->send_read_id(host);
- host->buf_start = column;
+ host->buf_start = 0;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
host->devtype_data->send_cmd(host, command, false);
+ WARN_ONCE(column != -1,
+ "Unexpected column value (cmd=%u, col=%d)\n",
+ command, column);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
+ case NAND_CMD_PARAM:
+ host->devtype_data->send_cmd(host, command, false);
+ mxc_do_addr_cycle(mtd, column, page_addr);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
+ memcpy32_fromio(host->data_buf, host->main_area0, 512);
+ host->buf_start = 0;
+ break;
+ default:
+ WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+ command);
+ break;
}
}
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index df7eb4ff07d1..c2e1232cd45c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -386,7 +386,7 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
uint8_t buf[2] = { 0, 0 };
int ret = 0, res, i = 0;
- ops.datbuf = NULL;
+ memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
ops.ooboffs = chip->badblockpos;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -566,6 +566,25 @@ void nand_wait_ready(struct mtd_info *mtd)
EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+ register struct nand_chip *chip = mtd->priv;
+
+ timeo = jiffies + msecs_to_jiffies(timeo);
+ do {
+ if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+ break;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+};
+
+/**
* nand_command - [DEFAULT] Send command to NAND device
* @mtd: MTD device structure
* @command: the command to be sent
@@ -643,8 +662,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
NAND_CTRL_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd,
NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
- ;
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
return;
/* This applies to read commands */
@@ -740,8 +759,8 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
chip->cmd_ctrl(mtd, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->read_byte(mtd) & NAND_STATUS_READY))
- ;
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(mtd, 250);
return;
case NAND_CMD_RNDOUT:
@@ -968,7 +987,7 @@ int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
- ret = -EINVAL;
+ return -EINVAL;
/* Align to last block address if size addresses end of the device */
if (ofs + len == mtd->size)
@@ -1031,7 +1050,7 @@ int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
__func__, (unsigned long long)ofs, len);
if (check_offs_len(mtd, ofs, len))
- ret = -EINVAL;
+ return -EINVAL;
nand_get_device(mtd, FL_LOCKING);
@@ -1716,9 +1735,9 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
nand_get_device(mtd, FL_READING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_read_ops(mtd, from, &ops);
*retlen = ops.retlen;
@@ -2124,7 +2143,7 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
/**
- * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
@@ -2508,9 +2527,9 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
@@ -2536,9 +2555,9 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
int ret;
nand_get_device(mtd, FL_WRITING);
+ memset(&ops, 0, sizeof(ops));
ops.len = len;
ops.datbuf = (uint8_t *)buf;
- ops.oobbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
ret = nand_do_write_ops(mtd, to, &ops);
*retlen = ops.retlen;
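
The nand_base hunks replace piecemeal NULL assignments with memset() so that every field of struct mtd_oob_ops starts from a known state. The same effect with a designated initializer, sketched around the read path shown above:

static int sketch_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, uint8_t *buf)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_PLACE_OOB,
		.len    = len,
		.datbuf = buf,
		/* all other fields (oobbuf, ooblen, ooboffs, ...) are zero */
	};
	int ret;

	ret = nand_do_read_ops(mtd, from, &ops);
	*retlen = ops.retlen;
	return ret;
}
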
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 10b1f7a4fe50..a4615fcc3d00 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -38,8 +38,8 @@
#include <linux/platform_data/mtd-nand-pxa3xx.h>
-#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
-#define NAND_STOP_DELAY (2 * HZ/50)
+#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
+#define NAND_STOP_DELAY msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE (2048)
/*
@@ -605,11 +605,24 @@ static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
+static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
+{
+ struct pxa3xx_nand_info *info = data;
+
+ handle_data_pio(info);
+
+ info->state = STATE_CMD_DONE;
+ nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
struct pxa3xx_nand_info *info = devid;
unsigned int status, is_completed = 0, is_ready = 0;
unsigned int ready, cmd_done;
+ irqreturn_t ret = IRQ_HANDLED;
if (info->cs == 0) {
ready = NDSR_FLASH_RDY;
@@ -651,7 +664,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
} else {
info->state = (status & NDSR_RDDREQ) ?
STATE_PIO_READING : STATE_PIO_WRITING;
- handle_data_pio(info);
+ ret = IRQ_WAKE_THREAD;
+ goto NORMAL_IRQ_EXIT;
}
}
if (status & cmd_done) {
@@ -692,7 +706,7 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
if (is_ready)
complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
- return IRQ_HANDLED;
+ return ret;
}
static inline int is_buf_blank(uint8_t *buf, size_t len)
@@ -951,7 +965,7 @@ static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret, exec_cmd;
+ int exec_cmd;
/*
* if this is a x16 device ,then convert the input
@@ -983,9 +997,8 @@ static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
info->need_wait = 1;
pxa3xx_nand_start(info);
- ret = wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT);
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
@@ -1000,7 +1013,7 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret, exec_cmd, ext_cmd_type;
+ int exec_cmd, ext_cmd_type;
/*
* if this is a x16 device then convert the input
@@ -1063,9 +1076,8 @@ static void nand_cmdfunc_extended(struct mtd_info *mtd,
init_completion(&info->cmd_complete);
pxa3xx_nand_start(info);
- ret = wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT);
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->cmd_complete,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Wait time out!!!\n");
/* Stop State Machine for next command cycle */
pxa3xx_nand_stop(info);
@@ -1198,13 +1210,11 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
- int ret;
if (info->need_wait) {
- ret = wait_for_completion_timeout(&info->dev_ready,
- CHIP_DELAY_TIMEOUT);
info->need_wait = 0;
- if (!ret) {
+ if (!wait_for_completion_timeout(&info->dev_ready,
+ CHIP_DELAY_TIMEOUT)) {
dev_err(&info->pdev->dev, "Ready time out!!!\n");
return NAND_STATUS_FAIL;
}
@@ -1508,6 +1518,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
return ret;
}
+ memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
+
pxa3xx_flash_ids[0].name = f->name;
pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -1710,7 +1722,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
/* initialize all interrupts to be disabled */
disable_int(info, NDSR_MASK);
- ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
+ ret = request_threaded_irq(irq, pxa3xx_nand_irq,
+ pxa3xx_nand_irq_thread, IRQF_ONESHOT,
+ pdev->name, info);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ\n");
goto fail_free_buf;
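
The pxa3xx change moves the PIO data drain out of hard-IRQ context: the primary handler returns IRQ_WAKE_THREAD and the copy runs in the IRQ thread. The wiring, reduced to its essentials; the handler names follow the hunk, the rest is illustrative.

#include <linux/interrupt.h>

static irqreturn_t pxa3xx_nand_irq(int irq, void *data)
{
	/* acknowledge what must be done in hard-IRQ context, then defer */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	/* runs in a kernel thread; heavy PIO copying is fine here */
	return IRQ_HANDLED;
}

static int wire_irq(int irq, void *info, const char *name)
{
	/* IRQF_ONESHOT keeps the line masked until the thread finishes */
	return request_threaded_irq(irq, pxa3xx_nand_irq,
				    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				    name, info);
}
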
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 35aef5edb588..0e02be47ce1d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -948,8 +948,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
cpu_type = platform_get_device_id(pdev)->driver_data;
- pr_debug("s3c2410_nand_probe(%p)\n", pdev);
-
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -1045,7 +1043,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
}
- pr_debug("initialised ok\n");
return 0;
exit_error:
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index a21c378f096a..c3ce81c1a716 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -159,7 +159,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
return;
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = pdata->slave_id_fifo0_tx;
cfg.direction = DMA_MEM_TO_DEV;
cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
cfg.src_addr = 0;
@@ -175,7 +174,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
if (!flctl->chan_fifo0_rx)
goto err;
- cfg.slave_id = pdata->slave_id_fifo0_rx;
cfg.direction = DMA_DEV_TO_MEM;
cfg.dst_addr = 0;
cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 635ee0027691..43b3392ffee7 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1743,7 +1743,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
struct onenand_chip *this = mtd->priv;
int column, subpage;
int written = 0;
- int ret = 0;
if (this->state == FL_PM_SUSPENDED)
return -EBUSY;
@@ -1786,15 +1785,10 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
onenand_panic_wait(mtd);
/* In partial page write we don't update bufferram */
- onenand_update_bufferram(mtd, to, !ret && !subpage);
+ onenand_update_bufferram(mtd, to, !subpage);
if (ONENAND_IS_2PLANE(this)) {
ONENAND_SET_BUFFERRAM1(this);
- onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
- }
-
- if (ret) {
- printk(KERN_ERR "%s: write failed %d\n", __func__, ret);
- break;
+ onenand_update_bufferram(mtd, to + this->writesize, !subpage);
}
written += thislen;
@@ -1808,7 +1802,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
}
*retlen = written;
- return ret;
+ return 0;
}
/**
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 1c7308c2c77d..5d5d36272bb5 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -460,8 +460,7 @@ fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len)
writel((seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, base + QUADSPI_IPCR);
/* Wait for the interrupt. */
- err = wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000));
- if (!err) {
+ if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) {
dev_err(q->dev,
"cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n",
cmd, addr, readl(base + QUADSPI_FR),
@@ -830,27 +829,27 @@ static int fsl_qspi_probe(struct platform_device *pdev)
ret = clk_prepare_enable(q->clk_en);
if (ret) {
- dev_err(dev, "can not enable the qspi_en clock\n");
+ dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(q->clk);
if (ret) {
- dev_err(dev, "can not enable the qspi clock\n");
+ dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
goto clk_failed;
}
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(dev, "failed to get the irq\n");
+ dev_err(dev, "failed to get the irq: %d\n", ret);
goto irq_failed;
}
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
- dev_err(dev, "failed to request irq.\n");
+ dev_err(dev, "failed to request irq: %d\n", ret);
goto irq_failed;
}
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index b6a5a0c269e1..14a5d2325dac 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -369,17 +369,13 @@ erase_err:
return ret;
}
-static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct mtd_info *mtd = nor->mtd;
uint32_t offset = ofs;
uint8_t status_old, status_new;
int ret = 0;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
- if (ret)
- return ret;
-
status_old = read_sr(nor);
if (offset < mtd->size - (mtd->size / 2))
@@ -402,26 +398,18 @@ static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
write_enable(nor);
ret = write_sr(nor, status_new);
- if (ret)
- goto err;
}
-err:
- spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
return ret;
}
-static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
- struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ struct mtd_info *mtd = nor->mtd;
uint32_t offset = ofs;
uint8_t status_old, status_new;
int ret = 0;
- ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
- if (ret)
- return ret;
-
status_old = read_sr(nor);
if (offset+len > mtd->size - (mtd->size / 64))
@@ -444,15 +432,41 @@ static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
(status_old & (SR_BP2 | SR_BP1 | SR_BP0))) {
write_enable(nor);
ret = write_sr(nor, status_new);
- if (ret)
- goto err;
}
-err:
+ return ret;
+}
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_lock(nor, ofs, len);
+
spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
return ret;
}
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+ int ret;
+
+ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+ if (ret)
+ return ret;
+
+ ret = nor->flash_unlock(nor, ofs, len);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+}
+
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
((kernel_ulong_t)&(struct flash_info) { \
@@ -524,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
{ "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
+ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, 0) },
/* ESMT */
{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
@@ -553,6 +568,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) },
{ "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) },
{ "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) },
+ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
@@ -648,6 +664,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) },
/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) },
{ "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) },
{ "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) },
{ "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) },
@@ -658,6 +675,7 @@ static const struct spi_device_id spi_nor_ids[] = {
{ "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -1045,6 +1063,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
/* nor protection support for STmicro chips */
if (JEDEC_MFR(info) == CFI_MFR_ST) {
+ nor->flash_lock = stm_lock;
+ nor->flash_unlock = stm_unlock;
+ }
+
+ if (nor->flash_lock && nor->flash_unlock) {
mtd->_lock = spi_nor_lock;
mtd->_unlock = spi_nor_unlock;
}
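
With this refactor the generic spi_nor_lock()/spi_nor_unlock() wrappers only handle prep/unprep and delegate to per-chip hooks, and mtd->_lock/_unlock are registered whenever both hooks are present. A hedged sketch of how another flash family could reuse that dispatch; the acme_* names and CFI_MFR_ACME are hypothetical, only the hook fields and the registration pattern come from the patch:

/* Hypothetical vendor hooks plugged into the dispatch added above.
 * Everything named acme_* is made up for illustration. */
static int acme_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* set this vendor's block-protection bits covering [ofs, ofs + len) */
	return 0;
}

static int acme_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	/* clear this vendor's block-protection bits covering [ofs, ofs + len) */
	return 0;
}

/* in spi_nor_scan(), alongside the CFI_MFR_ST branch shown above:
 *
 *	if (JEDEC_MFR(info) == CFI_MFR_ACME) {   // hypothetical manufacturer id
 *		nor->flash_lock = acme_lock;
 *		nor->flash_unlock = acme_unlock;
 *	}
 *
 * the generic code then wires up mtd->_lock/_unlock because both hooks are set.
 */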
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index e579f9027c47..79316159eec6 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -9,6 +9,8 @@
#include <linux/slab.h>
#include <linux/mtd/nand_ecc.h>
+#include "mtd_test.h"
+
/*
* Test the implementation for software ECC
*
@@ -274,6 +276,10 @@ static int nand_ecc_test_run(const size_t size)
}
pr_info("ok - %s-%zd\n",
nand_ecc_test[i].name, size);
+
+ err = mtdtest_relax();
+ if (err)
+ break;
}
error:
kfree(error_data);
diff --git a/drivers/mtd/tests/mtd_test.h b/drivers/mtd/tests/mtd_test.h
index f437c776c54f..4b7bee17c924 100644
--- a/drivers/mtd/tests/mtd_test.h
+++ b/drivers/mtd/tests/mtd_test.h
@@ -1,4 +1,16 @@
#include <linux/mtd/mtd.h>
+#include <linux/sched.h>
+
+static inline int mtdtest_relax(void)
+{
+ cond_resched();
+ if (signal_pending(current)) {
+ pr_info("aborting test due to pending signal!\n");
+ return -EINTR;
+ }
+
+ return 0;
+}
int mtdtest_erase_eraseblock(struct mtd_info *mtd, unsigned int ebnum);
int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
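
mtdtest_relax() folds the cond_resched() calls these tests already made together with a pending-signal check, so a long-running MTD test can be aborted from userspace instead of only being preemptible. Every converted loop follows the same shape; a minimal sketch, with ebcnt and do_one_eraseblock() as placeholders for a test's own body:

/* Sketch of the loop pattern used by the converted tests: relax once per
 * eraseblock and abort cleanly when the user signals the test process.
 * ebcnt and do_one_eraseblock() stand in for each test's own loop body. */
static int walk_all_eraseblocks(unsigned int ebcnt,
				int (*do_one_eraseblock)(unsigned int))
{
	unsigned int i;
	int err;

	for (i = 0; i < ebcnt; ++i) {
		err = do_one_eraseblock(i);
		if (err)
			return err;

		err = mtdtest_relax();	/* cond_resched() + pending-signal check */
		if (err)
			return err;	/* -EINTR: test interrupted from userspace */
	}
	return 0;
}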
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c
index 273f7e553954..09a4ccac53a2 100644
--- a/drivers/mtd/tests/nandbiterrs.c
+++ b/drivers/mtd/tests/nandbiterrs.c
@@ -320,6 +320,10 @@ static int overwrite_test(void)
break;
}
+ err = mtdtest_relax();
+ if (err)
+ break;
+
opno++;
}
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 5e061186eab1..8e8525f0202f 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -70,7 +70,7 @@ static int write_eraseblock(int ebnum)
int i;
struct mtd_oob_ops ops;
int err = 0;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
@@ -112,7 +112,10 @@ static int write_whole_device(void)
return err;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("written %u eraseblocks\n", i);
return 0;
@@ -141,6 +144,31 @@ static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t cou
return bitflips;
}
+/*
+ * Compare with 0xff and show the address, offset and data bytes at
+ * comparison failure. Return number of bitflips encountered.
+ */
+static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
+ size_t count)
+{
+ const unsigned char *su1;
+ int res;
+ size_t i = 0;
+ size_t bitflips = 0;
+
+ for (su1 = cs; 0 < count; ++su1, count--, i++) {
+ res = *su1 ^ 0xff;
+ if (res) {
+ pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
+ (unsigned long)addr, (unsigned long)offset + i,
+ *su1, res);
+ bitflips += hweight8(res);
+ }
+ }
+
+ return bitflips;
+}
+
static int verify_eraseblock(int ebnum)
{
int i;
@@ -203,6 +231,15 @@ static int verify_eraseblock(int ebnum)
bitflips = memcmpshow(addr, readbuf + use_offset,
writebuf + (use_len_max * i) + use_offset,
use_len);
+
+ /* verify pre-offset area for 0xff */
+ bitflips += memffshow(addr, 0, readbuf, use_offset);
+
+ /* verify post-(use_offset + use_len) area for 0xff */
+ k = use_offset + use_len;
+ bitflips += memffshow(addr, k, readbuf + k,
+ mtd->ecclayout->oobavail - k);
+
if (bitflips > bitflip_limit) {
pr_err("error: verify failed at %#llx\n",
(long long)addr);
@@ -212,34 +249,8 @@ static int verify_eraseblock(int ebnum)
return -1;
}
} else if (bitflips) {
- pr_info("ignoring error as within bitflip_limit\n");
+ pr_info("ignoring errors as within bitflip limit\n");
}
-
- for (k = 0; k < use_offset; ++k)
- if (readbuf[k] != 0xff) {
- pr_err("error: verify 0xff "
- "failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too "
- "many errors\n");
- return -1;
- }
- }
- for (k = use_offset + use_len;
- k < mtd->ecclayout->oobavail; ++k)
- if (readbuf[k] != 0xff) {
- pr_err("error: verify 0xff "
- "failed at %#llx\n",
- (long long)addr);
- errcnt += 1;
- if (errcnt > 1000) {
- pr_err("error: too "
- "many errors\n");
- return -1;
- }
- }
}
if (vary_offset)
do_vary_offset();
@@ -310,7 +321,10 @@ static int verify_all_eraseblocks(void)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
@@ -421,7 +435,10 @@ static int __init mtd_oobtest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
@@ -634,7 +651,11 @@ static int __init mtd_oobtest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
addr += mtd->writesize;
}
}
@@ -672,7 +693,10 @@ static int __init mtd_oobtest_init(void)
}
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/pagetest.c b/drivers/mtd/tests/pagetest.c
index 88296e888e9d..ba1890d5632c 100644
--- a/drivers/mtd/tests/pagetest.c
+++ b/drivers/mtd/tests/pagetest.c
@@ -407,7 +407,10 @@ static int __init mtd_pagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -422,7 +425,10 @@ static int __init mtd_pagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/readtest.c b/drivers/mtd/tests/readtest.c
index a54cf1511114..a3196b750a22 100644
--- a/drivers/mtd/tests/readtest.c
+++ b/drivers/mtd/tests/readtest.c
@@ -190,7 +190,10 @@ static int __init mtd_readtest_init(void)
if (!err)
err = ret;
}
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
if (err)
diff --git a/drivers/mtd/tests/speedtest.c b/drivers/mtd/tests/speedtest.c
index 5ee9f7021020..5a6f31af06f9 100644
--- a/drivers/mtd/tests/speedtest.c
+++ b/drivers/mtd/tests/speedtest.c
@@ -185,7 +185,7 @@ static long calc_speed(void)
(finish.tv_usec - start.tv_usec) / 1000;
if (ms == 0)
return 0;
- k = goodebcnt * (mtd->erasesize / 1024) * 1000;
+ k = (uint64_t)goodebcnt * (mtd->erasesize / 1024) * 1000;
do_div(k, ms);
return k;
}
@@ -269,7 +269,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -284,7 +287,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -303,7 +309,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock_by_page(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -318,7 +327,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock_by_page(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -337,7 +349,10 @@ static int __init mtd_speedtest_init(void)
err = write_eraseblock_by_2pages(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -352,7 +367,10 @@ static int __init mtd_speedtest_init(void)
err = read_eraseblock_by_2pages(i);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
stop_timing();
speed = calc_speed();
@@ -385,7 +403,11 @@ static int __init mtd_speedtest_init(void)
err = multiblock_erase(i, j);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
+
i += j;
}
stop_timing();
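
calc_speed() and the addr computations changed throughout these tests widen to 64 bits before multiplying; without the casts, goodebcnt * (erasesize / 1024) * 1000 and ebnum * erasesize are evaluated in 32-bit arithmetic and wrap on large flashes. A standalone illustration with made-up but realistic numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t goodebcnt = 65536;          /* e.g. an 8 GiB chip with 128 KiB blocks */
	uint32_t erasesize = 128 * 1024;

	/* 32-bit product: 65536 * 128 * 1000 = 8388608000, which wraps mod 2^32 */
	uint32_t wrapped = goodebcnt * (erasesize / 1024) * 1000;
	/* widened first, as the patch does with (uint64_t)goodebcnt * ... */
	uint64_t correct = (uint64_t)goodebcnt * (erasesize / 1024) * 1000;

	printf("32-bit: %u\n", (unsigned)wrapped);                 /* 4093640704 after wrapping */
	printf("64-bit: %llu\n", (unsigned long long)correct);     /* 8388608000 */
	return 0;
}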
diff --git a/drivers/mtd/tests/stresstest.c b/drivers/mtd/tests/stresstest.c
index c9d42cc2df1b..e509f8aa9a7e 100644
--- a/drivers/mtd/tests/stresstest.c
+++ b/drivers/mtd/tests/stresstest.c
@@ -96,7 +96,7 @@ static int do_read(void)
if (offs + len > mtd->erasesize)
len = mtd->erasesize - offs;
}
- addr = eb * mtd->erasesize + offs;
+ addr = (loff_t)eb * mtd->erasesize + offs;
return mtdtest_read(mtd, addr, len, readbuf);
}
@@ -124,7 +124,7 @@ static int do_write(void)
offsets[eb + 1] = 0;
}
}
- addr = eb * mtd->erasesize + offs;
+ addr = (loff_t)eb * mtd->erasesize + offs;
err = mtdtest_write(mtd, addr, len, writebuf);
if (unlikely(err))
return err;
@@ -221,7 +221,10 @@ static int __init mtd_stresstest_init(void)
err = do_operation();
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("finished, %d operations done\n", op);
diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c
index 7b59ef522d5e..aecc6ce5a9e1 100644
--- a/drivers/mtd/tests/subpagetest.c
+++ b/drivers/mtd/tests/subpagetest.c
@@ -95,7 +95,7 @@ static int write_eraseblock2(int ebnum)
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
- if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
@@ -195,7 +195,7 @@ static int verify_eraseblock2(int ebnum)
loff_t addr = (loff_t)ebnum * mtd->erasesize;
for (k = 1; k < 33; ++k) {
- if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
+ if (addr + (subpgsize * k) > (loff_t)(ebnum + 1) * mtd->erasesize)
break;
prandom_bytes_state(&rnd_state, writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
@@ -269,7 +269,10 @@ static int verify_all_eraseblocks_ff(void)
return err;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ return err;
}
pr_info("verified %u eraseblocks\n", i);
return 0;
@@ -346,7 +349,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -360,7 +366,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
@@ -383,7 +392,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("written up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("written %u eraseblocks\n", i);
@@ -398,7 +410,10 @@ static int __init mtd_subpagetest_init(void)
goto out;
if (i % 256 == 0)
pr_info("verified up to eraseblock %u\n", i);
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
pr_info("verified %u eraseblocks\n", i);
diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c
index b55bc52a1340..e5d6e6d9532f 100644
--- a/drivers/mtd/tests/torturetest.c
+++ b/drivers/mtd/tests/torturetest.c
@@ -101,11 +101,11 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
size_t read;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
- addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
@@ -155,11 +155,11 @@ static inline int write_pattern(int ebnum, void *buf)
{
int err;
size_t written;
- loff_t addr = ebnum * mtd->erasesize;
+ loff_t addr = (loff_t)ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
if (pgcnt) {
- addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
+ addr = (loff_t)(ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
err = mtd_write(mtd, addr, len, &written, buf);
@@ -279,7 +279,10 @@ static int __init tort_init(void)
" for 0xFF... pattern\n");
goto out;
}
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
}
@@ -294,7 +297,10 @@ static int __init tort_init(void)
err = write_pattern(i, patt);
if (err)
goto out;
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
/* Verify what we wrote */
@@ -314,7 +320,10 @@ static int __init tort_init(void)
"0x55AA55..." : "0xAA55AA...");
goto out;
}
- cond_resched();
+
+ err = mtdtest_relax();
+ if (err)
+ goto out;
}
}
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 9690cf9aaef5..b7f824d5ee88 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1169,9 +1169,9 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
return ERR_PTR(err);
/* MTD device number is defined by the major / minor numbers */
- major = imajor(path.dentry->d_inode);
- minor = iminor(path.dentry->d_inode);
- mode = path.dentry->d_inode->i_mode;
+ major = imajor(d_backing_inode(path.dentry));
+ minor = iminor(d_backing_inode(path.dentry));
+ mode = d_backing_inode(path.dentry)->i_mode;
path_put(&path);
if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
return ERR_PTR(-EINVAL);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 478e00cf2d9e..e844887732fb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -314,7 +314,7 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
if (error)
return ERR_PTR(error);
- inode = path.dentry->d_inode;
+ inode = d_backing_inode(path.dentry);
mod = inode->i_mode;
ubi_num = ubi_major2num(imajor(inode));
vol_id = iminor(inode) - 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 78dde56ae6e6..3a10551d64cf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -82,6 +82,8 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#include "bonding_priv.h"
+
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 62694cfc05b6..b20b35acb47d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,6 +4,7 @@
#include <net/netns/generic.h>
#include <net/bonding.h>
+#include "bonding_priv.h"
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644
index 000000000000..5a4d81a9437c
--- /dev/null
+++ b/drivers/net/bonding/bonding_priv.h
@@ -0,0 +1,25 @@
+/*
+ * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _BONDING_PRIV_H
+#define _BONDING_PRIV_H
+
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#endif
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4643914859b2..8b17a9065b0b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
msg->u.rx_can_header.flag);
stats->rx_errors++;
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index b36ee9e0d220..d686b9cac29f 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
char *s;
if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
- printk(KERN_ERR "%s: unable to read podule description string\n",
+ printk(KERN_ERR "%s: unable to read module description string\n",
dev_name(&ec->dev));
goto no_addr;
}
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index eba070f16782..89cd11d86642 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
/* Tx buffer control flags
*/
#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
-#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
- MSGDMA_DESC_CTL_GO)
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
MSGDMA_DESC_CTL_TR_COMP_IRQ | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 90a76306ad0f..0533c051a3e5 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -777,6 +777,8 @@ static int init_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev;
struct device_node *phynode;
+ bool fixed_link = false;
+ int rc = 0;
/* Avoid init phy in case of no phy present */
if (!priv->phy_iface)
@@ -789,13 +791,32 @@ static int init_phy(struct net_device *dev)
phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
if (!phynode) {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev,
- "No phy-handle nor local mdio specified\n");
- return -ENODEV;
+ /* check if a fixed-link is defined in device-tree */
+ if (of_phy_is_fixed_link(priv->device->of_node)) {
+ rc = of_phy_register_fixed_link(priv->device->of_node);
+ if (rc < 0) {
+ netdev_err(dev, "cannot register fixed PHY\n");
+ return rc;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ phynode = of_node_get(priv->device->of_node);
+ fixed_link = true;
+
+ netdev_dbg(dev, "fixed-link detected\n");
+ phydev = of_phy_connect(dev, phynode,
+ &altera_tse_adjust_link,
+ 0, priv->phy_iface);
+ } else {
+ netdev_dbg(dev, "no phy-handle found\n");
+ if (!priv->mdio) {
+ netdev_err(dev, "No phy-handle nor local mdio specified\n");
+ return -ENODEV;
+ }
+ phydev = connect_local_phy(dev);
}
- phydev = connect_local_phy(dev);
} else {
netdev_dbg(dev, "phy-handle found\n");
phydev = of_phy_connect(dev, phynode,
@@ -819,10 +840,10 @@ static int init_phy(struct net_device *dev)
/* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
+ * device as well. If a fixed-link is used, the phy_id is always 0.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if ((phydev->phy_id == 0) && !fixed_link) {
netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
phy_disconnect(phydev);
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 6f7dc81581ff..3558a36b1c2d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2485,8 +2485,10 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
else if (bp->flags & GRO_ENABLE_FLAG)
fp->mode = TPA_MODE_GRO;
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
+ /* We don't want TPA if it's disabled in bp
+ * or if this is an FCoE L2 ring.
+ */
+ if (bp->disable_tpa || IS_FCOE_FP(fp))
fp->disable_tpa = 1;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1270b189a9a2..069952fa5d64 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- tp->pcierr_recovery = true;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index f0285bcbe598..371f75e782e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -538,7 +538,7 @@ static ssize_t tp_la_write(struct file *file, const char __user *buf,
char s[32];
unsigned long val;
size_t size = min(sizeof(s) - 1, count);
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
if (copy_from_user(s, buf, size))
return -EFAULT;
@@ -647,7 +647,7 @@ static int pm_stats_open(struct inode *inode, struct file *file)
static ssize_t pm_stats_clear(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
t4_write_reg(adap, PM_RX_STAT_CONFIG_A, 0);
t4_write_reg(adap, PM_TX_STAT_CONFIG_A, 0);
@@ -1005,7 +1005,7 @@ static ssize_t mbox_write(struct file *file, const char __user *buf,
&data[7], &c) < 8 || c != '\n')
return -EINVAL;
- ino = FILE_DATA(file);
+ ino = file_inode(file);
mbox = (uintptr_t)ino->i_private & 7;
adap = ino->i_private - mbox;
addr = adap->regs + PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
@@ -1034,7 +1034,7 @@ static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos;
- loff_t avail = FILE_DATA(file)->i_size;
+ loff_t avail = file_inode(file)->i_size;
struct adapter *adap = file->private_data;
if (pos < 0)
@@ -1479,7 +1479,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf,
int i, j;
u32 key[10];
char s[100], *p;
- struct adapter *adap = FILE_DATA(file)->i_private;
+ struct adapter *adap = file_inode(file)->i_private;
if (count > sizeof(s) - 1)
return -EINVAL;
@@ -1951,12 +1951,6 @@ static const struct file_operations mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void set_debugfs_file_size(struct dentry *de, loff_t size)
-{
- if (!IS_ERR(de) && de->d_inode)
- de->d_inode->i_size = size;
-}
-
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
{
@@ -2072,9 +2066,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file("flash", S_IRUSR, adap->debugfs_root, adap,
- &flash_debugfs_fops);
- set_debugfs_file_size(de, adap->params.sf_size);
+ de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index 8f418ba868bd..23f43a0f8950 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,8 +37,6 @@
#include <linux/export.h>
-#define FILE_DATA(_file) ((_file)->f_path.dentry->d_inode)
-
#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 291c87036e17..2a0dc127df3f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
{
int ret = 0;
- if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
return 0;
ret = ehea_create_busmap();
@@ -3381,12 +3381,14 @@ out3:
out2:
unregister_reboot_notifier(&ehea_reboot_nb);
out:
+ atomic_dec(&ehea_memory_hooks_registered);
return ret;
}
static void ehea_unregister_memory_hooks(void)
{
- if (atomic_read(&ehea_memory_hooks_registered))
+ /* Only remove the hooks if we've registered them */
+ if (atomic_read(&ehea_memory_hooks_registered) == 0)
return;
unregister_reboot_notifier(&ehea_reboot_nb);
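
The ehea fix hinges on the return conventions of the two atomics: atomic_inc_and_test() is true only when the post-increment result is zero, so the 0 -> 1 transition never hit the early return and every caller re-registered the hooks, while atomic_inc_return() > 1 lets only the first caller through (and the new atomic_dec() on the error path keeps the count honest). A small userspace model of the two return conventions, with a plain counter standing in for the kernel atomics:

#include <stdio.h>

/* Model only the return conventions of the two kernel primitives. */
static int counter;

static int inc_and_test(void) { return ++counter == 0; }  /* true only when result is 0 */
static int inc_return(void)   { return ++counter; }       /* new value after increment */

int main(void)
{
	counter = 0;
	/* old check: first registration goes 0 -> 1, the test is false,
	 * so the "already registered" early return never fires */
	printf("inc_and_test on first call: %d\n", inc_and_test());   /* prints 0 */

	counter = 0;
	/* new check: only callers after the first one see a value > 1 */
	printf("first  caller sees %d\n", inc_return());   /* 1 -> do the registration */
	printf("second caller sees %d\n", inc_return());   /* 2 -> skip, already done */
	return 0;
}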
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index af829c578400..7ace07dad6a3 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!np) {
dev_err(&pdev->dev, "missing phy-handle\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus = mdiobus_alloc();
if (pep->smi_bus == NULL) {
err = -ENOMEM;
- goto err_base;
+ goto err_netdev;
}
pep->smi_bus->priv = pep;
pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
mdiobus_unregister(pep->smi_bus);
err_free_mdio:
mdiobus_free(pep->smi_bus);
-err_base:
- iounmap(pep->base);
err_netdev:
free_netdev(dev);
err_clk:
- clk_disable(clk);
- clk_put(clk);
+ clk_disable_unprepare(clk);
return err;
}
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
if (pep->phy)
phy_disconnect(pep->phy);
if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
+ clk_disable_unprepare(pep->clk);
}
- iounmap(pep->base);
- pep->base = NULL;
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index f0fbb4ade85d..4f7dc044601e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -939,21 +939,34 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
return err;
}
if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
- /* compute slave's gid block */
- smp->attr_mod = cpu_to_be32(slave / 8);
- /* execute cmd */
- err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
- vhcr->in_modifier, opcode_modifier,
- vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
- if (!err) {
- /* if needed, move slave gid to index 0 */
- if (slave % 8)
- memcpy(outsmp->data,
- outsmp->data + (slave % 8) * 8, 8);
- /* delete all other gids */
- memset(outsmp->data + 8, 0, 56);
+ __be64 guid = mlx4_get_admin_guid(dev, slave,
+ port);
+
+ /* set the PF admin guid to the FW/HW burned
+ * GUID, if it wasn't yet set
+ */
+ if (slave == 0 && guid == 0) {
+ smp->attr_mod = 0;
+ err = mlx4_cmd_box(dev,
+ inbox->dma,
+ outbox->dma,
+ vhcr->in_modifier,
+ opcode_modifier,
+ vhcr->op,
+ MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ mlx4_set_admin_guid(dev,
+ *(__be64 *)outsmp->
+ data, slave, port);
+ } else {
+ memcpy(outsmp->data, &guid, 8);
}
- return err;
+
+ /* clean all other gids */
+ memset(outsmp->data + 8, 0, 56);
+ return 0;
}
if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
@@ -2350,6 +2363,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
vf_oper->vport[port].vlan_idx = NO_INDX;
vf_oper->vport[port].mac_idx = NO_INDX;
+ mlx4_set_random_admin_guid(dev, i, port);
}
spin_lock_init(&s_state->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3f44e2bbb982..a2ddf3d75ff8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
struct mlx4_en_priv *priv = netdev_priv(dev);
/* check if requested function is supported by the device */
- if ((hfunc == ETH_RSS_HASH_TOP &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
- (hfunc == ETH_RSS_HASH_XOR &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
- return -EINVAL;
+ if (hfunc == ETH_RSS_HASH_TOP) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
+ return -EINVAL;
+ if (!(dev->features & NETIF_F_RXHASH))
+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ return 0;
+ } else if (hfunc == ETH_RSS_HASH_XOR) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
+ return -EINVAL;
+ if (dev->features & NETIF_F_RXHASH)
+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+ }
- priv->rss_hash_fn = hfunc;
- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
- return 0;
+ return -EINVAL;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 190fd624bdfe..2619c9fbf42d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -702,6 +702,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
}
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
+ flr_slave);
queue_work(priv->mfunc.master.comm_wq,
&priv->mfunc.master.slave_flr_event_work);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index acceb75e8c44..ced5ecab5aa7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2260,6 +2260,37 @@ void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
+}
+EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
+
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return priv->mfunc.master.vf_admin[entry].vport[port].guid;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
+
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ __be64 guid;
+
+ /* hw GUID */
+ if (entry == 0)
+ return;
+
+ get_random_bytes((char *)&guid, sizeof(guid));
+ guid &= ~(cpu_to_be64(1ULL << 56));
+ guid |= cpu_to_be64(1ULL << 57);
+ priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
+}
+
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f30eeb730a86..502d3dd2c888 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -499,6 +499,7 @@ struct mlx4_vport_state {
bool spoofchk;
u32 link_state;
u8 qos_vport;
+ __be64 guid;
};
struct mlx4_vf_admin_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index df2238372ea7..8a64542abc16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -211,26 +211,28 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
return 0;
}
+#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
+
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
struct fw_page *fwp;
int n;
- fwp = find_fw_page(dev, addr & PAGE_MASK);
+ fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
if (!fwp) {
mlx5_core_warn(dev, "page not found\n");
return;
}
- n = (addr & ~PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
+ n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
fwp->free_count++;
set_bit(n, &fwp->bitmask);
if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
} else if (fwp->free_count == 1) {
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2bef655279f3..9b7e0a34c98b 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE,
ALE_PORT_STATE_FORWARD);
- if (ndev && slave->open)
+ if (ndev && slave->open &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_on(ndev);
} else {
writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
ALE_PORT_STATE,
ALE_PORT_STATE_DISABLE);
- if (ndev)
+ if (ndev &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_off(ndev);
}
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 49ce7ece5af3..c9cb486c753d 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ 1 ^ bitbang->mdo_active_low);
return;
}
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
+ return gpio_get_value_cansleep(bitbang->mdio) ^
+ bitbang->mdio_active_low;
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ what ^ bitbang->mdo_active_low);
else
- gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
+ gpio_set_value_cansleep(bitbang->mdio,
+ what ^ bitbang->mdio_active_low);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
+ gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
}
static struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 1a87a585e74d..66edd99bc302 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -12,33 +12,30 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
-#define MDIO_MUX_GPIO_MAX_BITS 8
-
struct mdio_mux_gpio_state {
- struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int num_gpios;
+ struct gpio_descs *gpios;
void *mux_handle;
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
- int values[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int n;
struct mdio_mux_gpio_state *s = data;
+ int values[s->gpios->ndescs];
+ unsigned int n;
if (current_child == desired_child)
return 0;
- for (n = 0; n < s->num_gpios; n++) {
+ for (n = 0; n < s->gpios->ndescs; n++)
values[n] = (desired_child >> n) & 1;
- }
- gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
+
+ gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
return 0;
}
@@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s;
- int num_gpios;
- unsigned int n;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- num_gpios = of_gpio_count(pdev->dev.of_node);
- if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
- return -ENODEV;
-
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
- s->num_gpios = num_gpios;
-
- for (n = 0; n < num_gpios; ) {
- struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err;
- }
- s->gpio[n] = gpio;
- n++;
- }
+ s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(s->gpios))
+ return PTR_ERR(s->gpios);
r = mdio_mux_init(&pdev->dev,
mdio_mux_gpio_switch_fn, &s->mux_handle, s);
- if (r == 0) {
- pdev->dev.platform_data = s;
- return 0;
- }
-err:
- while (n) {
- n--;
- gpiod_put(s->gpio[n]);
+ if (r != 0) {
+ gpiod_put_array(s->gpios);
+ return r;
}
- return r;
+
+ pdev->dev.platform_data = s;
+ return 0;
}
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
- unsigned int n;
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
- for (n = 0; n < s->num_gpios; n++)
- gpiod_put(s->gpio[n]);
+ gpiod_put_array(s->gpios);
return 0;
}
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 911b21602ff2..05005c660d4d 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
struct blkcipher_desc desc = { .tfm = state->arc4 };
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
- int sanity = 0;
struct scatterlist sg_in[1], sg_out[1];
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
"mppe_decompress[%d]: ENCRYPTED bit not set!\n",
state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (!state->stateful && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
"stateless mode!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
"flag packet!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
- }
-
- if (sanity) {
- if (state->sanity_errors < SANITY_MAX)
- return DECOMP_ERROR;
- else
- /*
- * Take LCP down if the peer is sending too many bogons.
- * We don't want to do this for a single or just a few
- * instances since it could just be due to packet corruption.
- */
- return DECOMP_FATALERROR;
+ goto sanity_error;
}
/*
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
*/
if (!state->stateful) {
+ /* Discard late packet */
+ if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
+ > MPPE_CCOUNT_SPACE / 2) {
+ state->sanity_errors++;
+ goto sanity_error;
+ }
+
/* RFC 3078, sec 8.1. Rekey for every packet. */
while (state->ccount != ccount) {
mppe_rekey(state, 0);
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
state->sanity_errors >>= 1;
return osize;
+
+sanity_error:
+ if (state->sanity_errors < SANITY_MAX)
+ return DECOMP_ERROR;
+ else
+ /* Take LCP down if the peer is sending too many bogons.
+ * We don't want to do this for a single or just a few
+ * instances since it could just be due to packet corruption.
+ */
+ return DECOMP_FATALERROR;
}
/*
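
The new stateless-mode check discards a packet whose coherency count is behind the expected one: taken modulo the counter space, a forward distance larger than half the space means the packet is actually old. A standalone illustration of that distance test, assuming the driver's 12-bit coherency-count space of 0x1000:

#include <stdio.h>

#define CCOUNT_SPACE 0x1000	/* assumed 12-bit coherency-count space */

/* Forward distance from the expected count to the received one; more than
 * half the space away is treated as a stale/late packet, as in the new check. */
static int is_late(unsigned expected, unsigned received)
{
	return (received - expected) % CCOUNT_SPACE > CCOUNT_SPACE / 2;
}

int main(void)
{
	printf("%d\n", is_late(0x010, 0x012));  /* 0: slightly ahead, rekey forward */
	printf("%d\n", is_late(0x010, 0x00e));  /* 1: behind us, i.e. a late packet */
	printf("%d\n", is_late(0xffe, 0x002));  /* 0: ahead across the wrap point  */
	return 0;
}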
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 1470b5227834..07bb3c8f191b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -50,7 +50,7 @@ config OF_ADDRESS_PCI
config OF_IRQ
def_bool y
- depends on !SPARC
+ depends on !SPARC && IRQ_DOMAIN
config OF_NET
depends on NETDEVICES
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a1aa0c7dee50..99764db0875a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -568,6 +568,29 @@ bool of_device_is_available(const struct device_node *device)
EXPORT_SYMBOL(of_device_is_available);
/**
+ * of_device_is_big_endian - check if a device has BE registers
+ *
+ * @device: Node to check for endianness
+ *
+ * Returns true if the device has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the device has a "native-endian" property.
+ * Returns false otherwise.
+ *
+ * Callers would nominally use ioread32be/iowrite32be if
+ * of_device_is_big_endian() == true, or readl/writel otherwise.
+ */
+bool of_device_is_big_endian(const struct device_node *device)
+{
+ if (of_property_read_bool(device, "big-endian"))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ of_property_read_bool(device, "native-endian"))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(of_device_is_big_endian);
+
+/**
* of_get_parent - Get a node's parent if any
* @node: Node to get parent
*
@@ -640,8 +663,9 @@ static struct device_node *__of_get_next_child(const struct device_node *node,
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
*
- * Returns a node pointer with refcount incremented, use
- * of_node_put() on it when done.
+ * Returns a node pointer with refcount incremented, use of_node_put() on
+ * it when done. Returns NULL when prev is the last child. Decrements the
+ * refcount of prev.
*/
struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev)
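
The kerneldoc above already names the intended split: ioread32be()/iowrite32be() when of_device_is_big_endian() returns true, readl()/writel() otherwise. A minimal sketch of a probe-time accessor choice; np, regs and CTRL_REG are illustrative placeholders, not from the patch:

/* Sketch: pick MMIO accessors from the DT endianness properties.
 * np, regs and CTRL_REG are illustrative placeholders. */
#define CTRL_REG	0x00

static u32 ctrl_read(struct device_node *np, void __iomem *regs)
{
	if (of_device_is_big_endian(np))
		return ioread32be(regs + CTRL_REG);
	return readl(regs + CTRL_REG);
}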
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 3a896c9aeb74..cde35c5d0191 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -109,6 +109,25 @@ int of_fdt_is_compatible(const void *blob,
}
/**
+ * of_fdt_is_big_endian - Return true if given node needs BE MMIO accesses
+ * @blob: A device tree blob
+ * @node: node to test
+ *
+ * Returns true if the node has a "big-endian" property, or if the kernel
+ * was compiled for BE *and* the node has a "native-endian" property.
+ * Returns false otherwise.
+ */
+bool of_fdt_is_big_endian(const void *blob, unsigned long node)
+{
+ if (fdt_getprop(blob, node, "big-endian", NULL))
+ return true;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+ fdt_getprop(blob, node, "native-endian", NULL))
+ return true;
+ return false;
+}
+
+/**
* of_fdt_match - Return true if node matches a list of compatible values
*/
int of_fdt_match(const void *blob, unsigned long node,
@@ -172,7 +191,7 @@ static void * unflatten_dt_node(void *blob,
if (!pathp)
return mem;
- allocl = l++;
+ allocl = ++l;
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
@@ -879,8 +898,7 @@ int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
endp = reg + (l / sizeof(__be32));
- pr_debug("memory scan node %s, reg size %d, data: %x %x %x %x,\n",
- uname, l, reg[0], reg[1], reg[2], reg[3]);
+ pr_debug("memory scan node %s, reg size %d,\n", uname, l);
while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
u64 base, size;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e844907c9efa..18016341d5a9 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -23,6 +23,8 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/bitops.h>
+
#include "of_private.h"
static struct unittest_results {
@@ -1109,6 +1111,59 @@ static const char *overlay_path(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
+/* it is guaranteed that overlay ids are assigned in sequence */
+#define MAX_UNITTEST_OVERLAYS 256
+static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
+static int overlay_first_id = -1;
+
+static void of_unittest_track_overlay(int id)
+{
+ if (overlay_first_id < 0)
+ overlay_first_id = id;
+ id -= overlay_first_id;
+
+ /* we shouldn't need that many */
+ BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
+}
+
+static void of_unittest_untrack_overlay(int id)
+{
+ if (overlay_first_id < 0)
+ return;
+ id -= overlay_first_id;
+ BUG_ON(id >= MAX_UNITTEST_OVERLAYS);
+ overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+}
+
+static void of_unittest_destroy_tracked_overlays(void)
+{
+ int id, ret, defers;
+
+ if (overlay_first_id < 0)
+ return;
+
+ /* try until no defers */
+ do {
+ defers = 0;
+ /* remove in reverse order */
+ for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
+ if (!(overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id)))
+ continue;
+
+ ret = of_overlay_destroy(id + overlay_first_id);
+ if (ret != 0) {
+ defers++;
+ pr_warn("%s: overlay destroy failed for #%d\n",
+ __func__, id + overlay_first_id);
+ continue;
+ }
+
+ overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
+ }
+ } while (defers > 0);
+}
+
static int of_unittest_apply_overlay(int unittest_nr, int overlay_nr,
int *overlay_id)
{
@@ -1130,6 +1185,7 @@ static int of_unittest_apply_overlay(int unittest_nr, int overlay_nr,
goto out;
}
id = ret;
+ of_unittest_track_overlay(id);
ret = 0;
@@ -1343,6 +1399,7 @@ static void of_unittest_overlay_6(void)
return;
}
ov_id[i] = ret;
+ of_unittest_track_overlay(ov_id[i]);
}
for (i = 0; i < 2; i++) {
@@ -1367,6 +1424,7 @@ static void of_unittest_overlay_6(void)
PDEV_OVERLAY));
return;
}
+ of_unittest_untrack_overlay(ov_id[i]);
}
for (i = 0; i < 2; i++) {
@@ -1411,6 +1469,7 @@ static void of_unittest_overlay_8(void)
return;
}
ov_id[i] = ret;
+ of_unittest_track_overlay(ov_id[i]);
}
/* now try to remove first overlay (it should fail) */
@@ -1433,6 +1492,7 @@ static void of_unittest_overlay_8(void)
PDEV_OVERLAY));
return;
}
+ of_unittest_untrack_overlay(ov_id[i]);
}
unittest(1, "overlay test %d passed\n", 8);
@@ -1855,6 +1915,8 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_i2c_cleanup();
#endif
+ of_unittest_destroy_tracked_overlays();
+
out:
of_node_put(bus_np);
}
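
The unittest teardown keeps one bit per created overlay id in an array of unsigned longs, indexed with BIT_WORD()/BIT_MASK(), and retries destruction of deferred entries in reverse order. A standalone model of just the bitmap bookkeeping (the ids and sizes are arbitrary):

#include <stdio.h>
#include <limits.h>

#define MAX_IDS		256
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define BIT_WORD(n)	((n) / BITS_PER_LONG)
#define BIT_MASK(n)	(1UL << ((n) % BITS_PER_LONG))

static unsigned long id_bits[(MAX_IDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void track(int id)   { id_bits[BIT_WORD(id)] |=  BIT_MASK(id); }
static void untrack(int id) { id_bits[BIT_WORD(id)] &= ~BIT_MASK(id); }
static int  tracked(int id) { return !!(id_bits[BIT_WORD(id)] & BIT_MASK(id)); }

int main(void)
{
	track(3);
	track(70);	/* lands in the second word on 64-bit hosts */
	untrack(3);
	printf("%d %d\n", tracked(3), tracked(70));	/* prints: 0 1 */
	return 0;
}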
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 3f493459378f..dd92c5edf219 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -138,22 +138,22 @@ static int __oprofilefs_create_file(struct dentry *root, char const *name,
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&root->d_inode->i_mutex);
+ mutex_lock(&d_inode(root)->i_mutex);
dentry = d_alloc_name(root, name);
if (!dentry) {
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return -ENOMEM;
}
inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return -ENOMEM;
}
inode->i_fop = fops;
inode->i_private = priv;
d_add(dentry, inode);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&d_inode(root)->i_mutex);
return 0;
}
@@ -215,22 +215,22 @@ struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&parent->d_inode->i_mutex);
+ mutex_lock(&d_inode(parent)->i_mutex);
dentry = d_alloc_name(parent, name);
if (!dentry) {
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return NULL;
}
inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
- mutex_unlock(&parent->d_inode->i_mutex);
+ mutex_unlock(&d_inode(parent)->i_mutex);
return dentry;
}
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 440ed776efd4..2a6531a5fde8 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,7 @@
menuconfig CHROME_PLATFORMS
bool "Platform support for Chrome hardware"
- depends on X86
+ depends on X86 || ARM
---help---
Say Y here to get to see options for platform support for
various Chromebooks and Chromeboxes. This option alone does
@@ -16,8 +16,7 @@ if CHROME_PLATFORMS
config CHROMEOS_LAPTOP
tristate "Chrome OS Laptop"
- depends on I2C
- depends on DMI
+ depends on I2C && DMI && X86
---help---
This driver instantiates i2c and smbus devices such as
light sensors and touchpads.
@@ -27,6 +26,7 @@ config CHROMEOS_LAPTOP
config CHROMEOS_PSTORE
tristate "Chrome OS pstore support"
+ depends on X86
---help---
This module instantiates the persistent storage on x86 ChromeOS
devices. It can be used to store away console logs and crash
@@ -38,5 +38,25 @@ config CHROMEOS_PSTORE
If you have a supported Chromebook, choose Y or M here.
The module will be called chromeos_pstore.
+config CROS_EC_CHARDEV
+ tristate "Chrome OS Embedded Controller userspace device interface"
+ depends on MFD_CROS_EC
+ ---help---
+ This driver adds support for talking to the ChromeOS EC from userspace.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called cros_ec_dev.
+
+config CROS_EC_LPC
+ tristate "ChromeOS Embedded Controller (LPC)"
+ depends on MFD_CROS_EC && (X86 || COMPILE_TEST)
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ over an LPC bus. This uses a simple byte-level protocol with a
+ checksum. This is used for userspace access only. The kernel
+ typically has its own communication methods.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_lpc.
endif # CHROME_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 2b860ca7450f..bd8d8601e875 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,3 +1,6 @@
obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
+cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o cros_ec_lightbar.o
+obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index b84fdd6b629b..a04019ab9feb 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -133,12 +133,13 @@ static struct i2c_client *__add_probed_i2c_device(
const char *name,
int bus,
struct i2c_board_info *info,
- const unsigned short *addrs)
+ const unsigned short *alt_addr_list)
{
const struct dmi_device *dmi_dev;
const struct dmi_dev_onboard *dev_data;
struct i2c_adapter *adapter;
- struct i2c_client *client;
+ struct i2c_client *client = NULL;
+ const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
if (bus < 0)
return NULL;
@@ -169,8 +170,28 @@ static struct i2c_client *__add_probed_i2c_device(
return NULL;
}
- /* add the i2c device */
- client = i2c_new_probed_device(adapter, info, addrs, NULL);
+ /*
+ * Add the i2c device. If we can't detect it at the primary
+ * address we scan secondary addresses. In any case the client
+ * structure gets assigned primary address.
+ */
+ client = i2c_new_probed_device(adapter, info, addr_list, NULL);
+ if (!client && alt_addr_list) {
+ struct i2c_board_info dummy_info = {
+ I2C_BOARD_INFO("dummy", info->addr),
+ };
+ struct i2c_client *dummy;
+
+ dummy = i2c_new_probed_device(adapter, &dummy_info,
+ alt_addr_list, NULL);
+ if (dummy) {
+ pr_debug("%s %d-%02x is probed at %02x\n",
+ __func__, bus, info->addr, dummy->addr);
+ i2c_unregister_device(dummy);
+ client = i2c_new_device(adapter, info);
+ }
+ }
+
if (!client)
pr_notice("%s failed to register device %d-%02x\n",
__func__, bus, info->addr);
@@ -254,12 +275,10 @@ static struct i2c_client *add_i2c_device(const char *name,
enum i2c_adapter_type type,
struct i2c_board_info *info)
{
- const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
-
return __add_probed_i2c_device(name,
find_i2c_adapter_num(type),
info,
- addr_list);
+ NULL);
}
static int setup_cyapa_tp(enum i2c_adapter_type type)
@@ -275,7 +294,6 @@ static int setup_cyapa_tp(enum i2c_adapter_type type)
static int setup_atmel_224s_tp(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
- ATMEL_TP_I2C_ADDR,
I2C_CLIENT_END };
if (tp)
return 0;
@@ -289,7 +307,6 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type)
static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
- ATMEL_TS_I2C_ADDR,
I2C_CLIENT_END };
if (ts)
return 0;
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
new file mode 100644
index 000000000000..6090d0b2826f
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -0,0 +1,274 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to user-space
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Device variables */
+#define CROS_MAX_DEV 128
+static struct class *cros_class;
+static int ec_major;
+
+/* Basic communication */
+static int ec_get_version(struct cros_ec_device *ec, char *str, int maxlen)
+{
+ struct ec_response_get_version *resp;
+ static const char * const current_image_name[] = {
+ "unknown", "read-only", "read-write", "invalid",
+ };
+ struct cros_ec_command msg = {
+ .version = 0,
+ .command = EC_CMD_GET_VERSION,
+ .outdata = { 0 },
+ .outsize = 0,
+ .indata = { 0 },
+ .insize = sizeof(*resp),
+ };
+ int ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS) {
+ snprintf(str, maxlen,
+ "%s\nUnknown EC version: EC returned %d\n",
+ CROS_EC_DEV_VERSION, msg.result);
+ return 0;
+ }
+
+ resp = (struct ec_response_get_version *)msg.indata;
+ if (resp->current_image >= ARRAY_SIZE(current_image_name))
+ resp->current_image = 3; /* invalid */
+
+ snprintf(str, maxlen, "%s\n%s\n%s\n%s\n", CROS_EC_DEV_VERSION,
+ resp->version_string_ro, resp->version_string_rw,
+ current_image_name[resp->current_image]);
+
+ return 0;
+}
+
+/* Device file ops */
+static int ec_device_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = container_of(inode->i_cdev,
+ struct cros_ec_device, cdev);
+ return 0;
+}
+
+static int ec_device_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static ssize_t ec_device_read(struct file *filp, char __user *buffer,
+ size_t length, loff_t *offset)
+{
+ struct cros_ec_device *ec = filp->private_data;
+ char msg[sizeof(struct ec_response_get_version) +
+ sizeof(CROS_EC_DEV_VERSION)];
+ size_t count;
+ int ret;
+
+ if (*offset != 0)
+ return 0;
+
+ ret = ec_get_version(ec, msg, sizeof(msg));
+ if (ret)
+ return ret;
+
+ count = min(length, strlen(msg));
+
+ if (copy_to_user(buffer, msg, count))
+ return -EFAULT;
+
+ *offset = count;
+ return count;
+}
+
+/* Ioctls */
+static long ec_device_ioctl_xcmd(struct cros_ec_device *ec, void __user *arg)
+{
+ long ret;
+ struct cros_ec_command s_cmd = { };
+
+ if (copy_from_user(&s_cmd, arg, sizeof(s_cmd)))
+ return -EFAULT;
+
+ ret = cros_ec_cmd_xfer(ec, &s_cmd);
+ /* Only copy data to userland if data was received. */
+ if (ret < 0)
+ return ret;
+
+ if (copy_to_user(arg, &s_cmd, sizeof(s_cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ec_device_ioctl_readmem(struct cros_ec_device *ec, void __user *arg)
+{
+ struct cros_ec_readmem s_mem = { };
+ long num;
+
+ /* Not every platform supports direct reads */
+ if (!ec->cmd_readmem)
+ return -ENOTTY;
+
+ if (copy_from_user(&s_mem, arg, sizeof(s_mem)))
+ return -EFAULT;
+
+ num = ec->cmd_readmem(ec, s_mem.offset, s_mem.bytes, s_mem.buffer);
+ if (num <= 0)
+ return num;
+
+ if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long ec_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct cros_ec_device *ec = filp->private_data;
+
+ if (_IOC_TYPE(cmd) != CROS_EC_DEV_IOC)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case CROS_EC_DEV_IOCXCMD:
+ return ec_device_ioctl_xcmd(ec, (void __user *)arg);
+ case CROS_EC_DEV_IOCRDMEM:
+ return ec_device_ioctl_readmem(ec, (void __user *)arg);
+ }
+
+ return -ENOTTY;
+}
+
+/* Module initialization */
+static const struct file_operations fops = {
+ .open = ec_device_open,
+ .release = ec_device_release,
+ .read = ec_device_read,
+ .unlocked_ioctl = ec_device_ioctl,
+};
+
+static int ec_device_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ int retval = -ENOTTY;
+ dev_t devno = MKDEV(ec_major, 0);
+
+ /* Instantiate it (and remember the EC) */
+ cdev_init(&ec->cdev, &fops);
+
+ retval = cdev_add(&ec->cdev, devno, 1);
+ if (retval) {
+ dev_err(&pdev->dev, ": failed to add character device\n");
+ return retval;
+ }
+
+ ec->vdev = device_create(cros_class, NULL, devno, ec,
+ CROS_EC_DEV_NAME);
+ if (IS_ERR(ec->vdev)) {
+ retval = PTR_ERR(ec->vdev);
+ dev_err(&pdev->dev, ": failed to create device\n");
+ cdev_del(&ec->cdev);
+ return retval;
+ }
+
+ /* Initialize extra interfaces */
+ ec_dev_sysfs_init(ec);
+ ec_dev_lightbar_init(ec);
+
+ return 0;
+}
+
+static int ec_device_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ ec_dev_lightbar_remove(ec);
+ ec_dev_sysfs_remove(ec);
+ device_destroy(cros_class, MKDEV(ec_major, 0));
+ cdev_del(&ec->cdev);
+ return 0;
+}
+
+static struct platform_driver cros_ec_dev_driver = {
+ .driver = {
+ .name = "cros-ec-ctl",
+ },
+ .probe = ec_device_probe,
+ .remove = ec_device_remove,
+};
+
+static int __init cros_ec_dev_init(void)
+{
+ int ret;
+ dev_t dev = 0;
+
+ cros_class = class_create(THIS_MODULE, "chromeos");
+ if (IS_ERR(cros_class)) {
+ pr_err(CROS_EC_DEV_NAME ": failed to register device class\n");
+ return PTR_ERR(cros_class);
+ }
+
+ /* Get a range of minor numbers (starting with 0) to work with */
+ ret = alloc_chrdev_region(&dev, 0, CROS_MAX_DEV, CROS_EC_DEV_NAME);
+ if (ret < 0) {
+ pr_err(CROS_EC_DEV_NAME ": alloc_chrdev_region() failed\n");
+ goto failed_chrdevreg;
+ }
+ ec_major = MAJOR(dev);
+
+ /* Register the driver */
+ ret = platform_driver_register(&cros_ec_dev_driver);
+ if (ret < 0) {
+ pr_warn(CROS_EC_DEV_NAME ": can't register driver: %d\n", ret);
+ goto failed_devreg;
+ }
+ return 0;
+
+failed_devreg:
+ unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV);
+failed_chrdevreg:
+ class_destroy(cros_class);
+ return ret;
+}
+
+static void __exit cros_ec_dev_exit(void)
+{
+ platform_driver_unregister(&cros_ec_dev_driver);
+ unregister_chrdev_region(MKDEV(ec_major, 0), CROS_MAX_DEV);
+ class_destroy(cros_class);
+}
+
+module_init(cros_ec_dev_init);
+module_exit(cros_ec_dev_exit);
+
+MODULE_AUTHOR("Bill Richardson <wfrichar@chromium.org>");
+MODULE_DESCRIPTION("Userspace interface to the Chrome OS Embedded Controller");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/platform/chrome/cros_ec_dev.h
new file mode 100644
index 000000000000..45d67f7e518c
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_dev.h
@@ -0,0 +1,53 @@
+/*
+ * cros_ec_dev - expose the Chrome OS Embedded Controller to userspace
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CROS_EC_DEV_H_
+#define _CROS_EC_DEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/mfd/cros_ec.h>
+
+#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_VERSION "1.0.0"
+
+/*
+ * @offset: within EC_LPC_ADDR_MEMMAP region
+ * @bytes: number of bytes to read. zero means "read a string" (including '\0')
+ * (at most EC_MEMMAP_SIZE bytes can be read)
+ * @buffer: where to store the result
+ * ioctl returns the number of bytes read, negative on error
+ */
+struct cros_ec_readmem {
+ uint32_t offset;
+ uint32_t bytes;
+ uint8_t buffer[EC_MEMMAP_SIZE];
+};
+
+#define CROS_EC_DEV_IOC 0xEC
+#define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command)
+#define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem)
+
+void ec_dev_sysfs_init(struct cros_ec_device *);
+void ec_dev_sysfs_remove(struct cros_ec_device *);
+
+void ec_dev_lightbar_init(struct cros_ec_device *);
+void ec_dev_lightbar_remove(struct cros_ec_device *);
+
+#endif /* _CROS_EC_DEV_H_ */
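
The ioctl interface defined above can be exercised from user space roughly as follows. This is an illustrative sketch, not part of the patch: it assumes the character device appears as /dev/cros_ec (derived from CROS_EC_DEV_NAME) and that struct cros_ec_command, struct ec_response_get_version and the EC_CMD_*/EC_RES_* constants are visible to the program.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct cros_ec_command cmd;
	struct ec_response_get_version *ver;
	int fd, ret;

	fd = open("/dev/cros_ec", O_RDWR);	/* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.command = EC_CMD_GET_VERSION;
	cmd.outsize = 0;
	cmd.insize = sizeof(*ver);

	/* ec_device_ioctl_xcmd() returns 0 on success, negative on error */
	ret = ioctl(fd, CROS_EC_DEV_IOCXCMD, &cmd);
	if (ret < 0 || cmd.result != EC_RES_SUCCESS) {
		fprintf(stderr, "EC command failed: %d/%d\n", ret, cmd.result);
		close(fd);
		return 1;
	}

	ver = (struct ec_response_get_version *)cmd.indata;
	printf("RO: %s\nRW: %s\n", ver->version_string_ro,
	       ver->version_string_rw);
	close(fd);
	return 0;
}
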
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
new file mode 100644
index 000000000000..b4ff47a9069a
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -0,0 +1,367 @@
+/*
+ * cros_ec_lightbar - expose the Chromebook Pixel lightbar to userspace
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "cros_ec_lightbar: " fmt
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Rate-limit the lightbar interface to prevent DoS. */
+static unsigned long lb_interval_jiffies = 50 * HZ / 1000;
+
+static ssize_t interval_msec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long msec = lb_interval_jiffies * 1000 / HZ;
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", msec);
+}
+
+static ssize_t interval_msec_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long msec;
+
+ if (kstrtoul(buf, 0, &msec))
+ return -EINVAL;
+
+ lb_interval_jiffies = msec * HZ / 1000;
+
+ return count;
+}
+
+static DEFINE_MUTEX(lb_mutex);
+/* Return 0 if able to throttle correctly, error otherwise */
+static int lb_throttle(void)
+{
+ static unsigned long last_access;
+ unsigned long now, next_timeslot;
+ long delay;
+ int ret = 0;
+
+ mutex_lock(&lb_mutex);
+
+ now = jiffies;
+ next_timeslot = last_access + lb_interval_jiffies;
+
+ if (time_before(now, next_timeslot)) {
+ delay = (long)(next_timeslot) - (long)now;
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(delay) > 0) {
+ /* interrupted - just abort */
+ ret = -EINTR;
+ goto out;
+ }
+ now = jiffies;
+ }
+
+ last_access = now;
+out:
+ mutex_unlock(&lb_mutex);
+
+ return ret;
+}
+
+#define INIT_MSG(P, R) { \
+ .command = EC_CMD_LIGHTBAR_CMD, \
+ .outsize = sizeof(*P), \
+ .insize = sizeof(*R), \
+ }
+
+static int get_lightbar_version(struct cros_ec_device *ec,
+ uint32_t *ver_ptr, uint32_t *flg_ptr)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_VERSION;
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return 0;
+
+ switch (msg.result) {
+ case EC_RES_INVALID_PARAM:
+ /* Pixel had no version command. */
+ if (ver_ptr)
+ *ver_ptr = 0;
+ if (flg_ptr)
+ *flg_ptr = 0;
+ return 1;
+
+ case EC_RES_SUCCESS:
+ resp = (struct ec_response_lightbar *)msg.indata;
+
+ /* Future devices with lightbars should implement this command */
+ if (ver_ptr)
+ *ver_ptr = resp->version.num;
+ if (flg_ptr)
+ *flg_ptr = resp->version.flags;
+ return 1;
+ }
+
+ /* Anything else (i.e. EC_RES_INVALID_COMMAND) - no lightbar */
+ return 0;
+}
+
+static ssize_t version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ uint32_t version, flags;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ /* This should always succeed, because we check during init. */
+ if (!get_lightbar_version(ec, &version, &flags))
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "%d %d\n", version, flags);
+}
+
+static ssize_t brightness_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+ unsigned int val;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ if (kstrtouint(buf, 0, &val))
+ return -EINVAL;
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_BRIGHTNESS;
+ param->brightness.num = val;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ return count;
+}
+
+
+/*
+ * We expect numbers, and we'll keep reading until we find them, skipping over
+ * any whitespace (sysfs guarantees that the input is null-terminated). Every
+ * four numbers are sent to the lightbar as <LED,R,G,B>. We fail at the first
+ * parsing error, if we don't parse any numbers, or if we have numbers left
+ * over.
+ */
+static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+ unsigned int val[4];
+ int ret, i = 0, j = 0, ok = 0;
+
+ do {
+ /* Skip any whitespace */
+ while (*buf && isspace(*buf))
+ buf++;
+
+ if (!*buf)
+ break;
+
+ ret = sscanf(buf, "%i", &val[i++]);
+ if (ret == 0)
+ return -EINVAL;
+
+ if (i == 4) {
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_RGB;
+ param->rgb.led = val[0];
+ param->rgb.red = val[1];
+ param->rgb.green = val[2];
+ param->rgb.blue = val[3];
+ /*
+ * Throttle only the first of every four transactions,
+ * so that the user can update all four LEDs at once.
+ */
+ if ((j++ % 4) == 0) {
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+ }
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ i = 0;
+ ok = 1;
+ }
+
+ /* Skip over the number we just read */
+ while (*buf && !isspace(*buf))
+ buf++;
+
+ } while (*buf);
+
+ return (ok && i == 0) ? count : -EINVAL;
+}
+
+static char const *seqname[] = {
+ "ERROR", "S5", "S3", "S0", "S5S3", "S3S0",
+ "S0S3", "S3S5", "STOP", "RUN", "PULSE", "TEST", "KONAMI",
+};
+
+static ssize_t sequence_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_GET_SEQ;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ resp = (struct ec_response_lightbar *)msg.indata;
+ if (resp->get_seq.num >= ARRAY_SIZE(seqname))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", resp->get_seq.num);
+ else
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+ seqname[resp->get_seq.num]);
+}
+
+static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ec_params_lightbar *param;
+ struct ec_response_lightbar *resp;
+ struct cros_ec_command msg = INIT_MSG(param, resp);
+ unsigned int num;
+ int ret, len;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ for (len = 0; len < count; len++)
+ if (!isalnum(buf[len]))
+ break;
+
+ for (num = 0; num < ARRAY_SIZE(seqname); num++)
+ if (!strncasecmp(seqname[num], buf, len))
+ break;
+
+ if (num >= ARRAY_SIZE(seqname)) {
+ ret = kstrtouint(buf, 0, &num);
+ if (ret)
+ return ret;
+ }
+
+ param = (struct ec_params_lightbar *)msg.outdata;
+ param->cmd = LIGHTBAR_CMD_SEQ;
+ param->seq.num = num;
+ ret = lb_throttle();
+ if (ret)
+ return ret;
+
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg.result != EC_RES_SUCCESS)
+ return -EINVAL;
+
+ return count;
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR_RW(interval_msec);
+static DEVICE_ATTR_RO(version);
+static DEVICE_ATTR_WO(brightness);
+static DEVICE_ATTR_WO(led_rgb);
+static DEVICE_ATTR_RW(sequence);
+static struct attribute *__lb_cmds_attrs[] = {
+ &dev_attr_interval_msec.attr,
+ &dev_attr_version.attr,
+ &dev_attr_brightness.attr,
+ &dev_attr_led_rgb.attr,
+ &dev_attr_sequence.attr,
+ NULL,
+};
+static struct attribute_group lb_cmds_attr_group = {
+ .name = "lightbar",
+ .attrs = __lb_cmds_attrs,
+};
+
+void ec_dev_lightbar_init(struct cros_ec_device *ec)
+{
+ int ret = 0;
+
+ /* Only instantiate this stuff if the EC has a lightbar */
+ if (!get_lightbar_version(ec, NULL, NULL))
+ return;
+
+ ret = sysfs_create_group(&ec->vdev->kobj, &lb_cmds_attr_group);
+ if (ret)
+ pr_warn("sysfs_create_group() failed: %d\n", ret);
+}
+
+void ec_dev_lightbar_remove(struct cros_ec_device *ec)
+{
+ sysfs_remove_group(&ec->vdev->kobj, &lb_cmds_attr_group);
+}
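
The attributes above land in a "lightbar" group on the EC class device. A minimal user-space sketch, not part of the patch: the path assumes class "chromeos" and device name "cros_ec", and led_rgb takes groups of four numbers, <LED R G B>, as described in led_rgb_store().

#include <stdio.h>

/* Write a string to a sysfs attribute; returns 0 on success, -1 on error. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	const char *base = "/sys/class/chromeos/cros_ec/lightbar"; /* assumed path */
	char path[128];

	/* Set LED 0 to green: one <LED R G B> quadruple. */
	snprintf(path, sizeof(path), "%s/led_rgb", base);
	write_attr(path, "0 0 255 0");

	/* Select the RUN sequence (one of the names in seqname[]). */
	snprintf(path, sizeof(path), "%s/sequence", base);
	write_attr(path, "RUN");

	return 0;
}
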
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
new file mode 100644
index 000000000000..8f9ac4d7bbd0
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -0,0 +1,319 @@
+/*
+ * cros_ec_lpc - LPC access to the Chrome OS Embedded Controller
+ *
+ * Copyright (C) 2012-2015 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver talks to the Chrome OS Embedded Controller over the LPC
+ * bus using the EC's byte-level, message-based host command protocol,
+ * and registers the EC with the cros_ec core. Keeping the protocol
+ * handling on the AP side helps keep the EC firmware as simple as
+ * possible, since it cannot be easily upgraded and EC flash/IRAM
+ * space is relatively expensive. Direct reads of the EC's memory-mapped
+ * region are also supported via the cmd_readmem hook.
+ */
+
+#include <linux/dmi.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define DRV_NAME "cros_ec_lpc"
+
+static int ec_response_timed_out(void)
+{
+ unsigned long one_second = jiffies + HZ;
+
+ usleep_range(200, 300);
+ do {
+ if (!(inb(EC_LPC_ADDR_HOST_CMD) & EC_LPC_STATUS_BUSY_MASK))
+ return 0;
+ usleep_range(100, 200);
+ } while (time_before(jiffies, one_second));
+
+ return 1;
+}
+
+static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec,
+ struct cros_ec_command *msg)
+{
+ struct ec_lpc_host_args args;
+ int csum;
+ int i;
+ int ret = 0;
+
+ if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE ||
+ msg->insize > EC_PROTO2_MAX_PARAM_SIZE) {
+ dev_err(ec->dev,
+ "invalid buffer sizes (out %d, in %d)\n",
+ msg->outsize, msg->insize);
+ return -EINVAL;
+ }
+
+ /* Now actually send the command to the EC and get the result */
+ args.flags = EC_HOST_ARGS_FLAG_FROM_HOST;
+ args.command_version = msg->version;
+ args.data_size = msg->outsize;
+
+ /* Initialize checksum */
+ csum = msg->command + args.flags +
+ args.command_version + args.data_size;
+
+ /* Copy data and update checksum */
+ for (i = 0; i < msg->outsize; i++) {
+ outb(msg->outdata[i], EC_LPC_ADDR_HOST_PARAM + i);
+ csum += msg->outdata[i];
+ }
+
+ /* Finalize checksum and write args */
+ args.checksum = csum & 0xFF;
+ outb(args.flags, EC_LPC_ADDR_HOST_ARGS);
+ outb(args.command_version, EC_LPC_ADDR_HOST_ARGS + 1);
+ outb(args.data_size, EC_LPC_ADDR_HOST_ARGS + 2);
+ outb(args.checksum, EC_LPC_ADDR_HOST_ARGS + 3);
+
+ /* Here we go */
+ outb(msg->command, EC_LPC_ADDR_HOST_CMD);
+
+ if (ec_response_timed_out()) {
+ dev_warn(ec->dev, "EC response timed out\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ /* Check result */
+ msg->result = inb(EC_LPC_ADDR_HOST_DATA);
+
+ switch (msg->result) {
+ case EC_RES_SUCCESS:
+ break;
+ case EC_RES_IN_PROGRESS:
+ ret = -EAGAIN;
+ dev_dbg(ec->dev, "command 0x%02x in progress\n",
+ msg->command);
+ goto done;
+ default:
+ dev_dbg(ec->dev, "command 0x%02x returned %d\n",
+ msg->command, msg->result);
+ }
+
+ /* Read back args */
+ args.flags = inb(EC_LPC_ADDR_HOST_ARGS);
+ args.command_version = inb(EC_LPC_ADDR_HOST_ARGS + 1);
+ args.data_size = inb(EC_LPC_ADDR_HOST_ARGS + 2);
+ args.checksum = inb(EC_LPC_ADDR_HOST_ARGS + 3);
+
+ if (args.data_size > msg->insize) {
+ dev_err(ec->dev,
+ "packet too long (%d bytes, expected %d)",
+ args.data_size, msg->insize);
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ /* Start calculating response checksum */
+ csum = msg->command + args.flags +
+ args.command_version + args.data_size;
+
+ /* Read response and update checksum */
+ for (i = 0; i < args.data_size; i++) {
+ msg->indata[i] = inb(EC_LPC_ADDR_HOST_PARAM + i);
+ csum += msg->indata[i];
+ }
+
+ /* Verify checksum */
+ if (args.checksum != (csum & 0xFF)) {
+ dev_err(ec->dev,
+ "bad packet checksum, expected %02x, got %02x\n",
+ args.checksum, csum & 0xFF);
+ ret = -EBADMSG;
+ goto done;
+ }
+
+ /* Return actual amount of data received */
+ ret = args.data_size;
+done:
+ return ret;
+}
+
+/* Returns num bytes read, or negative on error. Doesn't need locking. */
+static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest)
+{
+ int i = offset;
+ char *s = dest;
+ int cnt = 0;
+
+ if (offset >= EC_MEMMAP_SIZE - bytes)
+ return -EINVAL;
+
+ /* fixed length */
+ if (bytes) {
+ for (; cnt < bytes; i++, s++, cnt++)
+ *s = inb(EC_LPC_ADDR_MEMMAP + i);
+ return cnt;
+ }
+
+ /* string */
+ for (; i < EC_MEMMAP_SIZE; i++, s++) {
+ *s = inb(EC_LPC_ADDR_MEMMAP + i);
+ cnt++;
+ if (!*s)
+ break;
+ }
+
+ return cnt;
+}
+
+static int cros_ec_lpc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_device *ec_dev;
+ int ret;
+
+ if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
+ dev_name(dev))) {
+ dev_err(dev, "couldn't reserve memmap region\n");
+ return -EBUSY;
+ }
+
+ if ((inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID) != 'E') ||
+ (inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID + 1) != 'C')) {
+ dev_err(dev, "EC ID not detected\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION0,
+ EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve region0\n");
+ return -EBUSY;
+ }
+ if (!devm_request_region(dev, EC_HOST_CMD_REGION1,
+ EC_HOST_CMD_REGION_SIZE, dev_name(dev))) {
+ dev_err(dev, "couldn't reserve region1\n");
+ return -EBUSY;
+ }
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ec_dev);
+ ec_dev->dev = dev;
+ ec_dev->ec_name = pdev->name;
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->parent = dev;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_lpc;
+ ec_dev->cmd_readmem = cros_ec_lpc_readmem;
+
+ ret = cros_ec_register(ec_dev);
+ if (ret) {
+ dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_lpc_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec_dev;
+
+ ec_dev = platform_get_drvdata(pdev);
+ cros_ec_remove(ec_dev);
+
+ return 0;
+}
+
+static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = {
+ {
+ /*
+ * Today all Chromebooks/boxes ship with Google_* as the BIOS
+ * version and coreboot as the BIOS vendor. No other systems
+ * with this combination are known to date.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
+ },
+ },
+ {
+ /* x86-link, the Chromebook Pixel. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ },
+ {
+ /* x86-peppy, the Acer C720 Chromebook. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
+
+static struct platform_driver cros_ec_lpc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_ec_lpc_probe,
+ .remove = cros_ec_lpc_remove,
+};
+
+static struct platform_device cros_ec_lpc_device = {
+ .name = DRV_NAME
+};
+
+static int __init cros_ec_lpc_init(void)
+{
+ int ret;
+
+ if (!dmi_check_system(cros_ec_lpc_dmi_table)) {
+ pr_err(DRV_NAME ": unsupported system.\n");
+ return -ENODEV;
+ }
+
+ /* Register the driver */
+ ret = platform_driver_register(&cros_ec_lpc_driver);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register driver: %d\n", ret);
+ return ret;
+ }
+
+ /* Register the device, and it'll get hooked up automatically */
+ ret = platform_device_register(&cros_ec_lpc_device);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register device: %d\n", ret);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit cros_ec_lpc_exit(void)
+{
+ platform_device_unregister(&cros_ec_lpc_device);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+}
+
+module_init(cros_ec_lpc_init);
+module_exit(cros_ec_lpc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS EC LPC driver");
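
For reviewers, the request framing in cros_ec_cmd_xfer_lpc() is easy to check by hand: args.checksum is just the low byte of (command + flags + command_version + data_size + every parameter byte). Below is a standalone sketch of that arithmetic with made-up values; it is illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the request checksum computed in cros_ec_cmd_xfer_lpc(). */
static uint8_t lpc_args_checksum(uint8_t command, uint8_t flags,
				 uint8_t version, const uint8_t *data,
				 uint8_t len)
{
	unsigned int csum = command + flags + version + len;
	unsigned int i;

	for (i = 0; i < len; i++)
		csum += data[i];

	return csum & 0xFF;	/* same truncation as args.checksum */
}

int main(void)
{
	/* Arbitrary example: command 0x02, flags 0x01, version 0, two bytes. */
	const uint8_t params[] = { 0x01, 0x10 };

	printf("checksum = 0x%02x\n",
	       lpc_args_checksum(0x02, 0x01, 0, params, sizeof(params)));
	/* 0x02 + 0x01 + 0x00 + 0x02 + 0x01 + 0x10 = 0x16 */
	return 0;
}
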
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
new file mode 100644
index 000000000000..fb62ab6cc659
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -0,0 +1,271 @@
+/*
+ * cros_ec_sysfs - expose the Chrome OS EC through sysfs
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "cros_ec_sysfs: " fmt
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include "cros_ec_dev.h"
+
+/* Accessor functions */
+
+static ssize_t show_ec_reboot(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int count = 0;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "ro|rw|cancel|cold|disable-jump|hibernate");
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ " [at-shutdown]\n");
+ return count;
+}
+
+static ssize_t store_ec_reboot(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ static const struct {
+ const char * const str;
+ uint8_t cmd;
+ uint8_t flags;
+ } words[] = {
+ {"cancel", EC_REBOOT_CANCEL, 0},
+ {"ro", EC_REBOOT_JUMP_RO, 0},
+ {"rw", EC_REBOOT_JUMP_RW, 0},
+ {"cold", EC_REBOOT_COLD, 0},
+ {"disable-jump", EC_REBOOT_DISABLE_JUMP, 0},
+ {"hibernate", EC_REBOOT_HIBERNATE, 0},
+ {"at-shutdown", -1, EC_REBOOT_FLAG_ON_AP_SHUTDOWN},
+ };
+ struct cros_ec_command msg = { 0 };
+ struct ec_params_reboot_ec *param =
+ (struct ec_params_reboot_ec *)msg.outdata;
+ int got_cmd = 0, offset = 0;
+ int i;
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ param->flags = 0;
+ while (1) {
+ /* Find word to start scanning */
+ while (buf[offset] && isspace(buf[offset]))
+ offset++;
+ if (!buf[offset])
+ break;
+
+ for (i = 0; i < ARRAY_SIZE(words); i++) {
+ if (!strncasecmp(words[i].str, buf+offset,
+ strlen(words[i].str))) {
+ if (words[i].flags) {
+ param->flags |= words[i].flags;
+ } else {
+ param->cmd = words[i].cmd;
+ got_cmd = 1;
+ }
+ break;
+ }
+ }
+
+ /* On to the next word, if any */
+ while (buf[offset] && !isspace(buf[offset]))
+ offset++;
+ }
+
+ if (!got_cmd)
+ return -EINVAL;
+
+ msg.command = EC_CMD_REBOOT_EC;
+ msg.outsize = sizeof(*param);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS) {
+ dev_dbg(ec->dev, "EC result %d\n", msg.result);
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t show_ec_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ static const char * const image_names[] = {"unknown", "RO", "RW"};
+ struct ec_response_get_version *r_ver;
+ struct ec_response_get_chip_info *r_chip;
+ struct ec_response_board_version *r_board;
+ struct cros_ec_command msg = { 0 };
+ int ret;
+ int count = 0;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ /* Get versions. RW may change. */
+ msg.command = EC_CMD_GET_VERSION;
+ msg.insize = sizeof(*r_ver);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ r_ver = (struct ec_response_get_version *)msg.indata;
+ /* Strings should be null-terminated, but let's be sure. */
+ r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
+ r_ver->version_string_rw[sizeof(r_ver->version_string_rw) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RO version: %s\n", r_ver->version_string_ro);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "RW version: %s\n", r_ver->version_string_rw);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Firmware copy: %s\n",
+ (r_ver->current_image < ARRAY_SIZE(image_names) ?
+ image_names[r_ver->current_image] : "?"));
+
+ /* Get build info. */
+ msg.command = EC_CMD_GET_BUILD_INFO;
+ msg.insize = sizeof(msg.indata);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: EC error %d\n", msg.result);
+ else {
+ msg.indata[sizeof(msg.indata) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: %s\n", msg.indata);
+ }
+
+ /* Get chip info. */
+ msg.command = EC_CMD_GET_CHIP_INFO;
+ msg.insize = sizeof(*r_chip);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: EC error %d\n", msg.result);
+ else {
+ r_chip = (struct ec_response_get_chip_info *)msg.indata;
+
+ r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
+ r_chip->name[sizeof(r_chip->name) - 1] = '\0';
+ r_chip->revision[sizeof(r_chip->revision) - 1] = '\0';
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip vendor: %s\n", r_chip->vendor);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip name: %s\n", r_chip->name);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip revision: %s\n", r_chip->revision);
+ }
+
+ /* Get board version */
+ msg.command = EC_CMD_GET_BOARD_VERSION;
+ msg.insize = sizeof(*r_board);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: XFER ERROR %d\n", ret);
+ else if (msg.result != EC_RES_SUCCESS)
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: EC error %d\n", msg.result);
+ else {
+ r_board = (struct ec_response_board_version *)msg.indata;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: %d\n",
+ r_board->board_version);
+ }
+
+ return count;
+}
+
+static ssize_t show_ec_flashinfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ec_response_flash_info *resp;
+ struct cros_ec_command msg = { 0 };
+ int ret;
+ struct cros_ec_device *ec = dev_get_drvdata(dev);
+
+ /* The flash info shouldn't ever change, but ask each time anyway. */
+ msg.command = EC_CMD_FLASH_INFO;
+ msg.insize = sizeof(*resp);
+ ret = cros_ec_cmd_xfer(ec, &msg);
+ if (ret < 0)
+ return ret;
+ if (msg.result != EC_RES_SUCCESS)
+ return scnprintf(buf, PAGE_SIZE,
+ "ERROR: EC returned %d\n", msg.result);
+
+ resp = (struct ec_response_flash_info *)msg.indata;
+
+ return scnprintf(buf, PAGE_SIZE,
+ "FlashSize %d\nWriteSize %d\n"
+ "EraseSize %d\nProtectSize %d\n",
+ resp->flash_size, resp->write_block_size,
+ resp->erase_block_size, resp->protect_block_size);
+}
+
+/* Module initialization */
+
+static DEVICE_ATTR(reboot, S_IWUSR | S_IRUGO, show_ec_reboot, store_ec_reboot);
+static DEVICE_ATTR(version, S_IRUGO, show_ec_version, NULL);
+static DEVICE_ATTR(flashinfo, S_IRUGO, show_ec_flashinfo, NULL);
+
+static struct attribute *__ec_attrs[] = {
+ &dev_attr_reboot.attr,
+ &dev_attr_version.attr,
+ &dev_attr_flashinfo.attr,
+ NULL,
+};
+
+static struct attribute_group ec_attr_group = {
+ .attrs = __ec_attrs,
+};
+
+void ec_dev_sysfs_init(struct cros_ec_device *ec)
+{
+ int error;
+
+ error = sysfs_create_group(&ec->vdev->kobj, &ec_attr_group);
+ if (error)
+ pr_warn("failed to create group: %d\n", error);
+}
+
+void ec_dev_sysfs_remove(struct cros_ec_device *ec)
+{
+ sysfs_remove_group(&ec->vdev->kobj, &ec_attr_group);
+}
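
The reboot, version and flashinfo attributes above sit directly on the EC class device. A short user-space sketch, not part of the patch: the path again assumes class "chromeos" and device "cros_ec", and "rw at-shutdown" combines the EC_REBOOT_JUMP_RW command with the EC_REBOOT_FLAG_ON_AP_SHUTDOWN flag per the word table in store_ec_reboot().

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f;

	/* Dump the EC version/build/chip/board report. */
	f = fopen("/sys/class/chromeos/cros_ec/version", "r"); /* assumed path */
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	/* Request a jump to the RW image at the next AP shutdown. */
	f = fopen("/sys/class/chromeos/cros_ec/reboot", "w");
	if (f) {
		fputs("rw at-shutdown", f);
		fclose(f);
	}

	return 0;
}
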
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 97527614141b..f9f205cb1f11 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -614,6 +614,7 @@ config ACPI_TOSHIBA
depends on INPUT
depends on RFKILL || RFKILL = n
depends on SERIO_I8042 || SERIO_I8042 = n
+ depends on ACPI_VIDEO || ACPI_VIDEO = n
select INPUT_POLLDEV
select INPUT_SPARSEKMAP
---help---
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 66d6d22c239c..6808715003f6 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>
+#include <linux/vgaarb.h>
#include <acpi/video.h>
#include <asm/io.h>
@@ -31,6 +32,7 @@ struct apple_gmux_data {
bool indexed;
struct mutex index_lock;
+ struct pci_dev *pdev;
struct backlight_device *bdev;
/* switcheroo data */
@@ -415,6 +417,23 @@ static int gmux_resume(struct device *dev)
return 0;
}
+static struct pci_dev *gmux_get_io_pdev(void)
+{
+ struct pci_dev *pdev = NULL;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) {
+ u16 cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_IO))
+ continue;
+
+ return pdev;
+ }
+
+ return NULL;
+}
+
static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
{
struct apple_gmux_data *gmux_data;
@@ -425,6 +444,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
int ret = -ENXIO;
acpi_status status;
unsigned long long gpe;
+ struct pci_dev *pdev = NULL;
if (apple_gmux_data)
return -EBUSY;
@@ -475,7 +495,7 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
ver_minor = (version >> 16) & 0xff;
ver_release = (version >> 8) & 0xff;
} else {
- pr_info("gmux device not present\n");
+ pr_info("gmux device not present or IO disabled\n");
ret = -ENODEV;
goto err_release;
}
@@ -483,6 +503,23 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor,
ver_release, (gmux_data->indexed ? "indexed" : "classic"));
+ /*
+ * Apple systems with gmux are EFI based and normally don't use
+ * VGA. In addition, changing IO+MEM ownership between the IGP and
+ * dGPU disables the IO/MEM used for backlight control on some
+ * systems. Lock IO+MEM to the GPU with active IO to prevent the switch.
+ */
+ pdev = gmux_get_io_pdev();
+ if (pdev && vga_tryget(pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM)) {
+ pr_err("IO+MEM vgaarb-locking for PCI:%s failed\n",
+ pci_name(pdev));
+ ret = -EBUSY;
+ goto err_release;
+ } else if (pdev)
+ pr_info("locked IO for PCI:%s\n", pci_name(pdev));
+ gmux_data->pdev = pdev;
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
@@ -574,6 +611,10 @@ err_enable_gpe:
err_notify:
backlight_device_unregister(bdev);
err_release:
+ if (gmux_data->pdev)
+ vga_put(gmux_data->pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
+ pci_dev_put(pdev);
release_region(gmux_data->iostart, gmux_data->iolen);
err_free:
kfree(gmux_data);
@@ -593,6 +634,11 @@ static void gmux_remove(struct pnp_dev *pnp)
&gmux_notify_handler);
}
+ if (gmux_data->pdev) {
+ vga_put(gmux_data->pdev,
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
+ pci_dev_put(gmux_data->pdev);
+ }
backlight_device_unregister(gmux_data->bdev);
release_region(gmux_data->iostart, gmux_data->iolen);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 3d21efe11d7b..d688d806a8a5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,9 +2,11 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
+ * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
- * Inc.
+ * Based on documentation in the libsmbios package:
+ * Copyright (C) 2005-2014 Dell Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -32,6 +34,13 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
+#define KBD_LED_OFF_TOKEN 0x01E1
+#define KBD_LED_ON_TOKEN 0x01E2
+#define KBD_LED_AUTO_TOKEN 0x01E3
+#define KBD_LED_AUTO_25_TOKEN 0x02EA
+#define KBD_LED_AUTO_50_TOKEN 0x02EB
+#define KBD_LED_AUTO_75_TOKEN 0x02EC
+#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -62,6 +71,13 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
+
+ int needs_kbd_timeouts;
+ /*
+ * Ordered list of timeouts expressed in seconds.
+ * The list must end with -1
+ */
+ int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -76,6 +92,15 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
+/*
+ * These values come from a Windows utility provided by Dell. If any other
+ * value is used, the BIOS silently sets the timeout to 0 with no error message.
+ */
+static struct quirk_entry quirk_dell_xps13_9333 = {
+ .needs_kbd_timeouts = 1,
+ .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
+};
+
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -267,6 +292,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ .driver_data = &quirk_dell_xps13_9333,
+ },
{ }
};
@@ -331,17 +365,29 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_location(int tokenid)
+static int find_token_id(int tokenid)
{
int i;
+
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return da_tokens[i].location;
+ return i;
}
return -1;
}
+static int find_token_location(int tokenid)
+{
+ int id;
+
+ id = find_token_id(tokenid);
+ if (id == -1)
+ return -1;
+
+ return da_tokens[id].location;
+}
+
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -362,6 +408,20 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
+static inline int dell_smi_error(int value)
+{
+ switch (value) {
+ case 0: /* Completed successfully */
+ return 0;
+ case -1: /* Completed with error */
+ return -EIO;
+ case -2: /* Function not supported */
+ return -ENXIO;
+ default: /* Unknown error */
+ return -EINVAL;
+ }
+}
+
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -716,7 +776,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
-out:
+ out:
release_buffer();
return ret;
}
@@ -740,7 +800,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
-out:
+ out:
release_buffer();
return ret;
}
@@ -789,6 +849,1018 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
+/*
+ * Derived from information in smbios-keyboard-ctl:
+ *
+ * cbClass 4
+ * cbSelect 11
+ * Keyboard illumination
+ * cbArg1 determines the function to be performed
+ *
+ * cbArg1 0x0 = Get Feature Information
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of user-selectable modes
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * cbRES2, byte2 Reserved for future use
+ * cbRES2, byte3 Keyboard illumination type
+ * 0 Reserved
+ * 1 Tasklight
+ * 2 Backlight
+ * 3-255 Reserved for future use
+ * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte1 Supported timeout unit bitmap
+ * bit 0 Seconds
+ * bit 1 Minutes
+ * bit 2 Hours
+ * bit 3 Days
+ * bits 4-7 Reserved for future use
+ * cbRES3, byte2 Number of keyboard light brightness levels
+ * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
+ * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
+ * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
+ * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
+ *
+ * cbArg1 0x1 = Get Current State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbRES2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbRES2, byte2 Currently active auto keyboard illumination triggers.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbRES2, byte3 Current Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
+ * are set upon return from the [Get feature information] call.
+ * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
+ * cbRES3, byte1 Current ALS reading
+ * cbRES3, byte2 Current keyboard light level.
+ *
+ * cbArg1 0x2 = Set New State
+ * cbRES1 Standard return codes (0, -1, -2)
+ * cbArg2, word0 Bitmap of current mode state
+ * bit 0 Always off (All systems)
+ * bit 1 Always on (Travis ATG, Siberia)
+ * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
+ * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
+ * bit 4 Auto: Input-activity-based On; input-activity based Off
+ * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
+ * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
+ * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
+ * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
+ * bits 9-15 Reserved for future use
+ * Note: Only One bit can be set
+ * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
+ * keyboard to turn off automatically.
+ * bit 0 Any keystroke
+ * bit 1 Touchpad activity
+ * bit 2 Pointing stick
+ * bit 3 Any mouse
+ * bits 4-7 Reserved for future use
+ * cbArg2, byte3 Desired Timeout
+ * bits 7:6 Timeout units indicator:
+ * 00b Seconds
+ * 01b Minutes
+ * 10b Hours
+ * 11b Days
+ * bits 5:0 Timeout value (0-63) in sec/min/hr/day
+ * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
+ * cbArg3, byte2 Desired keyboard light level.
+ */
+
+
+enum kbd_timeout_unit {
+ KBD_TIMEOUT_SECONDS = 0,
+ KBD_TIMEOUT_MINUTES,
+ KBD_TIMEOUT_HOURS,
+ KBD_TIMEOUT_DAYS,
+};
+
+enum kbd_mode_bit {
+ KBD_MODE_BIT_OFF = 0,
+ KBD_MODE_BIT_ON,
+ KBD_MODE_BIT_ALS,
+ KBD_MODE_BIT_TRIGGER_ALS,
+ KBD_MODE_BIT_TRIGGER,
+ KBD_MODE_BIT_TRIGGER_25,
+ KBD_MODE_BIT_TRIGGER_50,
+ KBD_MODE_BIT_TRIGGER_75,
+ KBD_MODE_BIT_TRIGGER_100,
+};
+
+#define kbd_is_als_mode_bit(bit) \
+ ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
+#define kbd_is_trigger_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+#define kbd_is_level_mode_bit(bit) \
+ ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
+
+struct kbd_info {
+ u16 modes;
+ u8 type;
+ u8 triggers;
+ u8 levels;
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 days;
+};
+
+struct kbd_state {
+ u8 mode_bit;
+ u8 triggers;
+ u8 timeout_value;
+ u8 timeout_unit;
+ u8 als_setting;
+ u8 als_value;
+ u8 level;
+};
+
+static const int kbd_tokens[] = {
+ KBD_LED_OFF_TOKEN,
+ KBD_LED_AUTO_25_TOKEN,
+ KBD_LED_AUTO_50_TOKEN,
+ KBD_LED_AUTO_75_TOKEN,
+ KBD_LED_AUTO_100_TOKEN,
+ KBD_LED_ON_TOKEN,
+};
+
+static u16 kbd_token_bits;
+
+static struct kbd_info kbd_info;
+static bool kbd_als_supported;
+static bool kbd_triggers_supported;
+
+static u8 kbd_mode_levels[16];
+static int kbd_mode_levels_count;
+
+static u8 kbd_previous_level;
+static u8 kbd_previous_mode_bit;
+
+static bool kbd_led_present;
+
+/*
+ * NOTE: there are three ways to set the keyboard backlight level.
+ * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
+ * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
+ * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
+ *
+ * There are laptops which support only one of these methods. If we want to
+ * support as many machines as possible we need to implement all three methods.
+ * The first two methods use the kbd_state structure. The third uses SMBIOS
+ * tokens. If kbd_info.levels == 0, the machine does not support setting the
+ * keyboard backlight level via kbd_state.level.
+ */
+
+static int kbd_get_info(struct kbd_info *info)
+{
+ u8 units;
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x0;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ info->modes = buffer->output[1] & 0xFFFF;
+ info->type = (buffer->output[1] >> 24) & 0xFF;
+ info->triggers = buffer->output[2] & 0xFF;
+ units = (buffer->output[2] >> 8) & 0xFF;
+ info->levels = (buffer->output[2] >> 16) & 0xFF;
+
+ if (units & BIT(0))
+ info->seconds = (buffer->output[3] >> 0) & 0xFF;
+ if (units & BIT(1))
+ info->minutes = (buffer->output[3] >> 8) & 0xFF;
+ if (units & BIT(2))
+ info->hours = (buffer->output[3] >> 16) & 0xFF;
+ if (units & BIT(3))
+ info->days = (buffer->output[3] >> 24) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static unsigned int kbd_get_max_level(void)
+{
+ if (kbd_info.levels != 0)
+ return kbd_info.levels;
+ if (kbd_mode_levels_count > 0)
+ return kbd_mode_levels_count - 1;
+ return 0;
+}
+
+static int kbd_get_level(struct kbd_state *state)
+{
+ int i;
+
+ if (kbd_info.levels != 0)
+ return state->level;
+
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < kbd_mode_levels_count; ++i)
+ if (kbd_mode_levels[i] == state->mode_bit)
+ return i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_set_level(struct kbd_state *state, u8 level)
+{
+ if (kbd_info.levels != 0) {
+ if (level != 0)
+ kbd_previous_level = level;
+ if (state->level == level)
+ return 0;
+ state->level = level;
+ if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
+ state->mode_bit = kbd_previous_mode_bit;
+ else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit = state->mode_bit;
+ state->mode_bit = KBD_MODE_BIT_OFF;
+ }
+ return 0;
+ }
+
+ if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
+ if (level != 0)
+ kbd_previous_level = level;
+ state->mode_bit = kbd_mode_levels[level];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int kbd_get_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+
+ buffer->input[0] = 0x1;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+
+ if (ret) {
+ ret = dell_smi_error(ret);
+ goto out;
+ }
+
+ state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
+ if (state->mode_bit != 0)
+ state->mode_bit--;
+
+ state->triggers = (buffer->output[1] >> 16) & 0xFF;
+ state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
+ state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
+ state->als_setting = buffer->output[2] & 0xFF;
+ state->als_value = (buffer->output[2] >> 8) & 0xFF;
+ state->level = (buffer->output[2] >> 16) & 0xFF;
+
+ out:
+ release_buffer();
+ return ret;
+}
+
+static int kbd_set_state(struct kbd_state *state)
+{
+ int ret;
+
+ get_buffer();
+ buffer->input[0] = 0x2;
+ buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
+ buffer->input[1] |= (state->triggers & 0xFF) << 16;
+ buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
+ buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
+ buffer->input[2] = state->als_setting & 0xFF;
+ buffer->input[2] |= (state->level & 0xFF) << 16;
+ dell_send_request(buffer, 4, 11);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
+{
+ int ret;
+
+ ret = kbd_set_state(state);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * When setting the new state fails, try to restore the previous one.
+ * This is needed on some machines where BIOS sets a default state when
+ * setting a new state fails. This default state could be all off.
+ */
+
+ if (kbd_set_state(old))
+ pr_err("Setting old previous keyboard state failed\n");
+
+ return ret;
+}
+
+static int kbd_set_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ buffer->input[1] = da_tokens[id].value;
+ dell_send_request(buffer, 1, 0);
+ ret = buffer->output[0];
+ release_buffer();
+
+ return dell_smi_error(ret);
+}
+
+static int kbd_get_token_bit(u8 bit)
+{
+ int id;
+ int ret;
+ int val;
+
+ if (bit >= ARRAY_SIZE(kbd_tokens))
+ return -EINVAL;
+
+ id = find_token_id(kbd_tokens[bit]);
+ if (id == -1)
+ return -EINVAL;
+
+ get_buffer();
+ buffer->input[0] = da_tokens[id].location;
+ dell_send_request(buffer, 0, 0);
+ ret = buffer->output[0];
+ val = buffer->output[1];
+ release_buffer();
+
+ if (ret)
+ return dell_smi_error(ret);
+
+ return (val == da_tokens[id].value);
+}
+
+static int kbd_get_first_active_token_bit(void)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
+ ret = kbd_get_token_bit(i);
+ if (ret == 1)
+ return i;
+ }
+
+ return ret;
+}
+
+static int kbd_get_valid_token_counts(void)
+{
+ return hweight16(kbd_token_bits);
+}
+
+static inline int kbd_init_info(void)
+{
+ struct kbd_state state;
+ int ret;
+ int i;
+
+ ret = kbd_get_info(&kbd_info);
+ if (ret)
+ return ret;
+
+ kbd_get_state(&state);
+
+ /* NOTE: timeout value is stored in 6 bits so max value is 63 */
+ if (kbd_info.seconds > 63)
+ kbd_info.seconds = 63;
+ if (kbd_info.minutes > 63)
+ kbd_info.minutes = 63;
+ if (kbd_info.hours > 63)
+ kbd_info.hours = 63;
+ if (kbd_info.days > 63)
+ kbd_info.days = 63;
+
+ /* NOTE: On tested machines the ON mode did not work and caused
+ * problems (it turned the backlight off), so do not use it.
+ */
+ kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
+
+ kbd_previous_level = kbd_get_level(&state);
+ kbd_previous_mode_bit = state.mode_bit;
+
+ if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
+ kbd_previous_level = 1;
+
+ if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
+ kbd_previous_mode_bit =
+ ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
+ if (kbd_previous_mode_bit != 0)
+ kbd_previous_mode_bit--;
+ }
+
+ if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
+ BIT(KBD_MODE_BIT_TRIGGER_ALS)))
+ kbd_als_supported = true;
+
+ if (kbd_info.modes & (
+ BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
+ BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
+ BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
+ ))
+ kbd_triggers_supported = true;
+
+ /* kbd_mode_levels[0] is reserved, see below */
+ for (i = 0; i < 16; ++i)
+ if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
+ kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
+
+ /*
+ * Find the first supported mode and assign to kbd_mode_levels[0].
+ * This should be 0 (off), but we cannot depend on the BIOS to
+ * support 0.
+ */
+ if (kbd_mode_levels_count > 0) {
+ for (i = 0; i < 16; ++i) {
+ if (BIT(i) & kbd_info.modes) {
+ kbd_mode_levels[0] = i;
+ break;
+ }
+ }
+ kbd_mode_levels_count++;
+ }
+
+ return 0;
+
+}
+
+static inline void kbd_init_tokens(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
+ if (find_token_id(kbd_tokens[i]) != -1)
+ kbd_token_bits |= BIT(i);
+}
+
+static void kbd_init(void)
+{
+ int ret;
+
+ ret = kbd_init_info();
+ kbd_init_tokens();
+
+ if (kbd_token_bits != 0 || ret == 0)
+ kbd_led_present = true;
+}
+
+static ssize_t kbd_led_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool convert;
+ int value;
+ int ret;
+ char ch;
+ u8 unit;
+ int i;
+
+ ret = sscanf(buf, "%d %c", &value, &ch);
+ if (ret < 1)
+ return -EINVAL;
+ else if (ret == 1)
+ ch = 's';
+
+ if (value < 0)
+ return -EINVAL;
+
+ convert = false;
+
+ switch (ch) {
+ case 's':
+ if (value > kbd_info.seconds)
+ convert = true;
+ unit = KBD_TIMEOUT_SECONDS;
+ break;
+ case 'm':
+ if (value > kbd_info.minutes)
+ convert = true;
+ unit = KBD_TIMEOUT_MINUTES;
+ break;
+ case 'h':
+ if (value > kbd_info.hours)
+ convert = true;
+ unit = KBD_TIMEOUT_HOURS;
+ break;
+ case 'd':
+ if (value > kbd_info.days)
+ convert = true;
+ unit = KBD_TIMEOUT_DAYS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts)
+ convert = true;
+
+ if (convert) {
+ /* Convert value from current units to seconds */
+ switch (unit) {
+ case KBD_TIMEOUT_DAYS:
+ value *= 24;
+ case KBD_TIMEOUT_HOURS:
+ value *= 60;
+ case KBD_TIMEOUT_MINUTES:
+ value *= 60;
+ unit = KBD_TIMEOUT_SECONDS;
+ }
+
+ if (quirks && quirks->needs_kbd_timeouts) {
+ for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
+ if (value <= quirks->kbd_timeouts[i]) {
+ value = quirks->kbd_timeouts[i];
+ break;
+ }
+ }
+ }
+
+ if (value <= kbd_info.seconds && kbd_info.seconds) {
+ unit = KBD_TIMEOUT_SECONDS;
+ } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
+ value /= 60;
+ unit = KBD_TIMEOUT_MINUTES;
+ } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
+ value /= (60 * 60);
+ unit = KBD_TIMEOUT_HOURS;
+ } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
+ value /= (60 * 60 * 24);
+ unit = KBD_TIMEOUT_DAYS;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.timeout_value = value;
+ new_state.timeout_unit = unit;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_timeout_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ int ret;
+ int len;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ len = sprintf(buf, "%d", state.timeout_value);
+
+ switch (state.timeout_unit) {
+ case KBD_TIMEOUT_SECONDS:
+ return len + sprintf(buf+len, "s\n");
+ case KBD_TIMEOUT_MINUTES:
+ return len + sprintf(buf+len, "m\n");
+ case KBD_TIMEOUT_HOURS:
+ return len + sprintf(buf+len, "h\n");
+ case KBD_TIMEOUT_DAYS:
+ return len + sprintf(buf+len, "d\n");
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
+ kbd_led_timeout_show, kbd_led_timeout_store);
+
+static const char * const kbd_led_triggers[] = {
+ "keyboard",
+ "touchpad",
+ /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
+ "mouse",
+};
+
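+/*
+ * sysfs interface for the trigger list ("start_triggers"): a single trigger
+ * name prefixed with '+' or '-' is written to enable or disable it, e.g.
+ * "+keyboard" or "-touchpad". Only the names listed above are accepted.
+ */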
+static ssize_t kbd_led_triggers_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int trigger_bit = -1;
+ char trigger[21];
+ int i, ret;
+
+ ret = sscanf(buf, "%20s", trigger);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (trigger[0] != '+' && trigger[0] != '-')
+ return -EINVAL;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (kbd_triggers_supported) {
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
+ continue;
+ if (trigger[0] == '+' &&
+ triggers_enabled && (state.triggers & BIT(i)))
+ return count;
+ if (trigger[0] == '-' &&
+ (!triggers_enabled || !(state.triggers & BIT(i))))
+ return count;
+ trigger_bit = i;
+ break;
+ }
+ }
+
+ if (trigger_bit != -1) {
+ new_state = state;
+ if (trigger[0] == '+')
+ new_state.triggers |= BIT(trigger_bit);
+ else {
+ new_state.triggers &= ~BIT(trigger_bit);
+ /* NOTE: trackstick bit (2) must be disabled when
+ * disabling touchpad bit (1), otherwise touchpad
+ * bit (1) will not be disabled */
+ if (trigger_bit == 1)
+ new_state.triggers &= ~BIT(2);
+ }
+ if ((kbd_info.triggers & new_state.triggers) !=
+ new_state.triggers)
+ return -EINVAL;
+ if (new_state.triggers && !triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else if (new_state.triggers == 0) {
+ kbd_set_level(&new_state, 0);
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ if (new_state.mode_bit != KBD_MODE_BIT_OFF)
+ kbd_previous_mode_bit = new_state.mode_bit;
+ return count;
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t kbd_led_triggers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kbd_state state;
+ bool triggers_enabled;
+ int level, i, ret;
+ int len = 0;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (kbd_triggers_supported) {
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+ level = kbd_get_level(&state);
+ for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
+ if (!(kbd_info.triggers & BIT(i)))
+ continue;
+ if (!kbd_led_triggers[i])
+ continue;
+ if ((triggers_enabled || level <= 0) &&
+ (state.triggers & BIT(i)))
+ buf[len++] = '+';
+ else
+ buf[len++] = '-';
+ len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
+ }
+ }
+
+ if (len)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
+ kbd_led_triggers_show, kbd_led_triggers_store);
+
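+/*
+ * sysfs interface for ambient light control ("als_enabled"): writing a
+ * non-zero value switches the backlight to an ALS-driven mode, writing 0
+ * falls back to the trigger mode, or to the always-on mode when triggers
+ * are not in use.
+ */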
+static ssize_t kbd_led_als_enabled_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state new_state;
+ struct kbd_state state;
+ bool triggers_enabled = false;
+ int enable;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &enable);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ if (enable == kbd_is_als_mode_bit(state.mode_bit))
+ return count;
+
+ new_state = state;
+
+ if (kbd_triggers_supported)
+ triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
+
+ if (enable) {
+ if (triggers_enabled)
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
+ else
+ new_state.mode_bit = KBD_MODE_BIT_ALS;
+ } else {
+ if (triggers_enabled) {
+ new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
+ kbd_set_level(&new_state, kbd_previous_level);
+ } else {
+ new_state.mode_bit = KBD_MODE_BIT_ON;
+ }
+ }
+ if (!(kbd_info.modes & BIT(new_state.mode_bit)))
+ return -EINVAL;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+ kbd_previous_mode_bit = new_state.mode_bit;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_enabled_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ bool enabled = false;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+ enabled = kbd_is_als_mode_bit(state.mode_bit);
+
+ return sprintf(buf, "%d\n", enabled ? 1 : 0);
+}
+
+static DEVICE_ATTR(als_enabled, S_IRUGO | S_IWUSR,
+ kbd_led_als_enabled_show, kbd_led_als_enabled_store);
+
+static ssize_t kbd_led_als_setting_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u8 setting;
+ int ret;
+
+ ret = kstrtou8(buf, 10, &setting);
+ if (ret)
+ return ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ new_state = state;
+ new_state.als_setting = setting;
+
+ ret = kbd_set_state_safe(&new_state, &state);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t kbd_led_als_setting_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kbd_state state;
+ int ret;
+
+ ret = kbd_get_state(&state);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "%d\n", state.als_setting);
+}
+
+static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
+ kbd_led_als_setting_show, kbd_led_als_setting_store);
+
+static struct attribute *kbd_led_attrs[] = {
+ &dev_attr_stop_timeout.attr,
+ &dev_attr_start_triggers.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_group = {
+ .attrs = kbd_led_attrs,
+};
+
+static struct attribute *kbd_led_als_attrs[] = {
+ &dev_attr_als_enabled.attr,
+ &dev_attr_als_setting.attr,
+ NULL,
+};
+
+static const struct attribute_group kbd_led_als_group = {
+ .attrs = kbd_led_als_attrs,
+};
+
+static const struct attribute_group *kbd_led_groups[] = {
+ &kbd_led_group,
+ &kbd_led_als_group,
+ NULL,
+};
+
+static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
+{
+ int ret;
+ u16 num;
+ struct kbd_state state;
+
+ if (kbd_get_max_level()) {
+ ret = kbd_get_state(&state);
+ if (ret)
+ return 0;
+ ret = kbd_get_level(&state);
+ if (ret < 0)
+ return 0;
+ return ret;
+ }
+
+ if (kbd_get_valid_token_counts()) {
+ ret = kbd_get_first_active_token_bit();
+ if (ret < 0)
+ return 0;
+ for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return 0;
+ return ffs(num) - 1;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+ return 0;
+}
+
+static void kbd_led_level_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct kbd_state state;
+ struct kbd_state new_state;
+ u16 num;
+
+ if (kbd_get_max_level()) {
+ if (kbd_get_state(&state))
+ return;
+ new_state = state;
+ if (kbd_set_level(&new_state, value))
+ return;
+ kbd_set_state_safe(&new_state, &state);
+ return;
+ }
+
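+ /*
+ * Without a level interface, brightness value N selects the (N+1)-th
+ * set bit in kbd_token_bits, i.e. the N-th supported brightness token.
+ */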
+ if (kbd_get_valid_token_counts()) {
+ for (num = kbd_token_bits; num != 0 && value > 0; --value)
+ num &= num - 1; /* clear the first bit set */
+ if (num == 0)
+ return;
+ kbd_set_token_bit(ffs(num) - 1);
+ return;
+ }
+
+ pr_warn("Keyboard brightness level control not supported\n");
+}
+
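+/*
+ * Registered as a regular LED class device, so the level is exposed as
+ * /sys/class/leds/dell::kbd_backlight/brightness and the attribute groups
+ * above appear in the same directory.
+ */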
+static struct led_classdev kbd_led = {
+ .name = "dell::kbd_backlight",
+ .brightness_set = kbd_led_level_set,
+ .brightness_get = kbd_led_level_get,
+ .groups = kbd_led_groups,
+};
+
+static int __init kbd_led_init(struct device *dev)
+{
+ kbd_init();
+ if (!kbd_led_present)
+ return -ENODEV;
+ if (!kbd_als_supported)
+ kbd_led_groups[1] = NULL;
+ kbd_led.max_brightness = kbd_get_max_level();
+ if (!kbd_led.max_brightness) {
+ kbd_led.max_brightness = kbd_get_valid_token_counts();
+ if (kbd_led.max_brightness)
+ kbd_led.max_brightness--;
+ }
+ return led_classdev_register(dev, &kbd_led);
+}
+
+static void brightness_set_exit(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ /* Don't change backlight level on exit */
+}
+
+static void kbd_led_exit(void)
+{
+ if (!kbd_led_present)
+ return;
+ kbd_led.brightness_set = brightness_set_exit;
+ led_classdev_unregister(&kbd_led);
+}
+
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -841,6 +1913,8 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
+ kbd_led_init(&platform_device->dev);
+
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -908,6 +1982,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
+ kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -924,5 +1999,7 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
+MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index a4a4258f6134..8037c8b46241 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -62,7 +62,7 @@
* (1 << 1): Bluetooth enable/disable, RW.
* (1 << 2): GPS enable/disable, RW.
* (1 << 3): WiFi enable/disable, RW.
- * (1 << 4): WWAN (3G) enable/disalbe, RW.
+ * (1 << 4): WWAN (3G) enable/disable, RW.
* (1 << 5): Touchscreen enable/disable, Read Only.
*/
#define OT_EC_DEVICE_STATE_ADDRESS 0xD6
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 3b8ceee7c5cb..7769575345d8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -319,6 +319,7 @@ static struct {
u32 sensors_pdrv_attrs_registered:1;
u32 sensors_pdev_attrs_registered:1;
u32 hotkey_poll_active:1;
+ u32 has_adaptive_kbd:1;
} tp_features;
static struct {
@@ -1911,6 +1912,27 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
TP_ACPI_HOTKEYSCAN_UNK7,
TP_ACPI_HOTKEYSCAN_UNK8,
+ TP_ACPI_HOTKEYSCAN_MUTE2,
+ TP_ACPI_HOTKEYSCAN_BRIGHTNESS_ZERO,
+ TP_ACPI_HOTKEYSCAN_CLIPPING_TOOL,
+ TP_ACPI_HOTKEYSCAN_CLOUD,
+ TP_ACPI_HOTKEYSCAN_UNK9,
+ TP_ACPI_HOTKEYSCAN_VOICE,
+ TP_ACPI_HOTKEYSCAN_UNK10,
+ TP_ACPI_HOTKEYSCAN_GESTURES,
+ TP_ACPI_HOTKEYSCAN_UNK11,
+ TP_ACPI_HOTKEYSCAN_UNK12,
+ TP_ACPI_HOTKEYSCAN_UNK13,
+ TP_ACPI_HOTKEYSCAN_CONFIG,
+ TP_ACPI_HOTKEYSCAN_NEW_TAB,
+ TP_ACPI_HOTKEYSCAN_RELOAD,
+ TP_ACPI_HOTKEYSCAN_BACK,
+ TP_ACPI_HOTKEYSCAN_MIC_DOWN,
+ TP_ACPI_HOTKEYSCAN_MIC_UP,
+ TP_ACPI_HOTKEYSCAN_MIC_CANCELLATION,
+ TP_ACPI_HOTKEYSCAN_CAMERA_MODE,
+ TP_ACPI_HOTKEYSCAN_ROTATE_DISPLAY,
+
/* Hotkey keymap size */
TPACPI_HOTKEY_MAP_LEN
};
@@ -2647,9 +2669,7 @@ static ssize_t hotkey_enable_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_hotkey_enable =
- __ATTR(hotkey_enable, S_IWUSR | S_IRUGO,
- hotkey_enable_show, hotkey_enable_store);
+static DEVICE_ATTR_RW(hotkey_enable);
/* sysfs hotkey mask --------------------------------------------------- */
static ssize_t hotkey_mask_show(struct device *dev,
@@ -2685,9 +2705,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
return (res) ? res : count;
}
-static struct device_attribute dev_attr_hotkey_mask =
- __ATTR(hotkey_mask, S_IWUSR | S_IRUGO,
- hotkey_mask_show, hotkey_mask_store);
+static DEVICE_ATTR_RW(hotkey_mask);
/* sysfs hotkey bios_enabled ------------------------------------------- */
static ssize_t hotkey_bios_enabled_show(struct device *dev,
@@ -2697,8 +2715,7 @@ static ssize_t hotkey_bios_enabled_show(struct device *dev,
return sprintf(buf, "0\n");
}
-static struct device_attribute dev_attr_hotkey_bios_enabled =
- __ATTR(hotkey_bios_enabled, S_IRUGO, hotkey_bios_enabled_show, NULL);
+static DEVICE_ATTR_RO(hotkey_bios_enabled);
/* sysfs hotkey bios_mask ---------------------------------------------- */
static ssize_t hotkey_bios_mask_show(struct device *dev,
@@ -2710,8 +2727,7 @@ static ssize_t hotkey_bios_mask_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
}
-static struct device_attribute dev_attr_hotkey_bios_mask =
- __ATTR(hotkey_bios_mask, S_IRUGO, hotkey_bios_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_bios_mask);
/* sysfs hotkey all_mask ----------------------------------------------- */
static ssize_t hotkey_all_mask_show(struct device *dev,
@@ -2722,8 +2738,7 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
hotkey_all_mask | hotkey_source_mask);
}
-static struct device_attribute dev_attr_hotkey_all_mask =
- __ATTR(hotkey_all_mask, S_IRUGO, hotkey_all_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_all_mask);
/* sysfs hotkey recommended_mask --------------------------------------- */
static ssize_t hotkey_recommended_mask_show(struct device *dev,
@@ -2735,9 +2750,7 @@ static ssize_t hotkey_recommended_mask_show(struct device *dev,
& ~hotkey_reserved_mask);
}
-static struct device_attribute dev_attr_hotkey_recommended_mask =
- __ATTR(hotkey_recommended_mask, S_IRUGO,
- hotkey_recommended_mask_show, NULL);
+static DEVICE_ATTR_RO(hotkey_recommended_mask);
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
@@ -2792,9 +2805,7 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
return (rc < 0) ? rc : count;
}
-static struct device_attribute dev_attr_hotkey_source_mask =
- __ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
- hotkey_source_mask_show, hotkey_source_mask_store);
+static DEVICE_ATTR_RW(hotkey_source_mask);
/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
static ssize_t hotkey_poll_freq_show(struct device *dev,
@@ -2826,9 +2837,7 @@ static ssize_t hotkey_poll_freq_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_hotkey_poll_freq =
- __ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
- hotkey_poll_freq_show, hotkey_poll_freq_store);
+static DEVICE_ATTR_RW(hotkey_poll_freq);
#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2849,8 +2858,7 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
(res == TPACPI_RFK_RADIO_OFF) ? 0 : 1);
}
-static struct device_attribute dev_attr_hotkey_radio_sw =
- __ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+static DEVICE_ATTR_RO(hotkey_radio_sw);
static void hotkey_radio_sw_notify_change(void)
{
@@ -2872,8 +2880,7 @@ static ssize_t hotkey_tablet_mode_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
}
-static struct device_attribute dev_attr_hotkey_tablet_mode =
- __ATTR(hotkey_tablet_mode, S_IRUGO, hotkey_tablet_mode_show, NULL);
+static DEVICE_ATTR_RO(hotkey_tablet_mode);
static void hotkey_tablet_mode_notify_change(void)
{
@@ -2890,8 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
}
-static struct device_attribute dev_attr_hotkey_wakeup_reason =
- __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+static DEVICE_ATTR_RO(hotkey_wakeup_reason);
static void hotkey_wakeup_reason_notify_change(void)
{
@@ -2907,9 +2913,7 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
}
-static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
- __ATTR(wakeup_hotunplug_complete, S_IRUGO,
- hotkey_wakeup_hotunplug_complete_show, NULL);
+static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete);
static void hotkey_wakeup_hotunplug_complete_notify_change(void)
{
@@ -2917,6 +2921,57 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
"wakeup_hotunplug_complete");
}
+/* sysfs adaptive kbd mode --------------------------------------------- */
+
+static int adaptive_keyboard_get_mode(void);
+static int adaptive_keyboard_set_mode(int new_mode);
+
+enum ADAPTIVE_KEY_MODE {
+ HOME_MODE,
+ WEB_BROWSER_MODE,
+ WEB_CONFERENCE_MODE,
+ FUNCTION_MODE,
+ LAYFLAT_MODE
+};
+
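+/*
+ * The adaptive_kbd_mode attribute below reads and writes the current mode
+ * as an integer in the 0 (HOME_MODE) .. 4 (LAYFLAT_MODE) range.
+ */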
+static ssize_t adaptive_kbd_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int current_mode;
+
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
+ return current_mode;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", current_mode);
+}
+
+static ssize_t adaptive_kbd_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+ int res;
+
+ if (parse_strtoul(buf, LAYFLAT_MODE, &t))
+ return -EINVAL;
+
+ res = adaptive_keyboard_set_mode(t);
+ return (res < 0) ? res : count;
+}
+
+static DEVICE_ATTR_RW(adaptive_kbd_mode);
+
+static struct attribute *adaptive_kbd_attributes[] = {
+ &dev_attr_adaptive_kbd_mode.attr,
+ NULL
+};
+
+static const struct attribute_group adaptive_kbd_attr_group = {
+ .attrs = adaptive_kbd_attributes,
+};
+
/* --------------------------------------------------------------------- */
static struct attribute *hotkey_attributes[] __initdata = {
@@ -3118,6 +3173,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+
+ /* No assignments, only used for Adaptive keyboards. */
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
+ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
},
/* Generic keymap for Lenovo ThinkPads */
@@ -3174,6 +3236,35 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Extra keys in use since the X240 / T440 / T540 */
KEY_CONFIG, KEY_SEARCH, KEY_SCALE, KEY_FILE,
+
+ /*
+ * These are the adaptive keyboard keycodes for Carbon X1 2014.
+ * The first item in this list is the Mute button which is
+ * emitted with 0x103 through
+ * adaptive_keyboard_hotkey_notify_hotkey() when the sound
+ * symbol is held.
+ * We'll need to offset those by 0x20.
+ */
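+ /*
+ * For example, scancode 0x103 lands at keymap index
+ * 0x103 - FIRST_ADAPTIVE_KEY + ADAPTIVE_KEY_OFFSET = 0x20, i.e. the
+ * first entry below.
+ */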
+ KEY_RESERVED, /* Mute held, 0x103 */
+ KEY_BRIGHTNESS_MIN, /* Backlight off */
+ KEY_RESERVED, /* Clipping tool */
+ KEY_RESERVED, /* Cloud */
+ KEY_RESERVED,
+ KEY_VOICECOMMAND, /* Voice */
+ KEY_RESERVED,
+ KEY_RESERVED, /* Gestures */
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_CONFIG, /* Settings */
+ KEY_RESERVED, /* New tab */
+ KEY_REFRESH, /* Reload */
+ KEY_BACK, /* Back */
+ KEY_RESERVED, /* Microphone down */
+ KEY_RESERVED, /* Microphone up */
+ KEY_RESERVED, /* Microphone cancellation */
+ KEY_RESERVED, /* Camera mode */
+ KEY_RESERVED, /* Rotate display, 0x116 */
},
};
@@ -3227,6 +3318,20 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!tp_features.hotkey)
return 1;
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+ if ((hkeyv >> 8) == 2) {
+ tp_features.has_adaptive_kbd = true;
+ res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+ if (res)
+ goto err_exit;
+ }
+ }
+
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
@@ -3437,6 +3542,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
err_exit:
delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+
hotkey_dev_attributes = NULL;
return (res < 0) ? res : 1;
@@ -3449,14 +3557,6 @@ err_exit:
* Will consider supporting the rest of the modes in the future.
*
*/
-enum ADAPTIVE_KEY_MODE {
- HOME_MODE,
- WEB_BROWSER_MODE,
- WEB_CONFERENCE_MODE,
- FUNCTION_MODE,
- LAYFLAT_MODE
-};
-
static const int adaptive_keyboard_modes[] = {
HOME_MODE,
/* WEB_BROWSER_MODE = 2,
@@ -3466,6 +3566,8 @@ static const int adaptive_keyboard_modes[] = {
#define DFR_CHANGE_ROW 0x101
#define DFR_SHOW_QUICKVIEW_ROW 0x102
+#define FIRST_ADAPTIVE_KEY 0x103
+#define ADAPTIVE_KEY_OFFSET 0x020
/* Press and hold the Fn key for about a second to switch to Function Mode.
* When the Fn key is released, the previous mode is restored.
@@ -3473,6 +3575,32 @@ static const int adaptive_keyboard_modes[] = {
static bool adaptive_keyboard_mode_is_saved;
static int adaptive_keyboard_prev_mode;
+static int adaptive_keyboard_get_mode(void)
+{
+ int mode = 0;
+
+ if (!acpi_evalf(hkey_handle, &mode, "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return mode;
+}
+
+static int adaptive_keyboard_set_mode(int new_mode)
+{
+ if (new_mode < 0 ||
+ new_mode > LAYFLAT_MODE)
+ return -EINVAL;
+
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+ pr_err("Cannot set adaptive keyboard mode\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int adaptive_keyboard_get_next_mode(int mode)
{
size_t i;
@@ -3493,8 +3621,9 @@ static int adaptive_keyboard_get_next_mode(int mode)
static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
{
- u32 current_mode = 0;
+ int current_mode = 0;
int new_mode = 0;
+ int keycode;
switch (scancode) {
case DFR_CHANGE_ROW:
@@ -3502,43 +3631,51 @@ static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
new_mode = adaptive_keyboard_prev_mode;
adaptive_keyboard_mode_is_saved = false;
} else {
- if (!acpi_evalf(
- hkey_handle, &current_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode\n");
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
return false;
- } else {
- new_mode = adaptive_keyboard_get_next_mode(
- current_mode);
- }
+ new_mode = adaptive_keyboard_get_next_mode(
+ current_mode);
}
- if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
- pr_err("Cannot set adaptive keyboard mode\n");
+ if (adaptive_keyboard_set_mode(new_mode) < 0)
return false;
- }
return true;
case DFR_SHOW_QUICKVIEW_ROW:
- if (!acpi_evalf(hkey_handle,
- &adaptive_keyboard_prev_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode\n");
+ current_mode = adaptive_keyboard_get_mode();
+ if (current_mode < 0)
return false;
- } else {
- adaptive_keyboard_mode_is_saved = true;
- if (!acpi_evalf(hkey_handle,
- NULL, "STRW", "vd", FUNCTION_MODE)) {
- pr_err("Cannot set adaptive keyboard mode\n");
- return false;
- }
- }
+ adaptive_keyboard_prev_mode = current_mode;
+ adaptive_keyboard_mode_is_saved = true;
+
+ if (adaptive_keyboard_set_mode(FUNCTION_MODE) < 0)
+ return false;
return true;
default:
- return false;
+ if (scancode < FIRST_ADAPTIVE_KEY ||
+ scancode >= FIRST_ADAPTIVE_KEY + TPACPI_HOTKEY_MAP_LEN -
+ ADAPTIVE_KEY_OFFSET) {
+ pr_info("Unhandled adaptive keyboard key: 0x%x\n",
+ scancode);
+ return false;
+ }
+ keycode = hotkey_keycode_map[scancode - FIRST_ADAPTIVE_KEY + ADAPTIVE_KEY_OFFSET];
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+ return true;
}
}
@@ -3836,28 +3973,21 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
static void hotkey_suspend(void)
{
- int hkeyv;
-
/* Do these on suspend, we get the events on early resume! */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
hotkey_autosleep_ack = 0;
/* save previous mode of adaptive keyboard of X1 Carbon */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- if (!acpi_evalf(hkey_handle,
- &adaptive_keyboard_prev_mode,
- "GTRW", "dd", 0)) {
- pr_err("Cannot read adaptive keyboard mode.\n");
- }
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, &adaptive_keyboard_prev_mode,
+ "GTRW", "dd", 0)) {
+ pr_err("Cannot read adaptive keyboard mode.\n");
}
}
}
static void hotkey_resume(void)
{
- int hkeyv;
-
tpacpi_disable_brightness_delay();
if (hotkey_status_set(true) < 0 ||
@@ -3872,14 +4002,10 @@ static void hotkey_resume(void)
hotkey_poll_setup_safe(false);
/* restore previous mode of adaptive keyboard of X1 Carbon */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- if (!acpi_evalf(hkey_handle,
- NULL,
- "STRW", "vd",
- adaptive_keyboard_prev_mode)) {
- pr_err("Cannot set adaptive keyboard mode.\n");
- }
+ if (tp_features.has_adaptive_kbd) {
+ if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd",
+ adaptive_keyboard_prev_mode)) {
+ pr_err("Cannot set adaptive keyboard mode.\n");
}
}
}
@@ -4079,9 +4205,7 @@ static ssize_t bluetooth_enable_store(struct device *dev,
attr, buf, count);
}
-static struct device_attribute dev_attr_bluetooth_enable =
- __ATTR(bluetooth_enable, S_IWUSR | S_IRUGO,
- bluetooth_enable_show, bluetooth_enable_store);
+static DEVICE_ATTR_RW(bluetooth_enable);
/* --------------------------------------------------------------------- */
@@ -4269,9 +4393,7 @@ static ssize_t wan_enable_store(struct device *dev,
attr, buf, count);
}
-static struct device_attribute dev_attr_wan_enable =
- __ATTR(wwan_enable, S_IWUSR | S_IRUGO,
- wan_enable_show, wan_enable_store);
+static DEVICE_ATTR_RW(wan_enable);
/* --------------------------------------------------------------------- */
@@ -5048,8 +5170,7 @@ static ssize_t cmos_command_store(struct device *dev,
return (res) ? res : count;
}
-static struct device_attribute dev_attr_cmos_command =
- __ATTR(cmos_command, S_IWUSR, NULL, cmos_command_store);
+static DEVICE_ATTR_WO(cmos_command);
/* --------------------------------------------------------------------- */
@@ -8017,9 +8138,7 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
return count;
}
-static struct device_attribute dev_attr_fan_pwm1_enable =
- __ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
- fan_pwm1_enable_show, fan_pwm1_enable_store);
+static DEVICE_ATTR_RW(fan_pwm1_enable);
/* sysfs fan pwm1 ------------------------------------------------------ */
static ssize_t fan_pwm1_show(struct device *dev,
@@ -8079,9 +8198,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
return (rc) ? rc : count;
}
-static struct device_attribute dev_attr_fan_pwm1 =
- __ATTR(pwm1, S_IWUSR | S_IRUGO,
- fan_pwm1_show, fan_pwm1_store);
+static DEVICE_ATTR_RW(fan_pwm1);
/* sysfs fan fan1_input ------------------------------------------------ */
static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8098,9 +8215,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", speed);
}
-static struct device_attribute dev_attr_fan_fan1_input =
- __ATTR(fan1_input, S_IRUGO,
- fan_fan1_input_show, NULL);
+static DEVICE_ATTR_RO(fan_fan1_input);
/* sysfs fan fan2_input ------------------------------------------------ */
static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8117,9 +8232,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", speed);
}
-static struct device_attribute dev_attr_fan_fan2_input =
- __ATTR(fan2_input, S_IRUGO,
- fan_fan2_input_show, NULL);
+static DEVICE_ATTR_RO(fan_fan2_input);
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8735,8 +8848,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
}
-static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
- __ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
+static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name);
/* --------------------------------------------------------------------- */
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index dbcb7a8915b8..9956b9902bb4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -51,6 +51,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/uaccess.h>
+#include <acpi/video.h>
MODULE_AUTHOR("John Belmonte");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -116,6 +117,7 @@ MODULE_LICENSE("GPL");
#define HCI_KBD_ILLUMINATION 0x0095
#define HCI_ECO_MODE 0x0097
#define HCI_ACCELEROMETER2 0x00a6
+#define HCI_SYSTEM_INFO 0xc000
#define SCI_PANEL_POWER_ON 0x010d
#define SCI_ILLUMINATION 0x014e
#define SCI_USB_SLEEP_CHARGE 0x0150
@@ -129,10 +131,13 @@ MODULE_LICENSE("GPL");
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
#define HCI_HOTKEY_ENABLE 0x09
+#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
#define HCI_LCD_BRIGHTNESS_LEVELS (1 << HCI_LCD_BRIGHTNESS_BITS)
#define HCI_MISC_SHIFT 0x10
+#define HCI_SYSTEM_TYPE1 0x10
+#define HCI_SYSTEM_TYPE2 0x11
#define HCI_VIDEO_OUT_LCD 0x1
#define HCI_VIDEO_OUT_CRT 0x2
#define HCI_VIDEO_OUT_TV 0x4
@@ -147,9 +152,10 @@ MODULE_LICENSE("GPL");
#define SCI_KBD_MODE_OFF 0x10
#define SCI_KBD_TIME_MAX 0x3c001a
#define SCI_USB_CHARGE_MODE_MASK 0xff
-#define SCI_USB_CHARGE_DISABLED 0x30000
-#define SCI_USB_CHARGE_ALTERNATE 0x30009
-#define SCI_USB_CHARGE_AUTO 0x30021
+#define SCI_USB_CHARGE_DISABLED 0x00
+#define SCI_USB_CHARGE_ALTERNATE 0x09
+#define SCI_USB_CHARGE_TYPICAL 0x11
+#define SCI_USB_CHARGE_AUTO 0x21
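+/* OR'd with the firmware-provided usbsc_mode_base when writing the mode */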
#define SCI_USB_CHARGE_BAT_MASK 0x7
#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1
#define SCI_USB_CHARGE_BAT_LVL_ON 0x4
@@ -174,6 +180,8 @@ struct toshiba_acpi_dev {
int kbd_mode;
int kbd_time;
int usbsc_bat_level;
+ int usbsc_mode_base;
+ int hotkey_event_type;
unsigned int illumination_supported:1;
unsigned int video_supported:1;
@@ -243,29 +251,6 @@ static const struct key_entry toshiba_acpi_keymap[] = {
{ KE_END, 0 },
};
-/* alternative keymap */
-static const struct dmi_system_id toshiba_alt_keymap_dmi[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite M840"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"),
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A50-A"),
- },
- },
- {}
-};
-
static const struct key_entry toshiba_acpi_alt_keymap[] = {
{ KE_KEY, 0x157, { KEY_MUTE } },
{ KE_KEY, 0x102, { KEY_ZOOMOUT } },
@@ -281,6 +266,14 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
};
/*
+ * List of models which have a broken acpi-video backlight interface and thus
+ * need to use the toshiba (vendor) interface instead.
+ */
+static const struct dmi_system_id toshiba_vendor_backlight_dmi[] = {
+ {}
+};
+
+/*
* Utility
*/
@@ -819,6 +812,54 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
}
/* Sleep (Charge and Music) utilities support */
+static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev)
+{
+ u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+ u32 out[TCI_WORDS];
+ acpi_status status;
+
+ /* Set the feature to "not supported" in case of error */
+ dev->usb_sleep_charge_supported = 0;
+
+ if (!sci_open(dev))
+ return;
+
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ pr_info("USB Sleep and Charge not supported\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_SUCCESS) {
+ dev->usbsc_mode_base = out[4];
+ }
+
+ in[5] = SCI_USB_CHARGE_BAT_LVL;
+ status = tci_raw(dev, in, out);
+ if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+ pr_err("ACPI call to get USB Sleep and Charge mode failed\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_NOT_SUPPORTED) {
+ pr_info("USB Sleep and Charge not supported\n");
+ sci_close(dev);
+ return;
+ } else if (out[0] == TOS_SUCCESS) {
+ dev->usbsc_bat_level = out[2];
+ /*
+ * If we reach this point, it means that the laptop has support
+ * for this feature and all values are initialized.
+ * Set it as supported.
+ */
+ dev->usb_sleep_charge_supported = 1;
+ }
+
+ sci_close(dev);
+}
+
static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
u32 *mode)
{
@@ -934,11 +975,11 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
sci_close(dev);
if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to get USB S&C battery level failed\n");
+ pr_err("ACPI call to get USB Rapid Charge failed\n");
return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED ||
out[0] == TOS_INPUT_DATA_ERROR) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("USB Rapid Charge not supported\n");
return -ENODEV;
}
@@ -962,10 +1003,10 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
status = tci_raw(dev, in, out);
sci_close(dev);
if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C battery level failed\n");
+ pr_err("ACPI call to set USB Rapid Charge failed\n");
return -EIO;
} else if (out[0] == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("USB Rapid Charge not supported\n");
return -ENODEV;
} else if (out[0] == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -984,10 +1025,10 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
if (result == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C mode failed\n");
+ pr_err("ACPI call to get Sleep and Music failed\n");
return -EIO;
} else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("Sleep and Music not supported\n");
return -ENODEV;
} else if (result == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -1006,10 +1047,10 @@ static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
sci_close(dev);
if (result == TOS_FAILURE) {
- pr_err("ACPI call to set USB S&C mode failed\n");
+ pr_err("ACPI call to set Sleep and Music failed\n");
return -EIO;
} else if (result == TOS_NOT_SUPPORTED) {
- pr_info("USB Sleep and Charge not supported\n");
+ pr_info("Sleep and Music not supported\n");
return -ENODEV;
} else if (result == TOS_INPUT_DATA_ERROR) {
return -EIO;
@@ -1149,6 +1190,28 @@ static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
return 0;
}
+/* Hotkey Event type */
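+/*
+ * The system information block reports which hotkey event type the firmware
+ * uses; HCI_SYSTEM_TYPE1/TYPE2 are later used in toshiba_acpi_setup_keyboard()
+ * to pick between the standard and the alternate keymap.
+ */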
+static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev,
+ u32 *type)
+{
+ u32 val1 = 0x03;
+ u32 val2 = 0;
+ u32 result;
+
+ result = hci_read2(dev, HCI_SYSTEM_INFO, &val1, &val2);
+ if (result == TOS_FAILURE) {
+ pr_err("ACPI call to get System type failed\n");
+ return -EIO;
+ } else if (result == TOS_NOT_SUPPORTED) {
+ pr_info("System type not supported\n");
+ return -ENODEV;
+ }
+
+ *type = val2;
+
+ return 0;
+}
+
/* Bluetooth rfkill handlers */
static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
@@ -1973,17 +2036,21 @@ static ssize_t usb_sleep_charge_store(struct device *dev,
* 0 - Disabled
* 1 - Alternate (Non USB conformant devices that require more power)
* 2 - Auto (USB conformant devices)
+ * 3 - Typical
*/
- if (state != 0 && state != 1 && state != 2)
+ if (state != 0 && state != 1 && state != 2 && state != 3)
return -EINVAL;
/* Set the USB charging mode to internal value */
+ mode = toshiba->usbsc_mode_base;
if (state == 0)
- mode = SCI_USB_CHARGE_DISABLED;
+ mode |= SCI_USB_CHARGE_DISABLED;
else if (state == 1)
- mode = SCI_USB_CHARGE_ALTERNATE;
+ mode |= SCI_USB_CHARGE_ALTERNATE;
else if (state == 2)
- mode = SCI_USB_CHARGE_AUTO;
+ mode |= SCI_USB_CHARGE_AUTO;
+ else if (state == 3)
+ mode |= SCI_USB_CHARGE_TYPICAL;
ret = toshiba_usb_sleep_charge_set(toshiba, mode);
if (ret)
@@ -2333,6 +2400,20 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
return 0;
}
+static void toshiba_acpi_enable_special_functions(struct toshiba_acpi_dev *dev)
+{
+ u32 result;
+
+ /*
+ * Re-activate the hotkeys, but this time, we are using the
+ * "Special Functions" mode.
+ */
+ result = hci_write1(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_SPECIAL_FUNCTIONS);
+ if (result != TOS_SUCCESS)
+ pr_err("Could not enable the Special Function mode\n");
+}
+
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
{
@@ -2434,10 +2515,22 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
{
+ const struct key_entry *keymap = toshiba_acpi_keymap;
acpi_handle ec_handle;
- int error;
+ u32 events_type;
u32 hci_result;
- const struct key_entry *keymap = toshiba_acpi_keymap;
+ int error;
+
+ error = toshiba_acpi_enable_hotkeys(dev);
+ if (error)
+ return error;
+
+ error = toshiba_hotkey_event_type_get(dev, &events_type);
+ if (error) {
+ pr_err("Unable to query Hotkey Event Type\n");
+ return error;
+ }
+ dev->hotkey_event_type = events_type;
dev->hotkey_dev = input_allocate_device();
if (!dev->hotkey_dev)
@@ -2447,8 +2540,14 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
dev->hotkey_dev->phys = "toshiba_acpi/input0";
dev->hotkey_dev->id.bustype = BUS_HOST;
- if (dmi_check_system(toshiba_alt_keymap_dmi))
+ if (events_type == HCI_SYSTEM_TYPE1 ||
+ !dev->kbd_function_keys_supported)
+ keymap = toshiba_acpi_keymap;
+ else if (events_type == HCI_SYSTEM_TYPE2 ||
+ dev->kbd_function_keys_supported)
keymap = toshiba_acpi_alt_keymap;
+ else
+ pr_info("Unknown event type received %x\n", events_type);
error = sparse_keymap_setup(dev->hotkey_dev, keymap, NULL);
if (error)
goto err_free_dev;
@@ -2490,12 +2589,6 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
goto err_remove_filter;
}
- error = toshiba_acpi_enable_hotkeys(dev);
- if (error) {
- pr_info("Unable to enable hotkeys\n");
- goto err_remove_filter;
- }
-
error = input_register_device(dev->hotkey_dev);
if (error) {
pr_info("Unable to register input device\n");
@@ -2541,6 +2634,20 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
ret = get_tr_backlight_status(dev, &enabled);
dev->tr_backlight_supported = !ret;
+ /*
+ * Tell acpi-video-detect code to prefer vendor backlight on all
+ * systems with transflective backlight and on dmi matched systems.
+ */
+ if (dev->tr_backlight_supported ||
+ dmi_check_system(toshiba_vendor_backlight_dmi))
+ acpi_video_dmi_promote_vendor();
+
+ if (acpi_video_backlight_support())
+ return 0;
+
+ /* acpi-video may have loaded before we called dmi_promote_vendor() */
+ acpi_video_unregister_backlight();
+
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -2624,6 +2731,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
{
struct toshiba_acpi_dev *dev;
const char *hci_method;
+ u32 special_functions;
u32 dummy;
bool bt_present;
int ret = 0;
@@ -2648,6 +2756,16 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
acpi_dev->driver_data = dev;
dev_set_drvdata(&acpi_dev->dev, dev);
+ /* Query the BIOS for supported features */
+
+ /*
+ * The "Special Functions" are always supported by the laptops
+ * with the new keyboard layout, query for its presence to help
+ * determine the keymap layout to use.
+ */
+ ret = toshiba_function_keys_get(dev, &special_functions);
+ dev->kbd_function_keys_supported = !ret;
+
if (toshiba_acpi_setup_keyboard(dev))
pr_info("Unable to activate hotkeys\n");
@@ -2716,8 +2834,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = toshiba_accelerometer_supported(dev);
dev->accelerometer_supported = !ret;
- ret = toshiba_usb_sleep_charge_get(dev, &dummy);
- dev->usb_sleep_charge_supported = !ret;
+ toshiba_usb_sleep_charge_available(dev);
ret = toshiba_usb_rapid_charge_get(dev, &dummy);
dev->usb_rapid_charge_supported = !ret;
@@ -2725,23 +2842,25 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
ret = toshiba_usb_sleep_music_get(dev, &dummy);
dev->usb_sleep_music_supported = !ret;
- ret = toshiba_function_keys_get(dev, &dummy);
- dev->kbd_function_keys_supported = !ret;
-
ret = toshiba_panel_power_on_get(dev, &dummy);
dev->panel_power_on_supported = !ret;
ret = toshiba_usb_three_get(dev, &dummy);
dev->usb_three_supported = !ret;
- /* Determine whether or not BIOS supports fan and video interfaces */
-
ret = get_video_status(dev, &dummy);
dev->video_supported = !ret;
ret = get_fan_status(dev, &dummy);
dev->fan_supported = !ret;
+ /*
+ * Enable the "Special Functions" mode only if they are
+ * supported and if they are activated.
+ */
+ if (dev->kbd_function_keys_supported && special_functions)
+ toshiba_acpi_enable_special_functions(dev);
+
ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
&toshiba_attr_group);
if (ret) {
@@ -2770,6 +2889,21 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
case 0x80: /* Hotkeys and some system events */
toshiba_acpi_process_hotkeys(dev);
break;
+ case 0x81: /* Dock events */
+ case 0x82:
+ case 0x83:
+ pr_info("Dock event received %x\n", event);
+ break;
+ case 0x88: /* Thermal events */
+ pr_info("Thermal event received\n");
+ break;
+ case 0x8f: /* LID closed */
+ case 0x90: /* LID is closed and Dock has been ejected */
+ break;
+ case 0x8c: /* SATA power events */
+ case 0x8b:
+ pr_info("SATA power event received %x\n", event);
+ break;
case 0x92: /* Keyboard backlight mode changed */
/* Update sysfs entries */
ret = sysfs_update_group(&acpi_dev->dev.kobj,
@@ -2777,17 +2911,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
if (ret)
pr_err("Unable to update sysfs entries\n");
break;
- case 0x81: /* Unknown */
- case 0x82: /* Unknown */
- case 0x83: /* Unknown */
- case 0x8c: /* Unknown */
+ case 0x85: /* Unknown */
+ case 0x8d: /* Unknown */
case 0x8e: /* Unknown */
- case 0x8f: /* Unknown */
- case 0x90: /* Unknown */
+ case 0x94: /* Unknown */
+ case 0x95: /* Unknown */
default:
pr_info("Unknown event received %x\n", event);
break;
}
+
+ acpi_bus_generate_netlink_event(acpi_dev->pnp.device_class,
+ dev_name(&acpi_dev->dev),
+ event, 0);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 2cb1ea62b4a7..249800763362 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -2,6 +2,7 @@
* Toshiba Bluetooth Enable Driver
*
* Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
+ * Copyright (C) 2015 Azael Avalos <coproscefalo@gmail.com>
*
* Thanks to Matthew Garrett for background info on ACPI innards which
* normal people aren't meant to understand :-)
@@ -25,6 +26,10 @@
#include <linux/types.h>
#include <linux/acpi.h>
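+/* Bits reported by the BTST ACPI method, see toshiba_bluetooth_status() */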
+#define BT_KILLSWITCH_MASK 0x01
+#define BT_PLUGGED_MASK 0x40
+#define BT_POWER_MASK 0x80
+
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
MODULE_LICENSE("GPL");
@@ -57,32 +62,107 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.drv.pm = &toshiba_bt_pm,
};
+static int toshiba_bluetooth_present(acpi_handle handle)
+{
+ acpi_status result;
+ u64 bt_present;
+
+ /*
+ * Some Toshiba laptops may have a fake TOS6205 device in
+ * their ACPI BIOS, so query the _STA method to see if there
+ * is really anything there.
+ */
+ result = acpi_evaluate_integer(handle, "_STA", NULL, &bt_present);
+ if (ACPI_FAILURE(result)) {
+ pr_err("ACPI call to query Bluetooth presence failed");
+ return -ENXIO;
+ } else if (!bt_present) {
+ pr_info("Bluetooth device not present\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_status(acpi_handle handle)
+{
+ acpi_status result;
+ u64 status;
+
+ result = acpi_evaluate_integer(handle, "BTST", NULL, &status);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not get Bluetooth device status\n");
+ return -ENXIO;
+ }
+
+ pr_info("Bluetooth status %llu\n", status);
+
+ return status;
+}
static int toshiba_bluetooth_enable(acpi_handle handle)
{
- acpi_status res1, res2;
- u64 result;
+ acpi_status result;
+ bool killswitch;
+ bool powered;
+ bool plugged;
+ int status;
/*
* Query ACPI to verify RFKill switch is set to 'on'.
* If not, we return silently, no need to report it as
* an error.
*/
- res1 = acpi_evaluate_integer(handle, "BTST", NULL, &result);
- if (ACPI_FAILURE(res1))
- return res1;
- if (!(result & 0x01))
- return 0;
+ status = toshiba_bluetooth_status(handle);
+ if (status < 0)
+ return status;
+
+ killswitch = (status & BT_KILLSWITCH_MASK) ? true : false;
+ powered = (status & BT_POWER_MASK) ? true : false;
+ plugged = (status & BT_PLUGGED_MASK) ? true : false;
- pr_info("Re-enabling Toshiba Bluetooth\n");
- res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
- res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
- if (!ACPI_FAILURE(res1) || !ACPI_FAILURE(res2))
+ if (!killswitch)
return 0;
+ /*
+ * This check ensures that we only enable the device when it is powered
+ * off or detached, as some recent devices somehow pass the killswitch
+ * test, causing a loop of enabling/disabling the device; see bug 93911.
+ */
+ if (powered || plugged)
+ return 0;
+
+ result = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not attach USB Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ result = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power ON Bluetooth device\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int toshiba_bluetooth_disable(acpi_handle handle)
+{
+ acpi_status result;
+
+ result = acpi_evaluate_object(handle, "BTPF", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not power OFF Bluetooth device\n");
+ return -ENXIO;
+ }
- pr_warn("Failed to re-enable Toshiba Bluetooth\n");
+ result = acpi_evaluate_object(handle, "DUSB", NULL, NULL);
+ if (ACPI_FAILURE(result)) {
+ pr_err("Could not detach USB Bluetooth device\n");
+ return -ENXIO;
+ }
- return -ENODEV;
+ return 0;
}
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
@@ -99,23 +179,18 @@ static int toshiba_bt_resume(struct device *dev)
static int toshiba_bt_rfkill_add(struct acpi_device *device)
{
- acpi_status status;
- u64 bt_present;
- int result = -ENODEV;
+ int result;
- /*
- * Some Toshiba laptops may have a fake TOS6205 device in
- * their ACPI BIOS, so query the _STA method to see if there
- * is really anything there, before trying to enable it.
- */
- status = acpi_evaluate_integer(device->handle, "_STA", NULL,
- &bt_present);
+ result = toshiba_bluetooth_present(device->handle);
+ if (result)
+ return result;
- if (!ACPI_FAILURE(status) && bt_present) {
- pr_info("Detected Toshiba ACPI Bluetooth device - "
- "installing RFKill handler\n");
- result = toshiba_bluetooth_enable(device->handle);
- }
+ pr_info("Toshiba ACPI Bluetooth device driver\n");
+
+ /* Enable the BT device */
+ result = toshiba_bluetooth_enable(device->handle);
+ if (result)
+ return result;
return result;
}
@@ -123,7 +198,7 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device)
static int toshiba_bt_rfkill_remove(struct acpi_device *device)
{
/* clean up */
- return 0;
+ return toshiba_bluetooth_disable(device->handle);
}
module_acpi_driver(toshiba_bt_rfkill_driver);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 737e56d46f61..aac47573f9ed 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -45,7 +45,6 @@ MODULE_LICENSE("GPL");
#define ACPI_WMI_CLASS "wmi"
-static DEFINE_MUTEX(wmi_data_lock);
static LIST_HEAD(wmi_block_list);
struct guid_block {
@@ -240,10 +239,10 @@ static bool find_guid(const char *guid_string, struct wmi_block **out)
if (memcmp(block->guid, guid_input, 16) == 0) {
if (out)
*out = wblock;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index e03877c4b195..fd243231620a 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1064,6 +1064,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+ RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 810aef3f4c3e..ba34c7d89042 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(of_pwm_get);
* @table: array of consumers to register
* @num: number of consumers in table
*/
-void __init pwm_add_table(struct pwm_lookup *table, size_t num)
+void pwm_add_table(struct pwm_lookup *table, size_t num)
{
mutex_lock(&pwm_lookup_lock);
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 522f7075bb1a..fa5feaba25a5 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -225,6 +225,10 @@ static const struct of_device_id atmel_hlcdc_dt_ids[] = {
.compatible = "atmel,sama5d3-hlcdc",
.data = &atmel_hlcdc_pwm_sama5d3_errata,
},
+ {
+ .compatible = "atmel,sama5d4-hlcdc",
+ .data = &atmel_hlcdc_pwm_sama5d3_errata,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index f75ecb09d97d..b430811e14f5 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -35,6 +35,10 @@
#define PERIOD_CDIV(div) (((div) & 0x7) << 20)
#define PERIOD_CDIV_MAX 8
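+/*
+ * The hardware clock divider does not follow a plain 1 << div progression
+ * beyond div = 4 (the higher settings are 64, 256 and 1024), so look the
+ * divisor up in this table instead of computing it as 1 << div.
+ */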
+static const unsigned int cdiv[PERIOD_CDIV_MAX] = {
+ 1, 2, 4, 8, 16, 64, 256, 1024
+};
+
struct mxs_pwm_chip {
struct pwm_chip chip;
struct clk *clk;
@@ -54,13 +58,13 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
rate = clk_get_rate(mxs->clk);
while (1) {
- c = rate / (1 << div);
+ c = rate / cdiv[div];
c = c * period_ns;
do_div(c, 1000000000);
if (c < PERIOD_PERIOD_MAX)
break;
div++;
- if (div > PERIOD_CDIV_MAX)
+ if (div >= PERIOD_CDIV_MAX)
return -EINVAL;
}
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index 3fb775ded0df..34b5c275a92a 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -202,7 +202,7 @@ static const struct pwm_ops pca9685_pwm_ops = {
.owner = THIS_MODULE,
};
-static struct regmap_config pca9685_regmap_i2c_config = {
+static const struct regmap_config pca9685_regmap_i2c_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = PCA9685_NUMREGS,
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 3e9b5835a4af..ff201e1b9219 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -269,12 +269,31 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock_irqrestore(&samsung_pwm_lock, flags);
}
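+/*
+ * Pulse the per-channel manual-update bit so freshly written TCNTB/TCMPB
+ * values take effect immediately, instead of waiting for the timer to
+ * auto-reload them at the end of the current period.
+ */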
+static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+ struct pwm_device *pwm)
+{
+ unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
+ u32 tcon;
+ unsigned long flags;
+
+ spin_lock_irqsave(&samsung_pwm_lock, flags);
+
+ tcon = readl(chip->base + REG_TCON);
+ tcon |= TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+
+ tcon &= ~TCON_MANUALUPDATE(tcon_chan);
+ writel(tcon, chip->base + REG_TCON);
+
+ spin_unlock_irqrestore(&samsung_pwm_lock, flags);
+}
+
static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
- u32 tin_ns = chan->tin_ns, tcnt, tcmp;
+ u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
/*
* We currently avoid using 64bit arithmetic by using the
@@ -288,6 +307,7 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
return 0;
tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
+ oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm));
/* We need tick count for calculation, not last tick. */
++tcnt;
@@ -335,6 +355,16 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
writel(tcnt, our_chip->base + REG_TCNTB(pwm->hwpwm));
writel(tcmp, our_chip->base + REG_TCMPB(pwm->hwpwm));
+ /*
+ * In case the PWM is currently at 100% duty cycle, force a manual
+ * update to prevent the signal staying high if the PWM is disabled
+ * shortly after this update (before it has auto-reloaded the new values).
+ */
+ if (oldtcmp == (u32) -1) {
+ dev_dbg(our_chip->chip.dev, "Forcing manual update");
+ pwm_samsung_manual_update(our_chip, pwm);
+ }
+
chan->period_ns = period_ns;
chan->tin_ns = tin_ns;
chan->duty_ns = duty_ns;
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 71d7802aa8b4..6f1fa1773e76 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -1201,13 +1201,9 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.id.vendor = cdev->id.cu_type;
vcdev->vdev.id.device = cdev->id.cu_model;
- if (virtio_device_is_legacy_only(vcdev->vdev.id)) {
- vcdev->revision = 0;
- } else {
- ret = virtio_ccw_set_transport_rev(vcdev);
- if (ret)
- goto out_free;
- }
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ goto out_free;
ret = register_virtio_device(&vcdev->vdev);
if (ret) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 57418258c101..fe8a8d157e22 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3065,7 +3065,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
{
struct qla_hw_data *ha = vha->hw;
struct se_cmd *se_cmd;
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
struct qla_tgt_cmd *cmd;
if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index ab4879e12ea7..68c2002e78bf 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -53,9 +53,8 @@
static struct workqueue_struct *tcm_qla2xxx_free_wq;
static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
-static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+static const struct target_core_fabric_ops tcm_qla2xxx_ops;
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
/*
* Parse WWN.
@@ -336,6 +335,14 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg
return tpg->tpg_attrib.demo_mode_login_only;
}
+static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->tpg_attrib.fabric_prot_type;
+}
+
static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -1082,8 +1089,53 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return target_show_dynamic_sessions(se_tpg, page);
+}
+
+TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions);
+
+static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tpg_attrib.fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
+}
+TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR);
+
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
&tcm_qla2xxx_tpg_enable.attr,
+ &tcm_qla2xxx_tpg_dynamic_sessions.attr,
+ &tcm_qla2xxx_tpg_fabric_prot_type.attr,
NULL,
};
@@ -1124,7 +1176,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&tcm_qla2xxx_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1244,7 +1296,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
- ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1560,7 +1612,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
se_sess = transport_init_session_tags(num_tags,
sizeof(struct qla_tgt_cmd),
- TARGET_PROT_NORMAL);
+ TARGET_PROT_ALL);
if (IS_ERR(se_sess)) {
pr_err("Unable to initialize struct se_session\n");
return PTR_ERR(se_sess);
@@ -1934,7 +1986,9 @@ static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx",
.get_fabric_name = tcm_qla2xxx_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
@@ -1949,6 +2003,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
tcm_qla2xxx_check_demo_write_protect,
.tpg_check_prod_mode_write_protect =
tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only,
.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
@@ -1983,9 +2038,15 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs,
};
-static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .module = THIS_MODULE,
+ .name = "qla2xxx_npiv",
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
.tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
@@ -2033,94 +2094,26 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
.fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+
+ .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs,
};
static int tcm_qla2xxx_register_configfs(void)
{
- struct target_fabric_configfs *fabric, *npiv_fabric;
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
- */
- fabric->tf_ops = tcm_qla2xxx_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =
- tcm_qla2xxx_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+
+ ret = target_register_template(&tcm_qla2xxx_ops);
+ if (ret)
return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- tcm_qla2xxx_fabric_configfs = fabric;
- pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
- /*
- * Register the top level struct config_item_type for NPIV with TCM core
- */
- npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
- if (IS_ERR(npiv_fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- ret = PTR_ERR(npiv_fabric);
- goto out_fabric;
- }
- /*
- * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
- */
- npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
- /*
- * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
- */
- npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
- tcm_qla2xxx_npiv_tpg_attrs;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the npiv_fabric for use within TCM
- */
- ret = target_fabric_configfs_register(npiv_fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ ret = target_register_template(&tcm_qla2xxx_npiv_ops);
+ if (ret)
goto out_fabric;
- }
- /*
- * Setup our local pointer to *npiv_fabric
- */
- tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
- pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
WQ_MEM_RECLAIM, 0);
@@ -2140,9 +2133,9 @@ static int tcm_qla2xxx_register_configfs(void)
out_free_wq:
destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
- target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
- target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ target_unregister_template(&tcm_qla2xxx_ops);
return ret;
}
@@ -2151,13 +2144,8 @@ static void tcm_qla2xxx_deregister_configfs(void)
destroy_workqueue(tcm_qla2xxx_cmd_wq);
destroy_workqueue(tcm_qla2xxx_free_wq);
- target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
- tcm_qla2xxx_fabric_configfs = NULL;
- pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
-
- target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
- tcm_qla2xxx_npiv_fabric_configfs = NULL;
- pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+ target_unregister_template(&tcm_qla2xxx_ops);
+ target_unregister_template(&tcm_qla2xxx_npiv_ops);
}
static int __init tcm_qla2xxx_init(void)
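The tcm_qla2xxx conversion above is representative of the whole series: the mutable target_fabric_configfs boilerplate is replaced by a const target_core_fabric_ops table carrying .module, .name and the tfc_*_attrs lists, and registration collapses to a single call. A condensed sketch for a hypothetical fabric module (all my_fabric_* names are placeholders, not part of the patch):

#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops my_fabric_ops = {
	.module             = THIS_MODULE,
	.name               = "my_fabric",
	.get_fabric_name    = my_fabric_get_name,
	/* ... remaining mandatory callbacks ... */
	.tfc_wwn_attrs      = my_fabric_wwn_attrs,
	.tfc_tpg_base_attrs = my_fabric_tpg_attrs,
};

static int __init my_fabric_init(void)
{
	/* Builds the configfs group hierarchy from the attrs lists above. */
	return target_register_template(&my_fabric_ops);
}

static void __exit my_fabric_exit(void)
{
	target_unregister_template(&my_fabric_ops);
}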
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 10c002145648..23295115c9fc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -33,6 +33,7 @@ struct tcm_qla2xxx_tpg_attrib {
int demo_mode_write_protect;
int prod_mode_write_protect;
int demo_mode_login_only;
+ int fabric_prot_type;
};
struct tcm_qla2xxx_tpg {
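The new fabric_prot_type attribute stored in tcm_qla2xxx_tpg_attrib follows the usual configfs store pattern: parse with kstrtoul() and accept only the DIF protection types the fabric can handle (0 = disabled, 1 = Type 1, 3 = Type 3). A stripped-down sketch of that validation; example_store_prot_type() is illustrative only:

static ssize_t example_store_prot_type(const char *page, size_t count,
				       int *prot_type)
{
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret)
		return ret;
	/* Only DIF Type 1 and Type 3 (or disabled) are meaningful here. */
	if (val != 0 && val != 1 && val != 3)
		return -EINVAL;

	*prot_type = val;	/* later reported via tpg_check_prot_fabric_only() */
	return count;
}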
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 186924aa4740..f6bac9e77d06 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1023,7 +1023,6 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e57eec0b2f46..bcc7c635d8e7 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1030,7 +1030,6 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
}
memset(&cfg, 0, sizeof(cfg));
- cfg.slave_id = id;
cfg.direction = dir;
if (dir == DMA_MEM_TO_DEV) {
cfg.dst_addr = port_addr;
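Both SPI hunks drop cfg.slave_id because the request-line selection already happens when the channel is requested, so the slave config only needs direction, address and width. A minimal sketch of the remaining setup, with an illustrative bus width and placeholder function name:

#include <linux/dmaengine.h>

static int example_setup_tx(struct dma_chan *chan, dma_addr_t port_addr)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction      = DMA_MEM_TO_DEV;
	cfg.dst_addr       = port_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;	/* device dependent */

	return dmaengine_slave_config(chan, &cfg);
}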
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 0e3d8c7add24..b0b96ab31954 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1106,6 +1106,7 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
@@ -1118,8 +1119,12 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
ion_buffer_get(buffer);
mutex_unlock(&client->lock);
- dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR,
- NULL);
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ion_buffer_put(buffer);
return dmabuf;
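The ion hunk shows the dma-buf API change threaded through this series: exporters fill a struct dma_buf_export_info (initialised by DEFINE_DMA_BUF_EXPORT_INFO) instead of passing a growing argument list to dma_buf_export(). A minimal exporter sketch with placeholder names:

#include <linux/dma-buf.h>

struct dma_buf *example_export(void *priv, size_t size,
			       const struct dma_buf_ops *ops)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops   = ops;
	exp_info.size  = size;
	exp_info.flags = O_RDWR;
	exp_info.priv  = priv;

	return dma_buf_export(&exp_info);	/* ERR_PTR() on failure */
}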
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index fe1fd05423e9..5af01351306d 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -153,7 +153,7 @@ static int ll_ddelete(const struct dentry *de)
CDEBUG(D_DENTRY, "%s dentry %pd (%p, parent %p, inode %p) %s%s\n",
d_lustre_invalid((struct dentry *)de) ? "deleting" : "keeping",
- de, de, de->d_parent, de->d_inode,
+ de, de, de->d_parent, d_inode(de),
d_unhashed(de) ? "" : "hashed,",
list_empty(&de->d_subdirs) ? "" : "subdirs");
@@ -167,8 +167,8 @@ static int ll_ddelete(const struct dentry *de)
#if 0
/* if not ldlm lock for this inode, set i_nlink to 0 so that
* this inode can be recycled later b=20433 */
- if (de->d_inode && !find_cbdata(de->d_inode))
- clear_nlink(de->d_inode);
+ if (d_really_is_positive(de) && !find_cbdata(d_inode(de)))
+ clear_nlink(d_inode(de));
#endif
if (d_lustre_invalid((struct dentry *)de))
@@ -181,7 +181,7 @@ int ll_d_init(struct dentry *de)
LASSERT(de != NULL);
CDEBUG(D_DENTRY, "ldd on dentry %pd (%p) parent %p inode %p refc %d\n",
- de, de, de->d_parent, de->d_inode,
+ de, de, de->d_parent, d_inode(de),
d_count(de));
if (de->d_fsdata == NULL) {
@@ -261,7 +261,7 @@ void ll_invalidate_aliases(struct inode *inode)
ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
dentry, dentry, dentry->d_parent,
- dentry->d_inode, dentry->d_flags);
+ d_inode(dentry), dentry->d_flags);
d_lustre_invalidate(dentry, 0);
}
@@ -309,7 +309,7 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
static int ll_revalidate_dentry(struct dentry *dentry,
unsigned int lookup_flags)
{
- struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *dir = d_inode(dentry->d_parent);
/*
* if open&create is set, talk to MDS to make sure file is created if
@@ -329,7 +329,7 @@ static int ll_revalidate_dentry(struct dentry *dentry,
if (lookup_flags & LOOKUP_RCU)
return -ECHILD;
- do_statahead_enter(dir, &dentry, dentry->d_inode == NULL);
+ do_statahead_enter(dir, &dentry, d_inode(dentry) == NULL);
ll_statahead_mark(dir, dentry);
return 1;
}
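The remaining llite changes below repeat one mechanical conversion: direct dentry->d_inode accesses become the d_inode() accessor, and NULL tests become d_really_is_positive()/d_really_is_negative() from <linux/dcache.h>. In sketch form, with an example-only helper:

#include <linux/dcache.h>

/* Before: if (de->d_inode && de->d_inode->i_nlink) ... */
static bool example_dentry_linked(struct dentry *de)
{
	return d_really_is_positive(de) && d_inode(de)->i_nlink != 0;
}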
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 529062ea112b..4b44c634fcc3 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -388,7 +388,7 @@ int ll_file_release(struct inode *inode, struct file *file)
static int ll_intent_file_open(struct dentry *dentry, void *lmm,
int lmmsize, struct lookup_intent *itp)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct dentry *parent = dentry->d_parent;
const char *name = dentry->d_name.name;
@@ -413,7 +413,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
opc = LUSTRE_OPC_CREATE;
}
- op_data = ll_prep_md_op_data(NULL, parent->d_inode,
+ op_data = ll_prep_md_op_data(NULL, d_inode(parent),
inode, name, len,
O_RDWR, opc, NULL);
if (IS_ERR(op_data))
@@ -2896,7 +2896,7 @@ static int ll_inode_revalidate_fini(struct inode *inode, int rc)
static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ptlrpc_request *req = NULL;
struct obd_export *exp;
int rc = 0;
@@ -2948,12 +2948,12 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
do_lookup() -> ll_revalidate_it(). We cannot use d_drop
here to preserve get_cwd functionality on 2.6.
Bug 10503 */
- if (!dentry->d_inode->i_nlink)
+ if (!d_inode(dentry)->i_nlink)
d_lustre_invalidate(dentry, 0);
ll_lookup_finish_locks(&oit, inode);
- } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
- struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+ } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
+ struct ll_sb_info *sbi = ll_i2sbi(d_inode(dentry));
u64 valid = OBD_MD_FLGETATTR;
struct md_op_data *op_data;
int ealen = 0;
@@ -2991,7 +2991,7 @@ out:
static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc;
rc = __ll_inode_revalidate(dentry, ibits);
@@ -3019,7 +3019,7 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
int ll_getattr(struct vfsmount *mnt, struct dentry *de, struct kstat *stat)
{
- struct inode *inode = de->d_inode;
+ struct inode *inode = d_inode(de);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
int res = 0;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index e7422f5c9c6f..5f918e3c4683 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -1488,7 +1488,7 @@ static inline void d_lustre_invalidate(struct dentry *dentry, int nested)
{
CDEBUG(D_DENTRY, "invalidate dentry %pd (%p) parent %p inode %p refc %d\n",
dentry, dentry,
- dentry->d_parent, dentry->d_inode, d_count(dentry));
+ dentry->d_parent, d_inode(dentry), d_count(dentry));
spin_lock_nested(&dentry->d_lock,
nested ? DENTRY_D_LOCK_NESTED : DENTRY_D_LOCK_NORMAL);
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index bf1ec277a1dc..a27af7882170 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -1166,7 +1166,7 @@ static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct md_open_data **mod)
{
struct lustre_md md;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *request = NULL;
int rc, ia_valid;
@@ -1290,7 +1290,7 @@ static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
*/
int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ll_inode_info *lli = ll_i2info(inode);
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
@@ -1465,7 +1465,7 @@ out:
int ll_setattr(struct dentry *de, struct iattr *attr)
{
- int mode = de->d_inode->i_mode;
+ int mode = d_inode(de)->i_mode;
if ((attr->ia_valid & (ATTR_CTIME|ATTR_SIZE|ATTR_MODE)) ==
(ATTR_CTIME|ATTR_SIZE|ATTR_MODE))
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 243a7840457f..db43b81386f7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -230,11 +230,11 @@ static int ll_nfs_get_name_filldir(struct dir_context *ctx, const char *name,
static int ll_get_name(struct dentry *dentry, char *name,
struct dentry *child)
{
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = d_inode(dentry);
int rc;
struct ll_getname_data lgd = {
.lgd_name = name,
- .lgd_fid = ll_i2info(child->d_inode)->lli_fid,
+ .lgd_fid = ll_i2info(d_inode(child))->lli_fid,
.ctx.actor = ll_nfs_get_name_filldir,
};
@@ -282,7 +282,7 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
static struct dentry *ll_get_parent(struct dentry *dchild)
{
struct ptlrpc_request *req = NULL;
- struct inode *dir = dchild->d_inode;
+ struct inode *dir = d_inode(dchild);
struct ll_sb_info *sbi;
struct dentry *result = NULL;
struct mdt_body *body;
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 49f1cb067ea2..5a25dcd10126 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -155,7 +155,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
list_for_each_entry_safe(child, tmp_subdir,
&dentry->d_subdirs,
d_child) {
- if (child->d_inode == NULL)
+ if (d_really_is_negative(child))
d_lustre_invalidate(child, 1);
}
}
@@ -392,7 +392,7 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
iput(inode);
CDEBUG(D_DENTRY,
"Reuse dentry %p inode %p refc %d flags %#x\n",
- new, new->d_inode, d_count(new), new->d_flags);
+ new, d_inode(new), d_count(new), new->d_flags);
return new;
}
}
@@ -401,7 +401,7 @@ struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de)
return ERR_PTR(rc);
d_add(de, inode);
CDEBUG(D_DENTRY, "Add dentry %p inode %p refc %d flags %#x\n",
- de, de->d_inode, d_count(de), de->d_flags);
+ de, d_inode(de), d_count(de), de->d_flags);
return de;
}
@@ -448,7 +448,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
!it_disposition(it, DISP_OPEN_CREATE)) {
/* With DISP_OPEN_CREATE dentry will
instantiated in ll_create_it. */
- LASSERT((*de)->d_inode == NULL);
+ LASSERT(d_inode(*de) == NULL);
d_instantiate(*de, inode);
}
@@ -541,7 +541,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
goto out;
}
- inode = dentry->d_inode;
+ inode = d_inode(dentry);
if ((it->it_op & IT_OPEN) && inode &&
!S_ISREG(inode->i_mode) &&
!S_ISDIR(inode->i_mode)) {
@@ -638,9 +638,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
*opened |= FILE_CREATED;
}
- if (dentry->d_inode && it_disposition(it, DISP_OPEN_OPEN)) {
+ if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
/* Open dentry. */
- if (S_ISFIFO(dentry->d_inode->i_mode)) {
+ if (S_ISFIFO(d_inode(dentry)->i_mode)) {
/* We cannot call open here as it would
* deadlock.
*/
@@ -862,8 +862,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
{
- if (child->d_inode)
- *fid = *ll_inode2fid(child->d_inode);
+ if (d_really_is_positive(child))
+ *fid = *ll_inode2fid(d_inode(child));
}
/**
@@ -1076,7 +1076,7 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
- struct inode *src = old_dentry->d_inode;
+ struct inode *src = d_inode(old_dentry);
struct ll_sb_info *sbi = ll_i2sbi(dir);
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index b75562c6b5de..7f8071242f23 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -880,7 +880,7 @@ static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
struct dentry *dentry)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct lookup_intent it = { .it_op = IT_GETATTR,
.d.lustre.it_lock_handle = 0 };
struct md_enqueue_info *minfo;
@@ -926,7 +926,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
static void ll_statahead_one(struct dentry *parent, const char *entry_name,
int entry_name_len)
{
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *lli = ll_i2info(dir);
struct ll_statahead_info *sai = lli->lli_sai;
struct dentry *dentry = NULL;
@@ -944,8 +944,8 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
rc = do_sa_lookup(dir, entry);
} else {
rc = do_sa_revalidate(dir, entry, dentry);
- if (rc == 1 && agl_should_run(sai, dentry->d_inode))
- ll_agl_add(sai, dentry->d_inode, entry->se_index);
+ if (rc == 1 && agl_should_run(sai, d_inode(dentry)))
+ ll_agl_add(sai, d_inode(dentry), entry->se_index);
}
if (dentry != NULL)
@@ -968,7 +968,7 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
static int ll_agl_thread(void *arg)
{
struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *plli = ll_i2info(dir);
struct ll_inode_info *clli;
struct ll_sb_info *sbi = ll_i2sbi(dir);
@@ -1042,7 +1042,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
CDEBUG(D_READA, "start agl thread: sai %p, parent %pd\n",
sai, parent);
- plli = ll_i2info(parent->d_inode);
+ plli = ll_i2info(d_inode(parent));
task = kthread_run(ll_agl_thread, parent,
"ll_agl_%u", plli->lli_opendir_pid);
if (IS_ERR(task)) {
@@ -1059,7 +1059,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
static int ll_statahead_thread(void *arg)
{
struct dentry *parent = (struct dentry *)arg;
- struct inode *dir = parent->d_inode;
+ struct inode *dir = d_inode(parent);
struct ll_inode_info *plli = ll_i2info(dir);
struct ll_inode_info *clli;
struct ll_sb_info *sbi = ll_i2sbi(dir);
@@ -1604,7 +1604,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
rc = md_revalidate_lock(ll_i2mdexp(dir), &it,
ll_inode2fid(inode), &bits);
if (rc == 1) {
- if ((*dentryp)->d_inode == NULL) {
+ if (d_inode(*dentryp) == NULL) {
struct dentry *alias;
alias = ll_splice_alias(inode,
@@ -1614,13 +1614,13 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
return PTR_ERR(alias);
}
*dentryp = alias;
- } else if ((*dentryp)->d_inode != inode) {
+ } else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
CDEBUG(D_READA,
"stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
*dentryp,
- (*dentryp)->d_inode->i_ino,
- (*dentryp)->d_inode->i_generation,
+ d_inode(*dentryp)->i_ino,
+ d_inode(*dentryp)->i_generation,
inode->i_ino,
inode->i_generation);
ll_sai_unplug(sai, entry);
@@ -1666,8 +1666,8 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
/* get parent reference count here, and put it in ll_statahead_thread */
parent = dget((*dentryp)->d_parent);
- if (unlikely(sai->sai_inode != parent->d_inode)) {
- struct ll_inode_info *nlli = ll_i2info(parent->d_inode);
+ if (unlikely(sai->sai_inode != d_inode(parent))) {
+ struct ll_inode_info *nlli = ll_i2info(d_inode(parent));
CWARN("Race condition, someone changed %pd just now: old parent "DFID", new parent "DFID"\n",
*dentryp,
@@ -1689,7 +1689,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
ll_sai_get(sai);
lli->lli_sai = sai;
- plli = ll_i2info(parent->d_inode);
+ plli = ll_i2info(d_inode(parent));
rc = PTR_ERR(kthread_run(ll_statahead_thread, parent,
"ll_sa_%u", plli->lli_opendir_pid));
thread = &sai->sai_thread;
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 686b6a574cc5..3711e671a4df 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -120,7 +120,7 @@ failed:
static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
struct ptlrpc_request *request = NULL;
int rc;
char *symname = NULL;
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b439936b4524..e0fcbe1395fd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -214,7 +214,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
int ll_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -267,7 +267,7 @@ int ll_setxattr(struct dentry *dentry, const char *name,
int ll_removexattr(struct dentry *dentry, const char *name)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -457,7 +457,7 @@ out:
ssize_t ll_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
LASSERT(inode);
LASSERT(name);
@@ -545,7 +545,7 @@ out:
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = d_inode(dentry);
int rc = 0, rc2 = 0;
struct lov_mds_md *lmm = NULL;
struct ptlrpc_request *request = NULL;
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index 81d44c477a5b..257361280510 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,12 +31,13 @@ config TCM_PSCSI
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
-config TCM_USER
+config TCM_USER2
tristate "TCM/USER Subsystem Plugin for Linux"
depends on UIO && NET
help
Say Y here to enable the TCM/USER subsystem plugin for a userspace
- process to handle requests
+ process to handle requests. This is version 2 of the ABI; version 1
+ is obsolete.
source "drivers/target/loopback/Kconfig"
source "drivers/target/tcm_fc/Kconfig"
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index bbb4a7d638ef..e619c0266a79 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
-obj-$(CONFIG_TCM_USER) += target_core_user.o
+obj-$(CONFIG_TCM_USER2) += target_core_user.o
# Fabric modules
obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 13a92403fe3e..0f43be9c3453 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -1,6 +1,5 @@
iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_seq_pdu_list.o \
- iscsi_target_tq.o \
iscsi_target_auth.o \
iscsi_target_datain_values.o \
iscsi_target_device.o \
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 77d64251af40..34871a628b11 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -33,8 +33,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
-#include "iscsi_target_configfs.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
@@ -537,7 +535,7 @@ static struct iscsit_transport iscsi_target_transport = {
static int __init iscsi_target_init_module(void)
{
- int ret = 0;
+ int ret = 0, size;
pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
@@ -546,24 +544,21 @@ static int __init iscsi_target_init_module(void)
pr_err("Unable to allocate memory for iscsit_global\n");
return -1;
}
+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock);
spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr);
idr_init(&sess_idr);
- ret = iscsi_target_register_configfs();
- if (ret < 0)
+ ret = target_register_template(&iscsi_ops);
+ if (ret)
goto out;
- ret = iscsi_thread_set_init();
- if (ret < 0)
+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+ iscsit_global->ts_bitmap = vzalloc(size);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
goto configfs_out;
-
- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
- TARGET_THREAD_SET_COUNT) {
- pr_err("iscsi_allocate_thread_sets() returned"
- " unexpected value!\n");
- goto ts_out1;
}
lio_qr_cache = kmem_cache_create("lio_qr_cache",
@@ -572,7 +567,7 @@ static int __init iscsi_target_init_module(void)
if (!lio_qr_cache) {
pr_err("Unable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto ts_out2;
+ goto bitmap_out;
}
lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -617,12 +612,13 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-ts_out2:
- iscsi_deallocate_thread_sets();
-ts_out1:
- iscsi_thread_set_free();
+bitmap_out:
+ vfree(iscsit_global->ts_bitmap);
configfs_out:
- iscsi_target_deregister_configfs();
+ /* XXX: this probably wants to be its own unwind step. */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+ target_unregister_template(&iscsi_ops);
out:
kfree(iscsit_global);
return -ENOMEM;
@@ -630,8 +626,6 @@ out:
static void __exit iscsi_target_cleanup_module(void)
{
- iscsi_deallocate_thread_sets();
- iscsi_thread_set_free();
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_qr_cache);
@@ -639,8 +633,15 @@ static void __exit iscsi_target_cleanup_module(void)
kmem_cache_destroy(lio_ooo_cache);
kmem_cache_destroy(lio_r2t_cache);
- iscsi_target_deregister_configfs();
+ /*
+ * Shutdown discovery sessions and disable discovery TPG
+ */
+ if (iscsit_global->discovery_tpg)
+ iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
+ target_unregister_template(&iscsi_ops);
+
+ vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}
@@ -990,7 +991,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
/*
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
*/
- transport_init_se_cmd(&cmd->se_cmd, &lio_target_fabric_configfs->tf_ops,
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, be32_to_cpu(hdr->data_length),
cmd->data_direction, sam_task_attr,
cmd->sense_buffer + 2);
@@ -1805,8 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
u8 tcm_function;
int ret;
- transport_init_se_cmd(&cmd->se_cmd,
- &lio_target_fabric_configfs->tf_ops,
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
@@ -2155,7 +2155,6 @@ reject:
cmd->text_in_ptr = NULL;
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
}
-EXPORT_SYMBOL(iscsit_handle_text_cmd);
int iscsit_logout_closesession(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
@@ -3715,17 +3714,16 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
- struct iscsi_thread_set *ts = conn->thread_set;
int ord, cpu;
/*
- * thread_id is assigned from iscsit_global->ts_bitmap from
- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsit_start_kthreads()
*
- * Here we use thread_id to determine which CPU that this
- * iSCSI connection's iscsi_thread_set will be scheduled to
+ * Here we use bitmap_id to determine which CPU that this
+ * iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3914,7 +3912,7 @@ check_rsp_state:
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
- goto restart;
+ return -ECONNRESET;
/* fall through */
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
@@ -3942,8 +3940,6 @@ check_rsp_state:
err:
return -1;
-restart:
- return -EAGAIN;
}
static int iscsit_handle_response_queue(struct iscsi_conn *conn)
@@ -3970,21 +3966,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
- struct iscsi_conn *conn;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);
-restart:
- conn = iscsi_tx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
- ret = 0;
-
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3993,11 +3981,9 @@ restart:
iscsit_thread_check_cpumask(conn, current, 1);
wait_event_interruptible(conn->queues_wq,
- !iscsit_conn_all_queues_empty(conn) ||
- ts->status == ISCSI_THREAD_SET_RESET);
+ !iscsit_conn_all_queues_empty(conn));
- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
- signal_pending(current))
+ if (signal_pending(current))
goto transport_err;
get_immediate:
@@ -4008,15 +3994,14 @@ get_immediate:
ret = iscsit_handle_response_queue(conn);
if (ret == 1)
goto get_immediate;
- else if (ret == -EAGAIN)
- goto restart;
+ else if (ret == -ECONNRESET)
+ goto out;
else if (ret < 0)
goto transport_err;
}
transport_err:
iscsit_take_action_for_connection_exit(conn);
- goto restart;
out:
return 0;
}
@@ -4111,8 +4096,7 @@ int iscsi_target_rx_thread(void *arg)
int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
@@ -4120,11 +4104,6 @@ int iscsi_target_rx_thread(void *arg)
*/
allow_signal(SIGINT);
-restart:
- conn = iscsi_rx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
int rc;
@@ -4134,7 +4113,7 @@ restart:
if (rc < 0)
goto transport_err;
- goto out;
+ goto transport_err;
}
while (!kthread_should_stop()) {
@@ -4210,8 +4189,6 @@ transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn);
- goto restart;
-out:
return 0;
}
@@ -4273,7 +4250,24 @@ int iscsit_close_connection(
if (conn->conn_transport->transport_type == ISCSI_TCP)
complete(&conn->conn_logout_comp);
- iscsi_release_thread_set(conn);
+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+ if (conn->tx_thread &&
+ cmpxchg(&conn->tx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+ if (conn->rx_thread &&
+ cmpxchg(&conn->rx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ }
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
@@ -4383,8 +4377,6 @@ int iscsit_close_connection(
iscsit_put_transport(conn->conn_transport);
- conn->thread_set = NULL;
-
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
kfree(conn);
@@ -4551,15 +4543,13 @@ static void iscsit_logout_post_handler_closesession(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
-
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
iscsit_dec_conn_usage_count(conn);
- iscsit_stop_session(sess, 1, 1);
+ iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
target_put_session(sess->se_sess);
}
@@ -4567,13 +4557,12 @@ static void iscsit_logout_post_handler_closesession(
static void iscsit_logout_post_handler_samecid(
struct iscsi_conn *conn)
{
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
- iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_cause_connection_reinstatement(conn, sleep);
iscsit_dec_conn_usage_count(conn);
}
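The thread-set machinery removed above is replaced by one RX and one TX kthread per connection, identified by conn->bitmap_id. Shutdown relies on a cmpxchg() handshake on the {rx,tx}_thread_active flags so that whichever context tears the connection down signals and stops each peer thread exactly once; the pattern used in iscsit_close_connection() and the logout post-handlers, condensed with a placeholder name:

static void example_stop_tx_thread(struct iscsi_conn *conn)
{
	if (conn->tx_thread &&
	    cmpxchg(&conn->tx_thread_active, true, false)) {
		send_sig(SIGINT, conn->tx_thread, 1);	/* break out of the wait */
		kthread_stop(conn->tx_thread);
	}
}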
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index e936d56fb523..7d0f9c00d9c2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -35,7 +35,7 @@ extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
extern struct iscsit_global *iscsit_global;
-extern struct target_fabric_configfs *lio_target_fabric_configfs;
+extern const struct target_core_fabric_ops iscsi_ops;
extern struct kmem_cache *lio_dr_cache;
extern struct kmem_cache *lio_ooo_cache;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 48384b675e62..469fce44ebad 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -37,9 +37,6 @@
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include <target/iscsi/iscsi_target_stat.h>
-#include "iscsi_target_configfs.h"
-
-struct target_fabric_configfs *lio_target_fabric_configfs;
struct lio_target_configfs_attribute {
struct configfs_attribute attr;
@@ -1052,6 +1049,11 @@ TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
*/
DEF_TPG_ATTRIB(t10_pi);
TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_fabric_prot_type
+ */
+DEF_TPG_ATTRIB(fabric_prot_type);
+TPG_ATTR(fabric_prot_type, S_IRUGO | S_IWUSR);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_authentication.attr,
@@ -1065,6 +1067,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_demo_mode_discovery.attr,
&iscsi_tpg_attrib_default_erl.attr,
&iscsi_tpg_attrib_t10_pi.attr,
+ &iscsi_tpg_attrib_fabric_prot_type.attr,
NULL,
};
@@ -1410,8 +1413,18 @@ out:
TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
+static ssize_t lio_target_tpg_show_dynamic_sessions(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return target_show_dynamic_sessions(se_tpg, page);
+}
+
+TF_TPG_BASE_ATTR_RO(lio_target, dynamic_sessions);
+
static struct configfs_attribute *lio_target_tpg_attrs[] = {
&lio_target_tpg_enable.attr,
+ &lio_target_tpg_dynamic_sessions.attr,
NULL,
};
@@ -1450,10 +1463,8 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
if (!tpg)
return NULL;
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- wwn, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_NORMAL);
+ ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg,
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;
@@ -1872,6 +1883,20 @@ static int lio_tpg_check_prod_mode_write_protect(
return tpg->tpg_attrib.prod_mode_write_protect;
}
+static int lio_tpg_check_prot_fabric_only(
+ struct se_portal_group *se_tpg)
+{
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
+ /*
+ * Only report fabric_prot_type if t10_pi has also been enabled
+ * for incoming ib_isert sessions.
+ */
+ if (!tpg->tpg_attrib.t10_pi)
+ return 0;
+
+ return tpg->tpg_attrib.fabric_prot_type;
+}
+
static void lio_tpg_release_fabric_acl(
struct se_portal_group *se_tpg,
struct se_node_acl *se_acl)
@@ -1953,115 +1978,60 @@ static void lio_release_cmd(struct se_cmd *se_cmd)
iscsit_release_cmd(cmd);
}
-/* End functions for target_core_fabric_ops */
-
-int iscsi_target_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- lio_target_fabric_configfs = NULL;
- fabric = target_fabric_configfs_init(THIS_MODULE, "iscsi");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() for"
- " LIO-Target failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod..
- */
- fabric->tf_ops.get_fabric_name = &iscsi_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &iscsi_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &lio_tpg_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &lio_tpg_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &lio_tpg_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &iscsi_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &iscsi_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &iscsi_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &lio_tpg_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &lio_tpg_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &lio_tpg_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &lio_tpg_check_prod_mode_write_protect;
- fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
- fabric->tf_ops.check_stop_free = &lio_check_stop_free,
- fabric->tf_ops.release_cmd = &lio_release_cmd;
- fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
- fabric->tf_ops.close_session = &lio_tpg_close_session;
- fabric->tf_ops.sess_get_index = &lio_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = &lio_sess_get_initiator_sid;
- fabric->tf_ops.write_pending = &lio_write_pending;
- fabric->tf_ops.write_pending_status = &lio_write_pending_status;
- fabric->tf_ops.set_default_node_attributes =
- &lio_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &iscsi_get_task_tag;
- fabric->tf_ops.get_cmd_state = &iscsi_get_cmd_state;
- fabric->tf_ops.queue_data_in = &lio_queue_data_in;
- fabric->tf_ops.queue_status = &lio_queue_status;
- fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
- fabric->tf_ops.aborted_task = &lio_aborted_task;
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &lio_target_call_coreaddtiqn;
- fabric->tf_ops.fabric_drop_wwn = &lio_target_call_coredeltiqn;
- fabric->tf_ops.fabric_make_tpg = &lio_target_tiqn_addtpg;
- fabric->tf_ops.fabric_drop_tpg = &lio_target_tiqn_deltpg;
- fabric->tf_ops.fabric_post_link = NULL;
- fabric->tf_ops.fabric_pre_unlink = NULL;
- fabric->tf_ops.fabric_make_np = &lio_target_call_addnptotpg;
- fabric->tf_ops.fabric_drop_np = &lio_target_call_delnpfromtpg;
- fabric->tf_ops.fabric_make_nodeacl = &lio_target_make_nodeacl;
- fabric->tf_ops.fabric_drop_nodeacl = &lio_target_drop_nodeacl;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- * sturct config_item_type's
- */
- fabric->tf_cit_tmpl.tfc_discovery_cit.ct_attrs = lio_target_discovery_auth_attrs;
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = lio_target_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = lio_target_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = lio_target_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_auth_cit.ct_attrs = lio_target_tpg_auth_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = lio_target_tpg_param_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = lio_target_portal_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = lio_target_initiator_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = lio_target_nacl_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = lio_target_nacl_auth_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = lio_target_nacl_param_attrs;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " LIO-Target failed!\n");
- target_fabric_configfs_free(fabric);
- return ret;
- }
-
- lio_target_fabric_configfs = fabric;
- pr_debug("LIO_TARGET[0] - Set fabric ->"
- " lio_target_fabric_configfs\n");
- return 0;
-}
-
-
-void iscsi_target_deregister_configfs(void)
-{
- if (!lio_target_fabric_configfs)
- return;
- /*
- * Shutdown discovery sessions and disable discovery TPG
- */
- if (iscsit_global->discovery_tpg)
- iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
-
- target_fabric_configfs_deregister(lio_target_fabric_configfs);
- lio_target_fabric_configfs = NULL;
- pr_debug("LIO_TARGET[0] - Cleared"
- " lio_target_fabric_configfs\n");
-}
+const struct target_core_fabric_ops iscsi_ops = {
+ .module = THIS_MODULE,
+ .name = "iscsi",
+ .get_fabric_name = iscsi_get_fabric_name,
+ .get_fabric_proto_ident = iscsi_get_fabric_proto_ident,
+ .tpg_get_wwn = lio_tpg_get_endpoint_wwn,
+ .tpg_get_tag = lio_tpg_get_tag,
+ .tpg_get_default_depth = lio_tpg_get_default_depth,
+ .tpg_get_pr_transport_id = iscsi_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = iscsi_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = iscsi_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = lio_tpg_check_demo_mode,
+ .tpg_check_demo_mode_cache = lio_tpg_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ lio_tpg_check_demo_mode_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ lio_tpg_check_prod_mode_write_protect,
+ .tpg_check_prot_fabric_only = &lio_tpg_check_prot_fabric_only,
+ .tpg_alloc_fabric_acl = lio_tpg_alloc_fabric_acl,
+ .tpg_release_fabric_acl = lio_tpg_release_fabric_acl,
+ .tpg_get_inst_index = lio_tpg_get_inst_index,
+ .check_stop_free = lio_check_stop_free,
+ .release_cmd = lio_release_cmd,
+ .shutdown_session = lio_tpg_shutdown_session,
+ .close_session = lio_tpg_close_session,
+ .sess_get_index = lio_sess_get_index,
+ .sess_get_initiator_sid = lio_sess_get_initiator_sid,
+ .write_pending = lio_write_pending,
+ .write_pending_status = lio_write_pending_status,
+ .set_default_node_attributes = lio_set_default_node_attributes,
+ .get_task_tag = iscsi_get_task_tag,
+ .get_cmd_state = iscsi_get_cmd_state,
+ .queue_data_in = lio_queue_data_in,
+ .queue_status = lio_queue_status,
+ .queue_tm_rsp = lio_queue_tm_rsp,
+ .aborted_task = lio_aborted_task,
+ .fabric_make_wwn = lio_target_call_coreaddtiqn,
+ .fabric_drop_wwn = lio_target_call_coredeltiqn,
+ .fabric_make_tpg = lio_target_tiqn_addtpg,
+ .fabric_drop_tpg = lio_target_tiqn_deltpg,
+ .fabric_make_np = lio_target_call_addnptotpg,
+ .fabric_drop_np = lio_target_call_delnpfromtpg,
+ .fabric_make_nodeacl = lio_target_make_nodeacl,
+ .fabric_drop_nodeacl = lio_target_drop_nodeacl,
+
+ .tfc_discovery_attrs = lio_target_discovery_auth_attrs,
+ .tfc_wwn_attrs = lio_target_wwn_attrs,
+ .tfc_tpg_base_attrs = lio_target_tpg_attrs,
+ .tfc_tpg_attrib_attrs = lio_target_tpg_attrib_attrs,
+ .tfc_tpg_auth_attrs = lio_target_tpg_auth_attrs,
+ .tfc_tpg_param_attrs = lio_target_tpg_param_attrs,
+ .tfc_tpg_np_base_attrs = lio_target_portal_attrs,
+ .tfc_tpg_nacl_base_attrs = lio_target_initiator_attrs,
+ .tfc_tpg_nacl_attrib_attrs = lio_target_nacl_attrib_attrs,
+ .tfc_tpg_nacl_auth_attrs = lio_target_nacl_auth_attrs,
+ .tfc_tpg_nacl_param_attrs = lio_target_nacl_param_attrs,
+};
diff --git a/drivers/target/iscsi/iscsi_target_configfs.h b/drivers/target/iscsi/iscsi_target_configfs.h
deleted file mode 100644
index 8cd5a63c4edc..000000000000
--- a/drivers/target/iscsi/iscsi_target_configfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef ISCSI_TARGET_CONFIGFS_H
-#define ISCSI_TARGET_CONFIGFS_H
-
-extern int iscsi_target_register_configfs(void);
-extern void iscsi_target_deregister_configfs(void);
-
-#endif /* ISCSI_TARGET_CONFIGFS_H */
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index bdd8731a4daa..959a14c9dd5d 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -23,7 +23,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include "iscsi_target_seq_pdu_list.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
@@ -860,7 +859,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
}
spin_unlock_bh(&conn->state_lock);
- iscsi_thread_set_force_reinstatement(conn);
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
sleep:
wait_for_completion(&conn->conn_wait_rcfr_comp);
@@ -885,10 +887,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
return;
}
- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
- spin_unlock_bh(&conn->state_lock);
- return;
- }
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);
atomic_set(&conn->connection_reinstatement, 1);
if (!sleep) {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 153fb66ac1b8..8ce94ff744e6 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -26,7 +26,6 @@
#include <target/iscsi/iscsi_target_core.h>
#include <target/iscsi/iscsi_target_stat.h>
-#include "iscsi_target_tq.h"
#include "iscsi_target_device.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_erl0.h"
@@ -699,6 +698,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}
+static int iscsit_start_kthreads(struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ ISCSIT_BITMAP_BITS, get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ if (conn->bitmap_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " iscsit_start_kthreads()\n");
+ return -ENOMEM;
+ }
+
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+ "%s", ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(conn->tx_thread)) {
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ ret = PTR_ERR(conn->tx_thread);
+ goto out_bitmap;
+ }
+ conn->tx_thread_active = true;
+
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+ "%s", ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(conn->rx_thread)) {
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ ret = PTR_ERR(conn->rx_thread);
+ goto out_tx;
+ }
+ conn->rx_thread_active = true;
+
+ return 0;
+out_tx:
+ kthread_stop(conn->tx_thread);
+ conn->tx_thread_active = false;
+out_bitmap:
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ return ret;
+}
+
int iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
@@ -709,7 +753,7 @@ int iscsi_post_login_handler(
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- struct iscsi_thread_set *ts;
+ int rc;
iscsit_inc_conn_usage_count(conn);
@@ -724,7 +768,6 @@ int iscsi_post_login_handler(
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
- ts = iscsi_get_thread_set();
if (!zero_tsih) {
iscsi_set_session_parameters(sess->sess_ops,
conn->param_list, 0);
@@ -751,9 +794,11 @@ int iscsi_post_login_handler(
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);
- iscsi_post_login_start_timers(conn);
+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;
- iscsi_activate_thread_set(conn, ts);
+ iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
@@ -810,8 +855,11 @@ int iscsi_post_login_handler(
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);
+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;
+
iscsi_post_login_start_timers(conn);
- iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
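iscsit_start_kthreads() above pairs a per-connection bitmap slot with the two kthread_run() calls and unwinds in reverse order on failure. Since get_order(1) is 0, each connection claims a single bit; a sketch of the claim/release pairing with placeholder names:

#include <linux/bitmap.h>
#include <linux/spinlock.h>

static int example_claim_id(unsigned long *map, spinlock_t *lock, int bits)
{
	int id;

	spin_lock(lock);
	id = bitmap_find_free_region(map, bits, 0);	/* order 0 == one bit */
	spin_unlock(lock);

	return id;					/* negative on failure */
}

static void example_release_id(unsigned long *map, spinlock_t *lock, int id)
{
	spin_lock(lock);
	bitmap_release_region(map, id, 0);
	spin_unlock(lock);
}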
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index bdd127c0e3ae..e8a240818353 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -68,10 +68,8 @@ int iscsit_load_discovery_tpg(void)
return -1;
}
- ret = core_tpg_register(
- &lio_target_fabric_configfs->tf_ops,
- NULL, &tpg->tpg_se_tpg, tpg,
- TRANSPORT_TPG_TYPE_DISCOVERY);
+ ret = core_tpg_register(&iscsi_ops, NULL, &tpg->tpg_se_tpg,
+ tpg, TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);
return -1;
@@ -228,6 +226,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
a->default_erl = TA_DEFAULT_ERL;
a->t10_pi = TA_DEFAULT_T10_PI;
+ a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -878,3 +877,21 @@ int iscsit_ta_t10_pi(
return 0;
}
+
+int iscsit_ta_fabric_prot_type(
+ struct iscsi_portal_group *tpg,
+ u32 prot_type)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((prot_type != 0) && (prot_type != 1) && (prot_type != 3)) {
+ pr_err("Illegal value for fabric_prot_type: %u\n", prot_type);
+ return -EINVAL;
+ }
+
+ a->fabric_prot_type = prot_type;
+ pr_debug("iSCSI_TPG[%hu] - T10 Fabric Protection Type: %u\n",
+ tpg->tpgt, prot_type);
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index e7265337bc43..95ff5bdecd71 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -39,5 +39,6 @@ extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
deleted file mode 100644
index 26aa50996473..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ /dev/null
@@ -1,495 +0,0 @@
-/*******************************************************************************
- * This file contains the iSCSI Login Thread and Thread Queue functions.
- *
- * (c) Copyright 2007-2013 Datera, Inc.
- *
- * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- ******************************************************************************/
-
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/bitmap.h>
-
-#include <target/iscsi/iscsi_target_core.h>
-#include "iscsi_target_tq.h"
-#include "iscsi_target.h"
-
-static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(inactive_ts_lock);
-static DEFINE_SPINLOCK(ts_bitmap_lock);
-
-static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
-{
- if (!list_empty(&ts->ts_list)) {
- WARN_ON(1);
- return;
- }
- spin_lock(&inactive_ts_lock);
- list_add_tail(&ts->ts_list, &inactive_ts_list);
- iscsit_global->inactive_ts++;
- spin_unlock(&inactive_ts_lock);
-}
-
-static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
-{
- struct iscsi_thread_set *ts;
-
- spin_lock(&inactive_ts_lock);
- if (list_empty(&inactive_ts_list)) {
- spin_unlock(&inactive_ts_lock);
- return NULL;
- }
-
- ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
-
- list_del_init(&ts->ts_list);
- iscsit_global->inactive_ts--;
- spin_unlock(&inactive_ts_lock);
-
- return ts;
-}
-
-int iscsi_allocate_thread_sets(u32 thread_pair_count)
-{
- int allocated_thread_pair_count = 0, i, thread_id;
- struct iscsi_thread_set *ts = NULL;
-
- for (i = 0; i < thread_pair_count; i++) {
- ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
- if (!ts) {
- pr_err("Unable to allocate memory for"
- " thread set.\n");
- return allocated_thread_pair_count;
- }
- /*
- * Locate the next available regision in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
- iscsit_global->ts_bitmap_count, get_order(1));
- spin_unlock(&ts_bitmap_lock);
- if (thread_id < 0) {
- pr_err("bitmap_find_free_region() failed for"
- " thread_set_bitmap\n");
- kfree(ts);
- return allocated_thread_pair_count;
- }
-
- ts->thread_id = thread_id;
- ts->status = ISCSI_THREAD_SET_FREE;
- INIT_LIST_HEAD(&ts->ts_list);
- spin_lock_init(&ts->ts_state_lock);
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
- init_completion(&ts->rx_start_comp);
- init_completion(&ts->tx_start_comp);
- sema_init(&ts->ts_activate_sem, 0);
-
- ts->create_threads = 1;
- ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s",
- ISCSI_TX_THREAD_NAME);
- if (IS_ERR(ts->tx_thread)) {
- dump_stack();
- pr_err("Unable to start iscsi_target_tx_thread\n");
- break;
- }
-
- ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s",
- ISCSI_RX_THREAD_NAME);
- if (IS_ERR(ts->rx_thread)) {
- kthread_stop(ts->tx_thread);
- pr_err("Unable to start iscsi_target_rx_thread\n");
- break;
- }
- ts->create_threads = 0;
-
- iscsi_add_ts_to_inactive_list(ts);
- allocated_thread_pair_count++;
- }
-
- pr_debug("Spawned %d thread set(s) (%d total threads).\n",
- allocated_thread_pair_count, allocated_thread_pair_count * 2);
- return allocated_thread_pair_count;
-}
-
-static void iscsi_deallocate_thread_one(struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_DIE;
-
- if (ts->rx_thread) {
- complete(&ts->rx_start_comp);
- spin_unlock_bh(&ts->ts_state_lock);
- kthread_stop(ts->rx_thread);
- spin_lock_bh(&ts->ts_state_lock);
- }
- if (ts->tx_thread) {
- complete(&ts->tx_start_comp);
- spin_unlock_bh(&ts->ts_state_lock);
- kthread_stop(ts->tx_thread);
- spin_lock_bh(&ts->ts_state_lock);
- }
- spin_unlock_bh(&ts->ts_state_lock);
- /*
- * Release this thread_id in the thread_set_bitmap
- */
- spin_lock(&ts_bitmap_lock);
- bitmap_release_region(iscsit_global->ts_bitmap,
- ts->thread_id, get_order(1));
- spin_unlock(&ts_bitmap_lock);
-
- kfree(ts);
-}
-
-void iscsi_deallocate_thread_sets(void)
-{
- struct iscsi_thread_set *ts = NULL;
- u32 released_count = 0;
-
- while ((ts = iscsi_get_ts_from_inactive_list())) {
-
- iscsi_deallocate_thread_one(ts);
- released_count++;
- }
-
- if (released_count)
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
-}
-
-static void iscsi_deallocate_extra_thread_sets(void)
-{
- u32 orig_count, released_count = 0;
- struct iscsi_thread_set *ts = NULL;
-
- orig_count = TARGET_THREAD_SET_COUNT;
-
- while ((iscsit_global->inactive_ts + 1) > orig_count) {
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts)
- break;
-
- iscsi_deallocate_thread_one(ts);
- released_count++;
- }
-
- if (released_count)
- pr_debug("Stopped %d thread set(s) (%d total threads)."
- "\n", released_count, released_count * 2);
-}
-
-void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- conn->thread_set = ts;
- ts->conn = conn;
- ts->status = ISCSI_THREAD_SET_ACTIVE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- complete(&ts->rx_start_comp);
- complete(&ts->tx_start_comp);
-
- down(&ts->ts_activate_sem);
-}
-
-struct iscsi_thread_set *iscsi_get_thread_set(void)
-{
- struct iscsi_thread_set *ts;
-
-get_set:
- ts = iscsi_get_ts_from_inactive_list();
- if (!ts) {
- iscsi_allocate_thread_sets(1);
- goto get_set;
- }
-
- ts->delay_inactive = 1;
- ts->signal_sent = 0;
- ts->thread_count = 2;
- init_completion(&ts->rx_restart_comp);
- init_completion(&ts->tx_restart_comp);
- sema_init(&ts->ts_activate_sem, 0);
-
- return ts;
-}
-
-void iscsi_set_thread_clear(struct iscsi_conn *conn, u8 thread_clear)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->thread_clear &= ~thread_clear;
-
- if ((thread_clear & ISCSI_CLEAR_RX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_RX_THREAD))
- complete(&ts->rx_restart_comp);
- else if ((thread_clear & ISCSI_CLEAR_TX_THREAD) &&
- (ts->blocked_threads & ISCSI_BLOCK_TX_THREAD))
- complete(&ts->tx_restart_comp);
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-void iscsi_set_thread_set_signal(struct iscsi_conn *conn, u8 signal_sent)
-{
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn->thread_set) {
- pr_err("struct iscsi_conn->thread_set is NULL\n");
- return;
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->signal_sent |= signal_sent;
- spin_unlock_bh(&ts->ts_state_lock);
-}
-
-int iscsi_release_thread_set(struct iscsi_conn *conn)
-{
- int thread_called = 0;
- struct iscsi_thread_set *ts = NULL;
-
- if (!conn || !conn->thread_set) {
- pr_err("connection or thread set pointer is NULL\n");
- BUG();
- }
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- ts->status = ISCSI_THREAD_SET_RESET;
-
- if (!strncmp(current->comm, ISCSI_RX_THREAD_NAME,
- strlen(ISCSI_RX_THREAD_NAME)))
- thread_called = ISCSI_RX_THREAD;
- else if (!strncmp(current->comm, ISCSI_TX_THREAD_NAME,
- strlen(ISCSI_TX_THREAD_NAME)))
- thread_called = ISCSI_TX_THREAD;
-
- if (ts->rx_thread && (thread_called == ISCSI_TX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD)) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->rx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_RX_THREAD;
- }
- if (ts->tx_thread && (thread_called == ISCSI_RX_THREAD) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD)) {
-
- if (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD)) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- ts->blocked_threads |= ISCSI_BLOCK_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
- wait_for_completion(&ts->tx_restart_comp);
- spin_lock_bh(&ts->ts_state_lock);
- ts->blocked_threads &= ~ISCSI_BLOCK_TX_THREAD;
- }
-
- ts->conn = NULL;
- ts->status = ISCSI_THREAD_SET_FREE;
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-int iscsi_thread_set_force_reinstatement(struct iscsi_conn *conn)
-{
- struct iscsi_thread_set *ts;
-
- if (!conn->thread_set)
- return -1;
- ts = conn->thread_set;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->status != ISCSI_THREAD_SET_ACTIVE) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
-
- if (ts->tx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_TX_THREAD))) {
- send_sig(SIGINT, ts->tx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_TX_THREAD;
- }
- if (ts->rx_thread && (!(ts->signal_sent & ISCSI_SIGNAL_RX_THREAD))) {
- send_sig(SIGINT, ts->rx_thread, 1);
- ts->signal_sent |= ISCSI_SIGNAL_RX_THREAD;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-static void iscsi_check_to_add_additional_sets(void)
-{
- int thread_sets_add;
-
- spin_lock(&inactive_ts_lock);
- thread_sets_add = iscsit_global->inactive_ts;
- spin_unlock(&inactive_ts_lock);
- if (thread_sets_add == 1)
- iscsi_allocate_thread_sets(1);
-}
-
-static int iscsi_signal_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->status == ISCSI_THREAD_SET_DIE || kthread_should_stop() ||
- signal_pending(current)) {
- spin_unlock_bh(&ts->ts_state_lock);
- return -1;
- }
- spin_unlock_bh(&ts->ts_state_lock);
-
- return 0;
-}
-
-struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- if (ts->status != ISCSI_THREAD_SET_DIE)
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
-
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_RX_THREAD))
- complete(&ts->rx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->rx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- iscsi_check_to_add_additional_sets();
-
- spin_lock_bh(&ts->ts_state_lock);
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for"
- " RX thread_id: %s/%d\n", current->comm, current->pid);
- spin_unlock_bh(&ts->ts_state_lock);
- return NULL;
- }
- ts->thread_clear |= ISCSI_CLEAR_RX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-
- up(&ts->ts_activate_sem);
-
- return ts->conn;
-}
-
-struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
-{
- int ret;
-
- spin_lock_bh(&ts->ts_state_lock);
- if (ts->create_threads) {
- spin_unlock_bh(&ts->ts_state_lock);
- goto sleep;
- }
-
- if (ts->status != ISCSI_THREAD_SET_DIE)
- flush_signals(current);
-
- if (ts->delay_inactive && (--ts->thread_count == 0)) {
- spin_unlock_bh(&ts->ts_state_lock);
-
- if (!iscsit_global->in_shutdown)
- iscsi_deallocate_extra_thread_sets();
-
- iscsi_add_ts_to_inactive_list(ts);
- spin_lock_bh(&ts->ts_state_lock);
- }
- if ((ts->status == ISCSI_THREAD_SET_RESET) &&
- (ts->thread_clear & ISCSI_CLEAR_TX_THREAD))
- complete(&ts->tx_restart_comp);
-
- ts->thread_clear &= ~ISCSI_CLEAR_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-sleep:
- ret = wait_for_completion_interruptible(&ts->tx_start_comp);
- if (ret != 0)
- return NULL;
-
- if (iscsi_signal_thread_pre_handler(ts) < 0)
- return NULL;
-
- iscsi_check_to_add_additional_sets();
-
- spin_lock_bh(&ts->ts_state_lock);
- if (!ts->conn) {
- pr_err("struct iscsi_thread_set->conn is NULL for"
- " TX thread_id: %s/%d\n", current->comm, current->pid);
- spin_unlock_bh(&ts->ts_state_lock);
- return NULL;
- }
- ts->thread_clear |= ISCSI_CLEAR_TX_THREAD;
- spin_unlock_bh(&ts->ts_state_lock);
-
- up(&ts->ts_activate_sem);
-
- return ts->conn;
-}
-
-int iscsi_thread_set_init(void)
-{
- int size;
-
- iscsit_global->ts_bitmap_count = ISCSI_TS_BITMAP_BITS;
-
- size = BITS_TO_LONGS(iscsit_global->ts_bitmap_count) * sizeof(long);
- iscsit_global->ts_bitmap = kzalloc(size, GFP_KERNEL);
- if (!iscsit_global->ts_bitmap) {
- pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void iscsi_thread_set_free(void)
-{
- kfree(iscsit_global->ts_bitmap);
-}
diff --git a/drivers/target/iscsi/iscsi_target_tq.h b/drivers/target/iscsi/iscsi_target_tq.h
deleted file mode 100644
index cc1eede5ab3a..000000000000
--- a/drivers/target/iscsi/iscsi_target_tq.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef ISCSI_THREAD_QUEUE_H
-#define ISCSI_THREAD_QUEUE_H
-
-/*
- * Defines for thread sets.
- */
-extern int iscsi_thread_set_force_reinstatement(struct iscsi_conn *);
-extern int iscsi_allocate_thread_sets(u32);
-extern void iscsi_deallocate_thread_sets(void);
-extern void iscsi_activate_thread_set(struct iscsi_conn *, struct iscsi_thread_set *);
-extern struct iscsi_thread_set *iscsi_get_thread_set(void);
-extern void iscsi_set_thread_clear(struct iscsi_conn *, u8);
-extern void iscsi_set_thread_set_signal(struct iscsi_conn *, u8);
-extern int iscsi_release_thread_set(struct iscsi_conn *);
-extern struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *);
-extern struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *);
-extern int iscsi_thread_set_init(void);
-extern void iscsi_thread_set_free(void);
-
-extern int iscsi_target_tx_thread(void *);
-extern int iscsi_target_rx_thread(void *);
-
-#define TARGET_THREAD_SET_COUNT 4
-
-#define ISCSI_RX_THREAD 1
-#define ISCSI_TX_THREAD 2
-#define ISCSI_RX_THREAD_NAME "iscsi_trx"
-#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
-#define ISCSI_BLOCK_RX_THREAD 0x1
-#define ISCSI_BLOCK_TX_THREAD 0x2
-#define ISCSI_CLEAR_RX_THREAD 0x1
-#define ISCSI_CLEAR_TX_THREAD 0x2
-#define ISCSI_SIGNAL_RX_THREAD 0x1
-#define ISCSI_SIGNAL_TX_THREAD 0x2
-
-/* struct iscsi_thread_set->status */
-#define ISCSI_THREAD_SET_FREE 1
-#define ISCSI_THREAD_SET_ACTIVE 2
-#define ISCSI_THREAD_SET_DIE 3
-#define ISCSI_THREAD_SET_RESET 4
-#define ISCSI_THREAD_SET_DEALLOCATE_THREADS 5
-
-/* By default allow a maximum of 32K iSCSI connections */
-#define ISCSI_TS_BITMAP_BITS 32768
-
-struct iscsi_thread_set {
- /* flags used for blocking and restarting sets */
- int blocked_threads;
- /* flag for creating threads */
- int create_threads;
- /* flag for delaying readding to inactive list */
- int delay_inactive;
- /* status for thread set */
- int status;
- /* which threads have had signals sent */
- int signal_sent;
- /* flag for which threads exited first */
- int thread_clear;
- /* Active threads in the thread set */
- int thread_count;
- /* Unique thread ID */
- u32 thread_id;
- /* pointer to connection if set is active */
- struct iscsi_conn *conn;
- /* used for controlling ts state accesses */
- spinlock_t ts_state_lock;
- /* used for restarting thread queue */
- struct completion rx_restart_comp;
- /* used for restarting thread queue */
- struct completion tx_restart_comp;
- /* used for normal unused blocking */
- struct completion rx_start_comp;
- /* used for normal unused blocking */
- struct completion tx_start_comp;
- /* OS descriptor for rx thread */
- struct task_struct *rx_thread;
- /* OS descriptor for tx thread */
- struct task_struct *tx_thread;
- /* struct iscsi_thread_set in list list head*/
- struct list_head ts_list;
- struct semaphore ts_activate_sem;
-};
-
-#endif /*** ISCSI_THREAD_QUEUE_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 390df8ed72b2..b18edda3e8af 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -33,7 +33,6 @@
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_tpg.h"
-#include "iscsi_target_tq.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index c36bd7c29136..51f0c895c6a5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -41,8 +41,7 @@
#define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_loop_fabric_configfs;
+static const struct target_core_fabric_ops loop_ops;
static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;
@@ -108,7 +107,7 @@ static struct device_driver tcm_loop_driverfs = {
/*
* Used with root_device_register() in tcm_loop_alloc_core_bus() below
*/
-struct device *tcm_loop_primary;
+static struct device *tcm_loop_primary;
static void tcm_loop_submission_work(struct work_struct *work)
{
@@ -697,6 +696,13 @@ static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg
return 0;
}
+static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ return tl_tpg->tl_fabric_prot_type;
+}
+
static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
struct se_portal_group *se_tpg)
{
@@ -912,6 +918,46 @@ static void tcm_loop_port_unlink(
/* End items for tcm_loop_port_cit */
+static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+
+ return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
+}
+
+static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
+ tl_se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tl_tpg->tl_fabric_prot_type = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
+ &tcm_loop_tpg_attrib_fabric_prot_type.attr,
+ NULL,
+};
+
/* Start items for tcm_loop_nexus_cit */
static int tcm_loop_make_nexus(
@@ -937,7 +983,8 @@ static int tcm_loop_make_nexus(
/*
* Initialize the struct se_session pointer
*/
- tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
+ tl_nexus->se_sess = transport_init_session(
+ TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
if (IS_ERR(tl_nexus->se_sess)) {
ret = PTR_ERR(tl_nexus->se_sess);
goto out;
@@ -1165,21 +1212,19 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
struct tcm_loop_tpg *tl_tpg;
- char *tpgt_str, *end_ptr;
int ret;
- unsigned short int tpgt;
+ unsigned long tpgt;
- tpgt_str = strstr(name, "tpgt_");
- if (!tpgt_str) {
+ if (strstr(name, "tpgt_") != name) {
pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
- tpgt_str += 5; /* Skip ahead of "tpgt_" */
- tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
+ if (kstrtoul(name+5, 10, &tpgt))
+ return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
- pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
@@ -1189,14 +1234,13 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
/*
* Register the tl_tpg as a emulated SAS TCM Target Endpoint
*/
- ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, tl_tpg,
+ ret = core_tpg_register(&loop_ops, wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(-ENOMEM);
pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
- " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
+ " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
@@ -1338,127 +1382,51 @@ static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
/* End items for tcm_loop_cit */
-static int tcm_loop_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
- /*
- * Set the TCM Loop HBA counter to zero
- */
- tcm_loop_hba_no_cnt = 0;
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (IS_ERR(fabric)) {
- pr_err("tcm_loop_register_configfs() failed!\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup the fabric API of function pointers used by target_core_mod
- */
- fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
- fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
- fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
- fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
- fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
- fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
- fabric->tf_ops.tpg_get_pr_transport_id_len =
- &tcm_loop_get_pr_transport_id_len;
- fabric->tf_ops.tpg_parse_pr_out_transport_id =
- &tcm_loop_parse_pr_out_transport_id;
- fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
- fabric->tf_ops.tpg_check_demo_mode_cache =
- &tcm_loop_check_demo_mode_cache;
- fabric->tf_ops.tpg_check_demo_mode_write_protect =
- &tcm_loop_check_demo_mode_write_protect;
- fabric->tf_ops.tpg_check_prod_mode_write_protect =
- &tcm_loop_check_prod_mode_write_protect;
- /*
- * The TCM loopback fabric module runs in demo-mode to a local
- * virtual SCSI device, so fabric dependent initator ACLs are
- * not required.
- */
- fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
- fabric->tf_ops.tpg_release_fabric_acl =
- &tcm_loop_tpg_release_fabric_acl;
- fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
- /*
- * Used for setting up remaining TCM resources in process context
- */
- fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
- fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
- fabric->tf_ops.close_session = &tcm_loop_close_session;
- fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
- fabric->tf_ops.sess_get_initiator_sid = NULL;
- fabric->tf_ops.write_pending = &tcm_loop_write_pending;
- fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
- /*
- * Not used for TCM loopback
- */
- fabric->tf_ops.set_default_node_attributes =
- &tcm_loop_set_default_node_attributes;
- fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
- fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
- fabric->tf_ops.queue_status = &tcm_loop_queue_status;
- fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
- fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
-
- /*
- * Setup function pointers for generic logic in target_core_fabric_configfs.c
- */
- fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
- fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
- fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
- fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
- /*
- * fabric_post_link() and fabric_pre_unlink() are used for
- * registration and release of TCM Loop Virtual SCSI LUNs.
- */
- fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
- fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
- fabric->tf_ops.fabric_make_np = NULL;
- fabric->tf_ops.fabric_drop_np = NULL;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- /*
- * Once fabric->tf_ops has been setup, now register the fabric for
- * use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() for"
- " TCM_Loop failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
- /*
- * Setup our local pointer to *fabric.
- */
- tcm_loop_fabric_configfs = fabric;
- pr_debug("TCM_LOOP[0] - Set fabric ->"
- " tcm_loop_fabric_configfs\n");
- return 0;
-}
-
-static void tcm_loop_deregister_configfs(void)
-{
- if (!tcm_loop_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
- tcm_loop_fabric_configfs = NULL;
- pr_debug("TCM_LOOP[0] - Cleared"
- " tcm_loop_fabric_configfs\n");
-}
+static const struct target_core_fabric_ops loop_ops = {
+ .module = THIS_MODULE,
+ .name = "loopback",
+ .get_fabric_name = tcm_loop_get_fabric_name,
+ .get_fabric_proto_ident = tcm_loop_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
+ .tpg_get_tag = tcm_loop_get_tag,
+ .tpg_get_default_depth = tcm_loop_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_loop_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_loop_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_loop_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_loop_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_loop_check_demo_mode_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_loop_check_prod_mode_write_protect,
+ .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
+ .tpg_alloc_fabric_acl = tcm_loop_tpg_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_loop_tpg_release_fabric_acl,
+ .tpg_get_inst_index = tcm_loop_get_inst_index,
+ .check_stop_free = tcm_loop_check_stop_free,
+ .release_cmd = tcm_loop_release_cmd,
+ .shutdown_session = tcm_loop_shutdown_session,
+ .close_session = tcm_loop_close_session,
+ .sess_get_index = tcm_loop_sess_get_index,
+ .write_pending = tcm_loop_write_pending,
+ .write_pending_status = tcm_loop_write_pending_status,
+ .set_default_node_attributes = tcm_loop_set_default_node_attributes,
+ .get_task_tag = tcm_loop_get_task_tag,
+ .get_cmd_state = tcm_loop_get_cmd_state,
+ .queue_data_in = tcm_loop_queue_data_in,
+ .queue_status = tcm_loop_queue_status,
+ .queue_tm_rsp = tcm_loop_queue_tm_rsp,
+ .aborted_task = tcm_loop_aborted_task,
+ .fabric_make_wwn = tcm_loop_make_scsi_hba,
+ .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
+ .fabric_make_tpg = tcm_loop_make_naa_tpg,
+ .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
+ .fabric_post_link = tcm_loop_port_link,
+ .fabric_pre_unlink = tcm_loop_port_unlink,
+ .tfc_wwn_attrs = tcm_loop_wwn_attrs,
+ .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
+ .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
+};
static int __init tcm_loop_fabric_init(void)
{
@@ -1482,7 +1450,7 @@ static int __init tcm_loop_fabric_init(void)
if (ret)
goto out_destroy_cache;
- ret = tcm_loop_register_configfs();
+ ret = target_register_template(&loop_ops);
if (ret)
goto out_release_core_bus;
@@ -1500,7 +1468,7 @@ out:
static void __exit tcm_loop_fabric_exit(void)
{
- tcm_loop_deregister_configfs();
+ target_unregister_template(&loop_ops);
tcm_loop_release_core_bus();
kmem_cache_destroy(tcm_loop_cmd_cache);
destroy_workqueue(tcm_loop_workqueue);
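
Both tcm_loop above and sbp_target below now follow the same pattern: the hand-rolled target_fabric_configfs_init()/..._register() boilerplate collapses into a static const ops table handed to target_register_template() at module init and target_unregister_template() at exit. A bare-bones skeleton of that flow is sketched below; the "example" names are placeholders, and a real table must also provide the mandatory callbacks that target_fabric_tf_ops_check() verifies:

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static const struct target_core_fabric_ops example_ops = {
	.module = THIS_MODULE,
	.name   = "example",
	/* ... mandatory fabric callbacks omitted for brevity ... */
};

static int __init example_fabric_init(void)
{
	return target_register_template(&example_ops);
}

static void __exit example_fabric_exit(void)
{
	target_unregister_template(&example_ops);
}

module_init(example_fabric_init);
module_exit(example_fabric_exit);
MODULE_LICENSE("GPL");
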
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6ae49f272ba6..1e72ff77cac9 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -43,6 +43,7 @@ struct tcm_loop_nacl {
struct tcm_loop_tpg {
unsigned short tl_tpgt;
unsigned short tl_transport_status;
+ enum target_prot_type tl_fabric_prot_type;
atomic_t tl_tpg_port_count;
struct se_portal_group tl_se_tpg;
struct tcm_loop_hba *tl_hba;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 9512af6a8114..18b0f9703ff2 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -42,8 +42,7 @@
#include "sbp_target.h"
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *sbp_fabric_configfs;
+static const struct target_core_fabric_ops sbp_ops;
/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
@@ -2215,8 +2214,7 @@ static struct se_portal_group *sbp_make_tpg(
goto out_free_tpg;
}
- ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, (void *)tpg,
+ ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
goto out_unreg_mgt_agt;
@@ -2503,7 +2501,9 @@ static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops sbp_ops = {
+static const struct target_core_fabric_ops sbp_ops = {
+ .module = THIS_MODULE,
+ .name = "sbp",
.get_fabric_name = sbp_get_fabric_name,
.get_fabric_proto_ident = sbp_get_fabric_proto_ident,
.tpg_get_wwn = sbp_get_fabric_wwn,
@@ -2544,68 +2544,20 @@ static struct target_core_fabric_ops sbp_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = sbp_make_nodeacl,
.fabric_drop_nodeacl = sbp_drop_nodeacl,
-};
-
-static int sbp_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "sbp");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
-
- fabric->tf_ops = sbp_ops;
-
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
-
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed for SBP\n");
- return ret;
- }
- sbp_fabric_configfs = fabric;
-
- return 0;
-};
-
-static void sbp_deregister_configfs(void)
-{
- if (!sbp_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(sbp_fabric_configfs);
- sbp_fabric_configfs = NULL;
+ .tfc_wwn_attrs = sbp_wwn_attrs,
+ .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
+ .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};
static int __init sbp_init(void)
{
- int ret;
-
- ret = sbp_register_configfs();
- if (ret < 0)
- return ret;
-
- return 0;
+ return target_register_template(&sbp_ops);
};
static void __exit sbp_exit(void)
{
- sbp_deregister_configfs();
+ target_unregister_template(&sbp_ops);
};
MODULE_DESCRIPTION("FireWire SBP fabric driver");
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 75d89adfccc0..ddaf76a4ac2a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -142,8 +142,8 @@ static struct config_group *target_core_register_fabric(
tf = target_core_get_fabric(name);
if (!tf) {
- pr_err("target_core_register_fabric() trying autoload for %s\n",
- name);
+ pr_debug("target_core_register_fabric() trying autoload for %s\n",
+ name);
/*
* Below are some hardcoded request_module() calls to automatically
@@ -165,8 +165,8 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
- pr_err("request_module() failed for"
- " iscsi_target_mod.ko: %d\n", ret);
+ pr_debug("request_module() failed for"
+ " iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
} else if (!strncmp(name, "loopback", 8)) {
@@ -178,8 +178,8 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("tcm_loop");
if (ret < 0) {
- pr_err("request_module() failed for"
- " tcm_loop.ko: %d\n", ret);
+ pr_debug("request_module() failed for"
+ " tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
@@ -188,8 +188,8 @@ static struct config_group *target_core_register_fabric(
}
if (!tf) {
- pr_err("target_core_get_fabric() failed for %s\n",
- name);
+ pr_debug("target_core_get_fabric() failed for %s\n",
+ name);
return ERR_PTR(-EINVAL);
}
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -300,81 +300,17 @@ struct configfs_subsystem *target_core_subsystem[] = {
// Start functions called by external Target Fabrics Modules
//############################################################################*/
-/*
- * First function called by fabric modules to:
- *
- * 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
- * 2) Add struct target_fabric_configfs to g_tf_list
- * 3) Return struct target_fabric_configfs to fabric module to be passed
- * into target_fabric_configfs_register().
- */
-struct target_fabric_configfs *target_fabric_configfs_init(
- struct module *fabric_mod,
- const char *name)
+static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
- struct target_fabric_configfs *tf;
-
- if (!(name)) {
- pr_err("Unable to locate passed fabric name\n");
- return ERR_PTR(-EINVAL);
+ if (!tfo->name) {
+ pr_err("Missing tfo->name\n");
+ return -EINVAL;
}
- if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
+ if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
pr_err("Passed name: %s exceeds TARGET_FABRIC"
- "_NAME_SIZE\n", name);
- return ERR_PTR(-EINVAL);
+ "_NAME_SIZE\n", tfo->name);
+ return -EINVAL;
}
-
- tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!tf)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&tf->tf_list);
- atomic_set(&tf->tf_access_cnt, 0);
- /*
- * Setup the default generic struct config_item_type's (cits) in
- * struct target_fabric_configfs->tf_cit_tmpl
- */
- tf->tf_module = fabric_mod;
- target_fabric_setup_cits(tf);
-
- tf->tf_subsys = target_core_subsystem[0];
- snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
-
- mutex_lock(&g_tf_lock);
- list_add_tail(&tf->tf_list, &g_tf_list);
- mutex_unlock(&g_tf_lock);
-
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
- ">>>>>>>>>>>>>>\n");
- pr_debug("Initialized struct target_fabric_configfs: %p for"
- " %s\n", tf, tf->tf_name);
- return tf;
-}
-EXPORT_SYMBOL(target_fabric_configfs_init);
-
-/*
- * Called by fabric plugins after FAILED target_fabric_configfs_register() call.
- */
-void target_fabric_configfs_free(
- struct target_fabric_configfs *tf)
-{
- mutex_lock(&g_tf_lock);
- list_del(&tf->tf_list);
- mutex_unlock(&g_tf_lock);
-
- kfree(tf);
-}
-EXPORT_SYMBOL(target_fabric_configfs_free);
-
-/*
- * Perform a sanity check of the passed tf->tf_ops before completing
- * TCM fabric module registration.
- */
-static int target_fabric_tf_ops_check(
- struct target_fabric_configfs *tf)
-{
- struct target_core_fabric_ops *tfo = &tf->tf_ops;
-
if (!tfo->get_fabric_name) {
pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
@@ -508,77 +444,59 @@ static int target_fabric_tf_ops_check(
return 0;
}
-/*
- * Called 2nd from fabric module with returned parameter of
- * struct target_fabric_configfs * from target_fabric_configfs_init().
- *
- * Upon a successful registration, the new fabric's struct config_item is
- * return. Also, a pointer to this struct is set in the passed
- * struct target_fabric_configfs.
- */
-int target_fabric_configfs_register(
- struct target_fabric_configfs *tf)
+int target_register_template(const struct target_core_fabric_ops *fo)
{
+ struct target_fabric_configfs *tf;
int ret;
+ ret = target_fabric_tf_ops_check(fo);
+ if (ret)
+ return ret;
+
+ tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!tf) {
- pr_err("Unable to locate target_fabric_configfs"
- " pointer\n");
- return -EINVAL;
- }
- if (!tf->tf_subsys) {
- pr_err("Unable to target struct config_subsystem"
- " pointer\n");
- return -EINVAL;
+ pr_err("%s: could not allocate memory!\n", __func__);
+ return -ENOMEM;
}
- ret = target_fabric_tf_ops_check(tf);
- if (ret < 0)
- return ret;
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
- ">>>>>>>>>>\n");
+ INIT_LIST_HEAD(&tf->tf_list);
+ atomic_set(&tf->tf_access_cnt, 0);
+
+ /*
+ * Setup the default generic struct config_item_type's (cits) in
+ * struct target_fabric_configfs->tf_cit_tmpl
+ */
+ tf->tf_module = fo->module;
+ tf->tf_subsys = target_core_subsystem[0];
+ snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
+
+ tf->tf_ops = *fo;
+ target_fabric_setup_cits(tf);
+
+ mutex_lock(&g_tf_lock);
+ list_add_tail(&tf->tf_list, &g_tf_list);
+ mutex_unlock(&g_tf_lock);
+
return 0;
}
-EXPORT_SYMBOL(target_fabric_configfs_register);
+EXPORT_SYMBOL(target_register_template);
-void target_fabric_configfs_deregister(
- struct target_fabric_configfs *tf)
+void target_unregister_template(const struct target_core_fabric_ops *fo)
{
- struct configfs_subsystem *su;
+ struct target_fabric_configfs *t;
- if (!tf) {
- pr_err("Unable to locate passed target_fabric_"
- "configfs\n");
- return;
- }
- su = tf->tf_subsys;
- if (!su) {
- pr_err("Unable to locate passed tf->tf_subsys"
- " pointer\n");
- return;
- }
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
- ">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
- if (atomic_read(&tf->tf_access_cnt)) {
- mutex_unlock(&g_tf_lock);
- pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
- tf->tf_name);
- BUG();
+ list_for_each_entry(t, &g_tf_list, tf_list) {
+ if (!strcmp(t->tf_name, fo->name)) {
+ BUG_ON(atomic_read(&t->tf_access_cnt));
+ list_del(&t->tf_list);
+ kfree(t);
+ break;
+ }
}
- list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
-
- pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
- " %s\n", tf->tf_name);
- tf->tf_module = NULL;
- tf->tf_subsys = NULL;
- kfree(tf);
-
- pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
- ">>>>>\n");
}
-EXPORT_SYMBOL(target_fabric_configfs_deregister);
+EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
@@ -945,7 +863,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct se_lun *lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
ssize_t len = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -979,7 +897,7 @@ SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
struct se_device *dev, char *page)
{
- struct target_core_fabric_ops *tfo;
+ const struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
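
target_register_template() validates the driver's ops table, copies it into a freshly allocated target_fabric_configfs, and links it onto g_tf_list; target_unregister_template() later finds it again by name. For context, a lookup by name under the same lock, mirroring the list walk above, would look like this (illustrative only, not kernel code):

static struct target_fabric_configfs *example_lookup_fabric(const char *name)
{
	struct target_fabric_configfs *tf;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_name, name)) {
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);
	return NULL;
}
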
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 0c3f90130b7d..1f7886bb16bf 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -56,6 +56,20 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
pr_debug("Setup generic %s\n", __stringify(_name)); \
}
+#define TF_CIT_SETUP_DRV(_name, _item_ops, _group_ops) \
+static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
+{ \
+ struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
+ struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
+ struct configfs_attribute **attrs = tf->tf_ops.tfc_##_name##_attrs; \
+ \
+ cit->ct_item_ops = _item_ops; \
+ cit->ct_group_ops = _group_ops; \
+ cit->ct_attrs = attrs; \
+ cit->ct_owner = tf->tf_module; \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
+}
+
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
@@ -278,7 +292,7 @@ static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
.store_attribute = target_fabric_nacl_attrib_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL);
/* End of tfc_tpg_nacl_attrib_cit */
@@ -291,7 +305,7 @@ static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
.store_attribute = target_fabric_nacl_auth_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL);
/* End of tfc_tpg_nacl_auth_cit */
@@ -304,7 +318,7 @@ static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
.store_attribute = target_fabric_nacl_param_attr_store,
};
-TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL);
/* End of tfc_tpg_nacl_param_cit */
@@ -461,8 +475,8 @@ static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.drop_item = target_fabric_drop_mappedlun,
};
-TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
- &target_fabric_nacl_base_group_ops, NULL);
+TF_CIT_SETUP_DRV(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
+ &target_fabric_nacl_base_group_ops);
/* End of tfc_tpg_nacl_base_cit */
@@ -570,7 +584,7 @@ static struct configfs_item_operations target_fabric_np_base_item_ops = {
.store_attribute = target_fabric_np_base_attr_store,
};
-TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_np_base, &target_fabric_np_base_item_ops, NULL);
/* End of tfc_tpg_np_base_cit */
@@ -966,7 +980,7 @@ static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
.store_attribute = target_fabric_tpg_attrib_attr_store,
};
-TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL);
/* End of tfc_tpg_attrib_cit */
@@ -979,7 +993,7 @@ static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {
.store_attribute = target_fabric_tpg_auth_attr_store,
};
-TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL);
/* End of tfc_tpg_attrib_cit */
@@ -992,7 +1006,7 @@ static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
.store_attribute = target_fabric_tpg_param_attr_store,
};
-TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_param, &target_fabric_tpg_param_item_ops, NULL);
/* End of tfc_tpg_param_cit */
@@ -1018,7 +1032,7 @@ static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
.store_attribute = target_fabric_tpg_attr_store,
};
-TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(tpg_base, &target_fabric_tpg_base_item_ops, NULL);
/* End of tfc_tpg_base_cit */
@@ -1192,7 +1206,7 @@ static struct configfs_item_operations target_fabric_wwn_item_ops = {
.store_attribute = target_fabric_wwn_attr_store,
};
-TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
+TF_CIT_SETUP_DRV(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops);
/* End of tfc_wwn_cit */
@@ -1206,7 +1220,7 @@ static struct configfs_item_operations target_fabric_discovery_item_ops = {
.store_attribute = target_fabric_discovery_attr_store,
};
-TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
+TF_CIT_SETUP_DRV(discovery, &target_fabric_discovery_item_ops, NULL);
/* End of tfc_discovery_cit */
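
TF_CIT_SETUP_DRV differs from TF_CIT_SETUP only in where the attribute array comes from: instead of a compile-time argument it reads tf->tf_ops.tfc_<name>_attrs, i.e. the per-driver arrays the fabric modules now embed in their ops tables. Hand-expanded for tpg_attrib (simplified, for illustration), the generated setup function is roughly:

static void example_setup_tpg_attrib_cit(struct target_fabric_configfs *tf)
{
	struct config_item_type *cit = &tf->tf_cit_tmpl.tfc_tpg_attrib_cit;

	cit->ct_item_ops  = &target_fabric_tpg_attrib_item_ops;
	cit->ct_group_ops = NULL;
	cit->ct_attrs     = tf->tf_ops.tfc_tpg_attrib_attrs;
	cit->ct_owner     = tf->tf_module;
}
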
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 44620fb6bd45..f7e6e51aed36 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = FD_DEV(se_dev);
struct file *prot_fd = dev->fd_prot_file;
- struct scatterlist *sg;
loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
unsigned char *buf;
- u32 prot_size, len, size;
- int rc, ret = 1, i;
+ u32 prot_size;
+ int rc, ret = 1;
prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
se_dev->prot_length;
if (!is_write) {
- fd_prot->prot_buf = vzalloc(prot_size);
+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
if (!fd_prot->prot_buf) {
pr_err("Unable to allocate fd_prot->prot_buf\n");
return -ENOMEM;
}
buf = fd_prot->prot_buf;
- fd_prot->prot_sg_nents = cmd->t_prot_nents;
- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
- fd_prot->prot_sg_nents, GFP_KERNEL);
+ fd_prot->prot_sg_nents = 1;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
+ GFP_KERNEL);
if (!fd_prot->prot_sg) {
pr_err("Unable to allocate fd_prot->prot_sg\n");
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
return -ENOMEM;
}
- size = prot_size;
-
- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
-
- len = min_t(u32, PAGE_SIZE, size);
- sg_set_buf(sg, buf, len);
- size -= len;
- buf += len;
- }
+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
}
if (is_write) {
@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
if (is_write || ret < 0) {
kfree(fd_prot->prot_sg);
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
}
return ret;
@@ -331,36 +323,33 @@ static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
struct fd_dev *dev = FD_DEV(se_dev);
struct file *fd = dev->fd_file;
struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ ssize_t len = 0;
loff_t pos = (cmd->t_task_lba * se_dev->dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * sgl_nents, GFP_KERNEL);
- if (!iov) {
+ bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for_each_sg(sgl, sg, sgl_nents, i) {
- iov[i].iov_len = sg->length;
- iov[i].iov_base = kmap(sg_page(sg)) + sg->offset;
- }
+ bvec[i].bv_page = sg_page(sg);
+ bvec[i].bv_len = sg->length;
+ bvec[i].bv_offset = sg->offset;
- old_fs = get_fs();
- set_fs(get_ds());
+ len += sg->length;
+ }
+ iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
if (is_write)
- ret = vfs_writev(fd, &iov[0], sgl_nents, &pos);
+ ret = vfs_iter_write(fd, &iter, &pos);
else
- ret = vfs_readv(fd, &iov[0], sgl_nents, &pos);
-
- set_fs(old_fs);
-
- for_each_sg(sgl, sg, sgl_nents, i)
- kunmap(sg_page(sg));
+ ret = vfs_iter_read(fd, &iter, &pos);
- kfree(iov);
+ kfree(bvec);
if (is_write) {
if (ret < 0 || ret != cmd->data_length) {
@@ -436,59 +425,17 @@ fd_execute_sync_cache(struct se_cmd *cmd)
return 0;
}
-static unsigned char *
-fd_setup_write_same_buf(struct se_cmd *cmd, struct scatterlist *sg,
- unsigned int len)
-{
- struct se_device *se_dev = cmd->se_dev;
- unsigned int block_size = se_dev->dev_attrib.block_size;
- unsigned int i = 0, end;
- unsigned char *buf, *p, *kmap_buf;
-
- buf = kzalloc(min_t(unsigned int, len, PAGE_SIZE), GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate fd_execute_write_same buf\n");
- return NULL;
- }
-
- kmap_buf = kmap(sg_page(sg)) + sg->offset;
- if (!kmap_buf) {
- pr_err("kmap() failed in fd_setup_write_same\n");
- kfree(buf);
- return NULL;
- }
- /*
- * Fill local *buf to contain multiple WRITE_SAME blocks up to
- * min(len, PAGE_SIZE)
- */
- p = buf;
- end = min_t(unsigned int, len, PAGE_SIZE);
-
- while (i < end) {
- memcpy(p, kmap_buf, block_size);
-
- i += block_size;
- p += block_size;
- }
- kunmap(sg_page(sg));
-
- return buf;
-}
-
static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *fd_dev = FD_DEV(se_dev);
- struct file *f = fd_dev->fd_file;
- struct scatterlist *sg;
- struct iovec *iov;
- mm_segment_t old_fs;
- sector_t nolb = sbc_get_write_same_sectors(cmd);
loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
- unsigned int len, len_tmp, iov_num;
- int i, rc;
- unsigned char *buf;
+ sector_t nolb = sbc_get_write_same_sectors(cmd);
+ struct iov_iter iter;
+ struct bio_vec *bvec;
+ unsigned int len = 0, i;
+ ssize_t ret;
if (!nolb) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -499,56 +446,92 @@ fd_execute_write_same(struct se_cmd *cmd)
" backends not supported\n");
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- sg = &cmd->t_data_sg[0];
if (cmd->t_data_nents > 1 ||
- sg->length != cmd->se_dev->dev_attrib.block_size) {
+ cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
- " block_size: %u\n", cmd->t_data_nents, sg->length,
+ " block_size: %u\n",
+ cmd->t_data_nents,
+ cmd->t_data_sg[0].length,
cmd->se_dev->dev_attrib.block_size);
return TCM_INVALID_CDB_FIELD;
}
- len = len_tmp = nolb * se_dev->dev_attrib.block_size;
- iov_num = DIV_ROUND_UP(len, PAGE_SIZE);
-
- buf = fd_setup_write_same_buf(cmd, sg, len);
- if (!buf)
+ bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
+ if (!bvec)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- iov = vzalloc(sizeof(struct iovec) * iov_num);
- if (!iov) {
- pr_err("Unable to allocate fd_execute_write_same iovecs\n");
- kfree(buf);
+ for (i = 0; i < nolb; i++) {
+ bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
+ bvec[i].bv_len = cmd->t_data_sg[0].length;
+ bvec[i].bv_offset = cmd->t_data_sg[0].offset;
+
+ len += se_dev->dev_attrib.block_size;
+ }
+
+ iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+ ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos);
+
+ kfree(bvec);
+ if (ret < 0 || ret != len) {
+ pr_err("vfs_iter_write() returned %zd for write same\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
- /*
- * Map the single fabric received scatterlist block now populated
- * in *buf into each iovec for I/O submission.
- */
- for (i = 0; i < iov_num; i++) {
- iov[i].iov_base = buf;
- iov[i].iov_len = min_t(unsigned int, len_tmp, PAGE_SIZE);
- len_tmp -= iov[i].iov_len;
+
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
+ return 0;
+}
+
+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+ void *buf, size_t bufsize)
+{
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ loff_t pos = lba * se_dev->prot_length;
+
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
}
- old_fs = get_fs();
- set_fs(get_ds());
- rc = vfs_writev(f, &iov[0], iov_num, &pos);
- set_fs(old_fs);
+ prot_length = nolb * se_dev->prot_length;
- vfree(iov);
- kfree(buf);
+ for (prot = 0; prot < prot_length;) {
+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
- if (rc < 0 || rc != len) {
- pr_err("vfs_writev() returned %d for write same\n", rc);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ if (ret != len) {
+ pr_err("vfs_write to prot file failed: %zd\n", ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+ prot += ret;
}
- target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+ void *buf;
+ int rc;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+ memset(buf, 0xff, PAGE_SIZE);
+
+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+ free_page((unsigned long)buf);
+
+ return rc;
+}
+
static sense_reason_t
fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
{
@@ -556,6 +539,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;
+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_prot_unmap(cmd, lba, nolb);
+ if (ret)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
@@ -595,7 +584,7 @@ fd_execute_write_same_unmap(struct se_cmd *cmd)
struct file *file = fd_dev->fd_file;
sector_t lba = cmd->t_task_lba;
sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
+ sense_reason_t ret;
if (!nolb) {
target_complete_cmd(cmd, SAM_STAT_GOOD);
@@ -643,7 +632,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_FROM_DEVICE) {
memset(&fd_prot, 0, sizeof(struct fd_prot));
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_rw(cmd, &fd_prot, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -651,23 +640,23 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
- if (ret > 0 && cmd->prot_type) {
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
}
} else {
memset(&fd_prot, 0, sizeof(struct fd_prot));
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
ret = fd_do_prot_rw(cmd, &fd_prot, false);
@@ -678,7 +667,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
}
@@ -705,7 +694,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
vfs_fsync_range(fd_dev->fd_file, start, end, 1);
}
- if (ret > 0 && cmd->prot_type) {
+ if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_rw(cmd, &fd_prot, true);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -714,7 +703,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (ret < 0) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -878,48 +867,28 @@ static int fd_init_prot(struct se_device *dev)
static int fd_format_prot(struct se_device *dev)
{
- struct fd_dev *fd_dev = FD_DEV(dev);
- struct file *prot_fd = fd_dev->fd_prot_file;
- sector_t prot_length, prot;
unsigned char *buf;
- loff_t pos = 0;
int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
- int rc, ret = 0, size, len;
+ int ret;
if (!dev->dev_attrib.pi_prot_type) {
pr_err("Unable to format_prot while pi_prot_type == 0\n");
return -ENODEV;
}
- if (!prot_fd) {
- pr_err("Unable to locate fd_dev->fd_prot_file\n");
- return -ENODEV;
- }
buf = vzalloc(unit_size);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
- size = prot_length;
pr_debug("Using FILEIO prot_length: %llu\n",
- (unsigned long long)prot_length);
+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+ dev->prot_length);
memset(buf, 0xff, unit_size);
- for (prot = 0; prot < prot_length; prot += unit_size) {
- len = min(unit_size, size);
- rc = kernel_write(prot_fd, buf, len, pos);
- if (rc != len) {
- pr_err("vfs_write to prot file failed: %d\n", rc);
- ret = -ENODEV;
- goto out;
- }
- pos += len;
- size -= len;
- }
-
-out:
+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+ buf, unit_size);
vfree(buf);
return ret;
}
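
The FILEIO read/write path above drops the kmap() + iovec + set_fs(get_ds()) + vfs_writev() pattern in favour of building an iov_iter over bio_vecs that reference the scatterlist pages directly and calling vfs_iter_read()/vfs_iter_write(). A condensed, standalone sketch of that conversion, using the same kernel APIs the hunks above use (not a drop-in replacement for fd_do_rw()):

static ssize_t example_sgl_rw(struct file *filp, struct scatterlist *sgl,
			      u32 nents, loff_t pos, bool is_write)
{
	struct scatterlist *sg;
	struct bio_vec *bvec;
	struct iov_iter iter;
	ssize_t len = 0, ret;
	int i;

	bvec = kcalloc(nents, sizeof(*bvec), GFP_KERNEL);
	if (!bvec)
		return -ENOMEM;

	/* Point each bio_vec at the scatterlist page; no kmap() needed. */
	for_each_sg(sgl, sg, nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;
		len += sg->length;
	}

	iov_iter_bvec(&iter, ITER_BVEC, bvec, nents, len);
	ret = is_write ? vfs_iter_write(filp, &iter, &pos)
		       : vfs_iter_read(filp, &iter, &pos);

	kfree(bvec);
	return ret;
}
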
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index d4a4b0fb444a..1b7947c2510f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -444,7 +444,7 @@ iblock_execute_write_same_unmap(struct se_cmd *cmd)
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
sector_t lba = cmd->t_task_lba;
sector_t nolb = sbc_get_write_same_sectors(cmd);
- int ret;
+ sense_reason_t ret;
ret = iblock_do_unmap(cmd, bdev, lba, nolb);
if (ret)
@@ -774,7 +774,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
sg_num--;
}
- if (cmd->prot_type) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
int rc = iblock_alloc_bip(cmd, bio_start);
if (rc)
goto fail_put_bios;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 60381db90026..874a9bc988d8 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,7 +4,13 @@
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
+/* target_core_configfs.c */
+extern struct configfs_subsystem *target_core_subsystem[];
+
/* target_core_device.c */
+extern struct mutex g_device_mutex;
+extern struct list_head g_device_list;
+
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2de6fb8cee8d..c1aa9655e96e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -78,6 +78,22 @@ enum preempt_type {
static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int, int);
+static int is_reservation_holder(
+ struct t10_pr_registration *pr_res_holder,
+ struct t10_pr_registration *pr_reg)
+{
+ int pr_res_type;
+
+ if (pr_res_holder) {
+ pr_res_type = pr_res_holder->pr_res_type;
+
+ return pr_res_holder == pr_reg ||
+ pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+ pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG;
+ }
+ return 0;
+}
+
static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
{
@@ -664,7 +680,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
struct se_dev_entry *deve_tmp;
struct se_node_acl *nacl_tmp;
struct se_port *port, *port_tmp;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg, *pr_reg_atp, *pr_reg_tmp, *pr_reg_tmp_safe;
int ret;
/*
@@ -963,7 +979,7 @@ int core_scsi3_check_aptpl_registration(
}
static void __core_scsi3_dump_registration(
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_device *dev,
struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg,
@@ -1004,7 +1020,7 @@ static void __core_scsi3_add_registration(
enum register_type register_type,
int register_move)
{
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
@@ -1220,8 +1236,10 @@ static void __core_scsi3_free_registration(
struct t10_pr_registration *pr_reg,
struct list_head *preempt_and_abort_list,
int dec_holders)
+ __releases(&pr_tmpl->registration_lock)
+ __acquires(&pr_tmpl->registration_lock)
{
- struct target_core_fabric_ops *tfo =
+ const struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
@@ -1445,7 +1463,7 @@ core_scsi3_decode_spec_i_port(
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
LIST_HEAD(tid_dest_list);
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
- struct target_core_fabric_ops *tmp_tf_ops;
+ const struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
@@ -2287,7 +2305,6 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if (pr_res_holder) {
- int pr_res_type = pr_res_holder->pr_res_type;
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2298,9 +2315,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
- if ((pr_res_holder != pr_reg) &&
- (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
- (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
@@ -2409,7 +2424,7 @@ static void __core_scsi3_complete_pro_release(
int explicit,
int unreg)
{
- struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
int pr_res_type = 0, pr_res_scope = 0;
@@ -2477,7 +2492,6 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
- int all_reg = 0;
sense_reason_t ret = 0;
if (!se_sess || !se_lun) {
@@ -2514,13 +2528,9 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
spin_unlock(&dev->dev_reservation_lock);
goto out_put_pr_reg;
}
- if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
- (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
- all_reg = 1;
- if ((all_reg == 0) && (pr_res_holder != pr_reg)) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
/*
- * Non 'All Registrants' PR Type cases..
* Release request from a registered I_T nexus that is not a
* persistent reservation holder. return GOOD status.
*/
@@ -2726,7 +2736,7 @@ static void __core_scsi3_complete_pro_preempt(
enum preempt_type preempt_type)
{
struct se_node_acl *nacl = pr_reg->pr_reg_nacl;
- struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
+ const struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3111,7 +3121,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
- struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
+ const struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
@@ -3375,7 +3385,7 @@ after_iport_check:
* From spc4r17 section 5.7.8 Table 50 --
* Register behaviors for a REGISTER AND MOVE service action
*/
- if (pr_res_holder != pr_reg) {
+ if (!is_reservation_holder(pr_res_holder, pr_reg)) {
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 98e83ac5661b..a263bf5fab8d 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -139,10 +139,22 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
unsigned char *p;
while (total_sg_needed) {
+ unsigned int chain_entry = 0;
+
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
- sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+ /*
+ * Reserve extra element for chain entry
+ */
+ if (sg_per_table < total_sg_needed)
+ chain_entry = 1;
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
+
+ sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
GFP_KERNEL);
if (!sg) {
pr_err("Unable to allocate scatterlist array"
@@ -150,7 +162,16 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
return -ENOMEM;
}
- sg_init_table(sg, sg_per_table);
+ sg_init_table(sg, sg_per_table + chain_entry);
+
+#ifdef CONFIG_ARCH_HAS_SG_CHAIN
+
+ if (i > 0) {
+ sg_chain(sg_table[i - 1].sg_table,
+ max_sg_per_table + 1, sg);
+ }
+
+#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -382,6 +403,76 @@ static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page
return NULL;
}
+typedef sense_reason_t (*dif_verify)(struct se_cmd *, sector_t, unsigned int,
+ unsigned int, struct scatterlist *, int);
+
+static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, dif_verify dif_verify)
+{
+ struct se_device *se_dev = cmd->se_dev;
+ struct rd_dev *dev = RD_DEV(se_dev);
+ struct rd_dev_sg_table *prot_table;
+ bool need_to_release = false;
+ struct scatterlist *prot_sg;
+ u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
+ u32 prot_offset, prot_page;
+ u32 prot_npages __maybe_unused;
+ u64 tmp;
+ sense_reason_t rc = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ tmp = cmd->t_task_lba * se_dev->prot_length;
+ prot_offset = do_div(tmp, PAGE_SIZE);
+ prot_page = tmp;
+
+ prot_table = rd_get_prot_table(dev, prot_page);
+ if (!prot_table)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ prot_sg = &prot_table->sg_table[prot_page -
+ prot_table->page_start_offset];
+
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+
+ prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
+ PAGE_SIZE);
+
+ /*
+	 * Allocate temporary contiguous scatterlist entries if the prot pages
+	 * straddle multiple scatterlist tables.
+ */
+ if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
+ int i;
+
+ prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
+ if (!prot_sg)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+ need_to_release = true;
+ sg_init_table(prot_sg, prot_npages);
+
+ for (i = 0; i < prot_npages; i++) {
+ if (prot_page + i > prot_table->page_end_offset) {
+ prot_table = rd_get_prot_table(dev,
+ prot_page + i);
+ if (!prot_table) {
+ kfree(prot_sg);
+ return rc;
+ }
+ sg_unmark_end(&prot_sg[i - 1]);
+ }
+ prot_sg[i] = prot_table->sg_table[prot_page + i -
+ prot_table->page_start_offset];
+ }
+ }
+
+#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
+
+ rc = dif_verify(cmd, cmd->t_task_lba, sectors, 0, prot_sg, prot_offset);
+ if (need_to_release)
+ kfree(prot_sg);
+
+ return rc;
+}
+
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
enum dma_data_direction data_direction)
@@ -419,24 +510,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
cmd->t_task_lba, rd_size, rd_page, rd_offset);
- if (cmd->prot_type && data_direction == DMA_TO_DEVICE) {
- struct rd_dev_sg_table *prot_table;
- struct scatterlist *prot_sg;
- u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
- u32 prot_offset, prot_page;
-
- tmp = cmd->t_task_lba * se_dev->prot_length;
- prot_offset = do_div(tmp, PAGE_SIZE);
- prot_page = tmp;
-
- prot_table = rd_get_prot_table(dev, prot_page);
- if (!prot_table)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
-
- rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors, 0,
- prot_sg, prot_offset);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_TO_DEVICE) {
+ rc = rd_do_prot_rw(cmd, sbc_dif_verify_write);
if (rc)
return rc;
}
@@ -502,24 +578,9 @@ rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
sg_miter_stop(&m);
- if (cmd->prot_type && data_direction == DMA_FROM_DEVICE) {
- struct rd_dev_sg_table *prot_table;
- struct scatterlist *prot_sg;
- u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
- u32 prot_offset, prot_page;
-
- tmp = cmd->t_task_lba * se_dev->prot_length;
- prot_offset = do_div(tmp, PAGE_SIZE);
- prot_page = tmp;
-
- prot_table = rd_get_prot_table(dev, prot_page);
- if (!prot_table)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
- prot_sg = &prot_table->sg_table[prot_page - prot_table->page_start_offset];
-
- rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
- prot_sg, prot_offset);
+ if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
+ data_direction == DMA_FROM_DEVICE) {
+ rc = rd_do_prot_rw(cmd, sbc_dif_verify_read);
if (rc)
return rc;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 3e7297411110..8855781ac653 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -93,6 +93,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+
unsigned char *rbuf;
unsigned char buf[32];
unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -114,8 +116,15 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
* Set P_TYPE and PROT_EN bits for DIF support
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
- buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+ /*
+ * Only override a device's pi_prot_type if no T10-PI is
+ * available, and sess_prot_type has been explicitly enabled.
+ */
+ if (!pi_prot_type)
+ pi_prot_type = sess->sess_prot_type;
+
+ if (pi_prot_type)
+ buf[12] = (pi_prot_type - 1) << 1 | 0x1;
}
if (dev->transport->get_lbppbe)
@@ -312,7 +321,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
return 0;
}
-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
@@ -376,7 +385,7 @@ sbc_execute_rw(struct se_cmd *cmd)
cmd->data_direction);
}
-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;
@@ -399,7 +408,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
return TCM_NO_SENSE;
}
-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *write_sg = NULL, *sg;
@@ -414,11 +423,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
/*
* Handle early failure in transport_generic_request_failure(),
- * which will not have taken ->caw_mutex yet..
+ * which will not have taken ->caw_sem yet..
*/
- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
return TCM_NO_SENSE;
/*
+ * Handle special case for zero-length COMPARE_AND_WRITE
+ */
+ if (!cmd->data_length)
+ goto out;
+ /*
* Immediately exit + release dev->caw_sem if command has already
* been failed with a non-zero SCSI status.
*/
@@ -581,12 +595,13 @@ sbc_compare_and_write(struct se_cmd *cmd)
}
static int
-sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
bool is_write, struct se_cmd *cmd)
{
if (is_write) {
- cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
- TARGET_PROT_DOUT_INSERT;
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
+ protect ? TARGET_PROT_DOUT_PASS :
+ TARGET_PROT_DOUT_INSERT;
switch (protect) {
case 0x0:
case 0x3:
@@ -610,8 +625,9 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
return -EINVAL;
}
} else {
- cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
- TARGET_PROT_DIN_STRIP;
+ cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
+ protect ? TARGET_PROT_DIN_PASS :
+ TARGET_PROT_DIN_STRIP;
switch (protect) {
case 0x0:
case 0x1:
@@ -644,11 +660,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
u32 sectors, bool is_write)
{
u8 protect = cdb[1] >> 5;
+ int sp_ops = cmd->se_sess->sup_prot_ops;
+ int pi_prot_type = dev->dev_attrib.pi_prot_type;
+ bool fabric_prot = false;
if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
- if (protect && !dev->dev_attrib.pi_prot_type) {
- pr_err("CDB contains protect bit, but device does not"
- " advertise PROTECT=1 feature bit\n");
+ if (unlikely(protect &&
+ !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
+ pr_err("CDB contains protect bit, but device + fabric does"
+ " not advertise PROTECT=1 feature bit\n");
return TCM_INVALID_CDB_FIELD;
}
if (cmd->prot_pto)
@@ -669,15 +689,32 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
cmd->reftag_seed = cmd->t_task_lba;
break;
case TARGET_DIF_TYPE0_PROT:
+ /*
+ * See if the fabric supports T10-PI, and the session has been
+ * configured to allow export PROTECT=1 feature bit with backend
+ * devices that don't support T10-PI.
+ */
+ fabric_prot = is_write ?
+ !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
+ !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
+
+ if (fabric_prot && cmd->se_sess->sess_prot_type) {
+ pi_prot_type = cmd->se_sess->sess_prot_type;
+ break;
+ }
+ if (!protect)
+ return TCM_NO_SENSE;
+ /* Fallthrough */
default:
- return TCM_NO_SENSE;
+ pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
+ "PROTECT: 0x%02x\n", cdb[0], protect);
+ return TCM_INVALID_CDB_FIELD;
}
- if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
- is_write, cmd))
+ if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
return TCM_INVALID_CDB_FIELD;
- cmd->prot_type = dev->dev_attrib.pi_prot_type;
+ cmd->prot_type = pi_prot_type;
cmd->prot_length = dev->prot_length * sectors;
/**
@@ -1166,14 +1203,16 @@ sbc_dif_generate(struct se_cmd *cmd)
sdt = paddr + offset;
sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
dev->dev_attrib.block_size));
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
sdt->app_tag = 0;
- pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+ pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
" app_tag: 0x%04x ref_tag: %u\n",
- (unsigned long long)sector, sdt->guard_tag,
- sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+ (cmd->data_direction == DMA_TO_DEVICE) ?
+ "WRITE" : "READ", (unsigned long long)sector,
+ sdt->guard_tag, sdt->app_tag,
+ be32_to_cpu(sdt->ref_tag));
sector++;
offset += sizeof(struct se_dif_v1_tuple);
@@ -1185,12 +1224,16 @@ sbc_dif_generate(struct se_cmd *cmd)
}
static sense_reason_t
-sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
+sbc_dif_v1_verify(struct se_cmd *cmd, struct se_dif_v1_tuple *sdt,
const void *p, sector_t sector, unsigned int ei_lba)
{
+ struct se_device *dev = cmd->se_dev;
int block_size = dev->dev_attrib.block_size;
__be16 csum;
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ goto check_ref;
+
csum = cpu_to_be16(crc_t10dif(p, block_size));
if (sdt->guard_tag != csum) {
@@ -1200,7 +1243,11 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
}
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT &&
+check_ref:
+ if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
+ return 0;
+
+ if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
" sector MSB: 0x%08x\n", (unsigned long long)sector,
@@ -1208,7 +1255,7 @@ sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
}
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE2_PROT &&
+ if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
" ei_lba: 0x%08x\n", (unsigned long long)sector,
@@ -1229,6 +1276,9 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
unsigned int i, len, left;
unsigned int offset = sg_off;
+ if (!sg)
+ return;
+
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
@@ -1292,7 +1342,7 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
(unsigned long long)sector, sdt->guard_tag,
sdt->app_tag, be32_to_cpu(sdt->ref_tag));
- rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
ei_lba);
if (rc) {
kunmap_atomic(paddr);
@@ -1309,6 +1359,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
kunmap_atomic(paddr);
kunmap_atomic(daddr);
}
+ if (!sg)
+ return 0;
+
sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off);
return 0;
@@ -1353,7 +1406,7 @@ __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
continue;
}
- rc = sbc_dif_v1_verify(dev, sdt, daddr + j, sector,
+ rc = sbc_dif_v1_verify(cmd, sdt, daddr + j, sector,
ei_lba);
if (rc) {
kunmap_atomic(paddr);
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 6c8bd6bc175c..7912aa124385 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -103,10 +103,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[5] |= 0x8;
/*
* Set Protection (PROTECT) bit when DIF has been enabled on the
- * device, and the transport supports VERIFY + PASS.
+ * device, and the fabric supports VERIFY + PASS. Also report
+ * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
+ * to unprotected devices.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
+ if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
buf[5] |= 0x1;
}
@@ -467,9 +469,11 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
* only for TYPE3 protection.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+ if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
buf[4] = 0x5;
- else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+ else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
+ cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
buf[4] = 0x4;
}
@@ -861,7 +865,7 @@ static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
* TAG field.
*/
if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
- if (dev->dev_attrib.pi_prot_type)
+ if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
p[5] |= 0x80;
}
@@ -1099,7 +1103,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
unsigned char *buf;
unsigned char tbuf[SE_MODE_PAGE_BUF];
int length;
- int ret = 0;
+ sense_reason_t ret = 0;
int i;
if (!cmd->data_length) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fa5e157db47b..315ec3458eeb 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -125,8 +125,8 @@ void core_tmr_abort_task(
if (dev != se_cmd->se_dev)
continue;
- /* skip se_cmd associated with tmr */
- if (tmr->task_cmd == se_cmd)
+ /* skip task management functions, including tmr->task_cmd */
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
continue;
ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 0696de9553d3..47f064415bf6 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -672,7 +672,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
}
int core_tpg_register(
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
void *tpg_fabric_ptr,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ac3cbabdbdf0..3fe5cb240b6f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -322,6 +322,7 @@ void __transport_register_session(
struct se_session *se_sess,
void *fabric_sess_ptr)
{
+ const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
se_sess->se_tpg = se_tpg;
@@ -334,6 +335,21 @@ void __transport_register_session(
*/
if (se_nacl) {
/*
+ *
+ * Determine if fabric allows for T10-PI feature bits exposed to
+ * initiators for device backends with !dev->dev_attrib.pi_prot_type.
+ *
+ * If so, then always save prot_type on a per se_node_acl node
+ * basis and re-instate the previous sess_prot_type to avoid
+		 * disabling PI below any previously registered initiator-side
+		 * LUNs.
+ */
+ if (se_nacl->saved_prot_type)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type;
+ else if (tfo->tpg_check_prot_fabric_only)
+ se_sess->sess_prot_type = se_nacl->saved_prot_type =
+ tfo->tpg_check_prot_fabric_only(se_tpg);
+ /*
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
@@ -404,6 +420,30 @@ void target_put_session(struct se_session *se_sess)
}
EXPORT_SYMBOL(target_put_session);
+ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
+{
+ struct se_session *se_sess;
+ ssize_t len = 0;
+
+ spin_lock_bh(&se_tpg->session_lock);
+ list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
+ if (!se_sess->se_node_acl)
+ continue;
+ if (!se_sess->se_node_acl->dynamic_node_acl)
+ continue;
+ if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
+ break;
+
+ len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
+ se_sess->se_node_acl->initiatorname);
+ len += 1; /* Include NULL terminator */
+ }
+ spin_unlock_bh(&se_tpg->session_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(target_show_dynamic_sessions);
+
static void target_complete_nacl(struct kref *kref)
{
struct se_node_acl *nacl = container_of(kref,
@@ -462,7 +502,7 @@ EXPORT_SYMBOL(transport_free_session);
void transport_deregister_session(struct se_session *se_sess)
{
struct se_portal_group *se_tpg = se_sess->se_tpg;
- struct target_core_fabric_ops *se_tfo;
+ const struct target_core_fabric_ops *se_tfo;
struct se_node_acl *se_nacl;
unsigned long flags;
bool comp_nacl = true;
@@ -1118,7 +1158,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
*/
void transport_init_se_cmd(
struct se_cmd *cmd,
- struct target_core_fabric_ops *tfo,
+ const struct target_core_fabric_ops *tfo,
struct se_session *se_sess,
u32 data_length,
int data_direction,
@@ -1570,6 +1610,8 @@ EXPORT_SYMBOL(target_submit_tmr);
* has completed.
*/
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
bool was_active = false;
@@ -1615,11 +1657,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
transport_complete_task_attr(cmd);
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_mutex.
+ * callback is expected to drop the per device ->caw_sem.
*/
if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
+ cmd->transport_complete_callback(cmd, false);
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
@@ -1706,6 +1748,41 @@ void __target_execute_cmd(struct se_cmd *cmd)
}
}
+static int target_write_prot_action(struct se_cmd *cmd)
+{
+ u32 sectors;
+ /*
+ * Perform WRITE_INSERT of PI using software emulation when backend
+ * device has PI enabled, if the transport has not already generated
+ * PI using hardware WRITE_INSERT offload.
+ */
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DOUT_INSERT:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+ sbc_dif_generate(cmd);
+ break;
+ case TARGET_PROT_DOUT_STRIP:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
+ break;
+
+ sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
+ cmd->pi_err = sbc_dif_verify_write(cmd, cmd->t_task_lba,
+ sectors, 0, NULL, 0);
+ if (unlikely(cmd->pi_err)) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+ spin_unlock_irq(&cmd->t_state_lock);
+ transport_generic_request_failure(cmd, cmd->pi_err);
+ return -1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static bool target_handle_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -1785,15 +1862,9 @@ void target_execute_cmd(struct se_cmd *cmd)
cmd->t_state = TRANSPORT_PROCESSING;
cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
- /*
- * Perform WRITE_INSERT of PI using software emulation when backend
- * device has PI enabled, if the transport has not already generated
- * PI using hardware WRITE_INSERT offload.
- */
- if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
- if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
- sbc_dif_generate(cmd);
- }
+
+ if (target_write_prot_action(cmd))
+ return;
if (target_handle_task_attr(cmd)) {
spin_lock_irq(&cmd->t_state_lock);
@@ -1919,16 +1990,28 @@ static void transport_handle_queue_full(
schedule_work(&cmd->se_dev->qf_work_queue);
}
-static bool target_check_read_strip(struct se_cmd *cmd)
+static bool target_read_prot_action(struct se_cmd *cmd)
{
sense_reason_t rc;
- if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
- rc = sbc_dif_read_strip(cmd);
- if (rc) {
- cmd->pi_err = rc;
- return true;
+ switch (cmd->prot_op) {
+ case TARGET_PROT_DIN_STRIP:
+ if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+ rc = sbc_dif_read_strip(cmd);
+ if (rc) {
+ cmd->pi_err = rc;
+ return true;
+ }
}
+ break;
+ case TARGET_PROT_DIN_INSERT:
+ if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
+ break;
+
+ sbc_dif_generate(cmd);
+ break;
+ default:
+ break;
}
return false;
@@ -1975,8 +2058,12 @@ static void target_complete_ok_work(struct work_struct *work)
if (cmd->transport_complete_callback) {
sense_reason_t rc;
- rc = cmd->transport_complete_callback(cmd);
+ rc = cmd->transport_complete_callback(cmd, true);
if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ !cmd->data_length)
+ goto queue_rsp;
+
return;
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
@@ -1990,6 +2077,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
}
+queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
@@ -2003,8 +2091,7 @@ static void target_complete_ok_work(struct work_struct *work)
* backend had PI enabled, if the transport will not be
* performing hardware READ_STRIP offload.
*/
- if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
- target_check_read_strip(cmd)) {
+ if (target_read_prot_action(cmd)) {
ret = transport_send_check_condition_and_sense(cmd,
cmd->pi_err, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
@@ -2094,6 +2181,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ /*
+ * Release special case READ buffer payload required for
+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ transport_free_sgl(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
+ }
transport_reset_sgl_orig(cmd);
return;
}
@@ -2246,6 +2343,7 @@ sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
/*
* Determine if the TCM fabric module has already allocated physical
@@ -2254,7 +2352,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
if ((cmd->se_cmd_flags & SCF_BIDI) ||
(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
@@ -2285,6 +2382,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length, zero_flag);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->data_length) {
+ /*
+ * Special case for COMPARE_AND_WRITE with fabrics
+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
+ */
+ u32 caw_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ caw_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* If this command is not a write we can execute it right here,
@@ -2376,10 +2487,8 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref) {
+ if (ack_kref)
kref_get(&se_cmd->cmd_kref);
- se_cmd->se_cmd_flags |= SCF_ACK_KREF;
- }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2398,6 +2507,7 @@ out:
EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref)
+ __releases(&se_cmd->se_sess->sess_cmd_lock)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1a1bcf71ec9d..dbc872a6c981 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -344,8 +344,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
tcmu_flush_dcache_range(entry, sizeof(*entry));
- tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);
- tcmu_hdr_set_len(&entry->hdr, pad_size);
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
+ tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
+ entry->hdr.cmd_id = 0; /* not used for PAD */
+ entry->hdr.kflags = 0;
+ entry->hdr.uflags = 0;
UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
@@ -355,9 +358,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
entry = (void *) mb + CMDR_OFF + cmd_head;
tcmu_flush_dcache_range(entry, sizeof(*entry));
- tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD);
- tcmu_hdr_set_len(&entry->hdr, command_size);
- entry->cmd_id = tcmu_cmd->cmd_id;
+ tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
+ tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+ entry->hdr.kflags = 0;
+ entry->hdr.uflags = 0;
/*
* Fix up iovecs, and handle if allocation in data ring wrapped.
@@ -376,7 +381,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
/* Even iov_base is relative to mb_addr */
iov->iov_len = copy_bytes;
- iov->iov_base = (void *) udev->data_off + udev->data_head;
+ iov->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
iov_cnt++;
iov++;
@@ -388,7 +394,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
copy_bytes = sg->length - copy_bytes;
iov->iov_len = copy_bytes;
- iov->iov_base = (void *) udev->data_off + udev->data_head;
+ iov->iov_base = (void __user *) udev->data_off +
+ udev->data_head;
if (se_cmd->data_direction == DMA_TO_DEVICE) {
to = (void *) mb + udev->data_off + udev->data_head;
@@ -405,6 +412,8 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
kunmap_atomic(from);
}
entry->req.iov_cnt = iov_cnt;
+ entry->req.iov_bidi_cnt = 0;
+ entry->req.iov_dif_cnt = 0;
/* All offsets relative to mb_addr, not start of entry! */
cdb_off = CMDR_OFF + cmd_head + base_command_size;
@@ -462,6 +471,17 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
return;
}
+ if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
+ UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
+ pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
+ cmd->se_cmd);
+ transport_generic_request_failure(cmd->se_cmd,
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+ cmd->se_cmd = NULL;
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return;
+ }
+
if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
se_cmd->scsi_sense_length);
@@ -540,14 +560,16 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_flush_dcache_range(entry, sizeof(*entry));
- if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) {
- UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
+ if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
continue;
}
- WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD);
+ WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
spin_lock(&udev->commands_lock);
- cmd = idr_find(&udev->commands, entry->cmd_id);
+ cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
if (cmd)
idr_remove(&udev->commands, cmd->cmd_id);
spin_unlock(&udev->commands_lock);
@@ -560,7 +582,9 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_handle_completion(cmd, entry);
- UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
+ UPDATE_HEAD(udev->cmdr_last_cleaned,
+ tcmu_hdr_get_len(entry->hdr.len_op),
+ udev->cmdr_size);
handled++;
}
@@ -838,14 +862,14 @@ static int tcmu_configure_device(struct se_device *dev)
udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
mb = udev->mb_addr;
- mb->version = 1;
+ mb->version = TCMU_MAILBOX_VERSION;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
WARN_ON(!PAGE_ALIGNED(udev->data_off));
WARN_ON(udev->data_size % PAGE_SIZE);
- info->version = "1";
+ info->version = xstr(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t) udev->mb_addr;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 33ac39bf75e5..a600ff15dcfd 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -34,20 +34,12 @@
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"
static struct workqueue_struct *xcopy_wq = NULL;
-/*
- * From target_core_device.c
- */
-extern struct mutex g_device_mutex;
-extern struct list_head g_device_list;
-/*
- * From target_core_configfs.c
- */
-extern struct configfs_subsystem *target_core_subsystem[];
static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
@@ -433,7 +425,7 @@ static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
return 0;
}
-static struct target_core_fabric_ops xcopy_pt_tfo = {
+static const struct target_core_fabric_ops xcopy_pt_tfo = {
.get_fabric_name = xcopy_pt_get_fabric_name,
.get_task_tag = xcopy_pt_get_tag,
.get_cmd_state = xcopy_pt_get_cmd_state,
@@ -548,33 +540,22 @@ static void target_xcopy_setup_pt_port(
}
}
-static int target_xcopy_init_pt_lun(
- struct xcopy_pt_cmd *xpt_cmd,
- struct xcopy_op *xop,
- struct se_device *se_dev,
- struct se_cmd *pt_cmd,
- bool remote_port)
+static void target_xcopy_init_pt_lun(struct se_device *se_dev,
+ struct se_cmd *pt_cmd, bool remote_port)
{
/*
* Don't allocate + init a pt_cmd->se_lun if honoring local port for
* reservations. The pt_cmd->se_lun pointer will be set up from within
* target_xcopy_setup_pt_port()
*/
- if (!remote_port) {
- pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
- return 0;
+ if (remote_port) {
+ pr_debug("Setup emulated se_dev: %p from se_dev\n",
+ pt_cmd->se_dev);
+ pt_cmd->se_lun = &se_dev->xcopy_lun;
+ pt_cmd->se_dev = se_dev;
}
- pt_cmd->se_lun = &se_dev->xcopy_lun;
- pt_cmd->se_dev = se_dev;
-
- pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
- pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
-
- pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
- pt_cmd->se_lun->lun_se_dev);
-
- return 0;
+ pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
static int target_xcopy_setup_pt_cmd(
@@ -592,11 +573,8 @@ static int target_xcopy_setup_pt_cmd(
* Setup LUN+port to honor reservations based upon xop->op_origin for
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
- rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
+ target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
+
xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index a0bcfd3e7e7d..881deb3d499a 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -129,7 +129,6 @@ struct ft_cmd {
extern struct mutex ft_lport_lock;
extern struct fc4_prov ft_prov;
-extern struct target_fabric_configfs *ft_configfs;
extern unsigned int ft_debug_logging;
/*
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index efdcb9663a1a..65dce1345966 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -48,7 +48,7 @@
#include "tcm_fc.h"
-struct target_fabric_configfs *ft_configfs;
+static const struct target_core_fabric_ops ft_fabric_ops;
static LIST_HEAD(ft_wwn_list);
DEFINE_MUTEX(ft_lport_lock);
@@ -337,7 +337,7 @@ static struct se_portal_group *ft_add_tpg(
return NULL;
}
- ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
+ ret = core_tpg_register(&ft_fabric_ops, wwn, &tpg->se_tpg,
tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
destroy_workqueue(wq);
@@ -507,7 +507,9 @@ static u32 ft_tpg_get_inst_index(struct se_portal_group *se_tpg)
return tpg->index;
}
-static struct target_core_fabric_ops ft_fabric_ops = {
+static const struct target_core_fabric_ops ft_fabric_ops = {
+ .module = THIS_MODULE,
+ .name = "fc",
.get_fabric_name = ft_get_fabric_name,
.get_fabric_proto_ident = fc_get_fabric_proto_ident,
.tpg_get_wwn = ft_get_fabric_wwn,
@@ -552,62 +554,10 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = &ft_add_acl,
.fabric_drop_nodeacl = &ft_del_acl,
-};
-
-static int ft_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (IS_ERR(fabric)) {
- pr_err("%s: target_fabric_configfs_init() failed!\n",
- __func__);
- return PTR_ERR(fabric);
- }
- fabric->tf_ops = ft_fabric_ops;
-
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = ft_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs =
- ft_nacl_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_debug("target_fabric_configfs_register() for"
- " FC Target failed!\n");
- target_fabric_configfs_free(fabric);
- return -1;
- }
-
- /*
- * Setup our local pointer to *fabric.
- */
- ft_configfs = fabric;
- return 0;
-}
-static void ft_deregister_configfs(void)
-{
- if (!ft_configfs)
- return;
- target_fabric_configfs_deregister(ft_configfs);
- ft_configfs = NULL;
-}
+ .tfc_wwn_attrs = ft_wwn_attrs,
+ .tfc_tpg_nacl_base_attrs = ft_nacl_base_attrs,
+};
static struct notifier_block ft_notifier = {
.notifier_call = ft_lport_notify
@@ -615,15 +565,24 @@ static struct notifier_block ft_notifier = {
static int __init ft_init(void)
{
- if (ft_register_configfs())
- return -1;
- if (fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov)) {
- ft_deregister_configfs();
- return -1;
- }
+ int ret;
+
+ ret = target_register_template(&ft_fabric_ops);
+ if (ret)
+ goto out;
+
+ ret = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
+ if (ret)
+ goto out_unregister_template;
+
blocking_notifier_chain_register(&fc_lport_notifier_head, &ft_notifier);
fc_lport_iterate(ft_lport_add, NULL);
return 0;
+
+out_unregister_template:
+ target_unregister_template(&ft_fabric_ops);
+out:
+ return ret;
}
static void __exit ft_exit(void)
@@ -632,7 +591,7 @@ static void __exit ft_exit(void)
&ft_notifier);
fc_fc4_deregister_provider(FC_TYPE_FCP, &ft_prov);
fc_lport_iterate(ft_lport_del, NULL);
- ft_deregister_configfs();
+ target_unregister_template(&ft_fabric_ops);
synchronize_rcu();
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 422ebea96a64..4506e405c8f3 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -450,6 +450,18 @@ static unsigned int mem32_serial_in(struct uart_port *p, int offset)
return readl(p->membase + offset);
}
+static void mem32be_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ iowrite32be(value, p->membase + offset);
+}
+
+static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return ioread32be(p->membase + offset);
+}
+
static unsigned int io_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
@@ -488,6 +500,11 @@ static void set_io_from_upio(struct uart_port *p)
p->serial_out = mem32_serial_out;
break;
+ case UPIO_MEM32BE:
+ p->serial_in = mem32be_serial_in;
+ p->serial_out = mem32be_serial_out;
+ break;
+
#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
case UPIO_AU:
p->serial_in = au_serial_in;
@@ -513,6 +530,7 @@ serial_port_out_sync(struct uart_port *p, int offset, int value)
switch (p->iotype) {
case UPIO_MEM:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_AU:
p->serial_out(p, offset, value);
p->serial_in(p, UART_LCR); /* safe, no side-effects */
@@ -2748,6 +2766,7 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_MEM:
if (!port->mapbase)
break;
@@ -2784,6 +2803,7 @@ static void serial8250_release_std_resource(struct uart_8250_port *up)
case UPIO_AU:
case UPIO_TSI:
case UPIO_MEM32:
+ case UPIO_MEM32BE:
case UPIO_MEM:
if (!port->mapbase)
break;
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 8e119682266a..6c0fd8b9d1c3 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -42,6 +42,8 @@ unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offse
return readb(port->membase + offset);
case UPIO_MEM32:
return readl(port->membase + (offset << 2));
+ case UPIO_MEM32BE:
+ return ioread32be(port->membase + (offset << 2));
case UPIO_PORT:
return inb(port->iobase + offset);
default:
@@ -58,6 +60,9 @@ void __weak __init serial8250_early_out(struct uart_port *port, int offset, int
case UPIO_MEM32:
writel(value, port->membase + (offset << 2));
break;
+ case UPIO_MEM32BE:
+ iowrite32be(value, port->membase + (offset << 2));
+ break;
case UPIO_PORT:
outb(value, port->iobase + offset);
break;
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index aa00154c4a6d..5b73afb9f9f3 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -116,7 +116,8 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->iotype = UPIO_MEM;
break;
case 4:
- port->iotype = UPIO_MEM32;
+ port->iotype = of_device_is_big_endian(np) ?
+ UPIO_MEM32BE : UPIO_MEM32;
break;
default:
dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 6af58c6dba5e..2030565c6789 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1505,7 +1505,7 @@ static void destroy_ep_files (struct dev_data *dev)
list_del_init (&ep->epfiles);
dentry = ep->dentry;
ep->dentry = NULL;
- parent = dentry->d_parent->d_inode;
+ parent = d_inode(dentry->d_parent);
/* break link to controller */
if (ep->state == STATE_EP_ENABLED)
diff --git a/drivers/usb/gadget/legacy/tcm_usb_gadget.c b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
index 6e0a019aad54..8b80addc4ce6 100644
--- a/drivers/usb/gadget/legacy/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/legacy/tcm_usb_gadget.c
@@ -29,7 +29,7 @@
USB_GADGET_COMPOSITE_OPTIONS();
-static struct target_fabric_configfs *usbg_fabric_configfs;
+static const struct target_core_fabric_ops usbg_ops;
static inline struct f_uas *to_f_uas(struct usb_function *f)
{
@@ -1572,8 +1572,7 @@ static struct se_portal_group *usbg_make_tpg(
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&usbg_fabric_configfs->tf_ops, wwn,
- &tpg->se_tpg, tpg,
+ ret = core_tpg_register(&usbg_ops, wwn, &tpg->se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
destroy_workqueue(tpg->workqueue);
@@ -1864,7 +1863,9 @@ static int usbg_check_stop_free(struct se_cmd *se_cmd)
return 1;
}
-static struct target_core_fabric_ops usbg_ops = {
+static const struct target_core_fabric_ops usbg_ops = {
+ .module = THIS_MODULE,
+ .name = "usb_gadget",
.get_fabric_name = usbg_get_fabric_name,
.get_fabric_proto_ident = usbg_get_fabric_proto_ident,
.tpg_get_wwn = usbg_get_fabric_wwn,
@@ -1906,46 +1907,9 @@ static struct target_core_fabric_ops usbg_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = usbg_make_nodeacl,
.fabric_drop_nodeacl = usbg_drop_nodeacl,
-};
-
-static int usbg_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- fabric = target_fabric_configfs_init(THIS_MODULE, "usb_gadget");
- if (IS_ERR(fabric)) {
- printk(KERN_ERR "target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
-
- fabric->tf_ops = usbg_ops;
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = usbg_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = usbg_base_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() failed"
- " for usb-gadget\n");
- return ret;
- }
- usbg_fabric_configfs = fabric;
- return 0;
-};
-static void usbg_deregister_configfs(void)
-{
- if (!(usbg_fabric_configfs))
- return;
-
- target_fabric_configfs_deregister(usbg_fabric_configfs);
- usbg_fabric_configfs = NULL;
+ .tfc_wwn_attrs = usbg_wwn_attrs,
+ .tfc_tpg_base_attrs = usbg_base_attrs,
};
/* Start gadget.c code */
@@ -2454,16 +2418,13 @@ static void usbg_detach(struct usbg_tpg *tpg)
static int __init usb_target_gadget_init(void)
{
- int ret;
-
- ret = usbg_register_configfs();
- return ret;
+ return target_register_template(&usbg_ops);
}
module_init(usb_target_gadget_init);
static void __exit usb_target_gadget_exit(void)
{
- usbg_deregister_configfs();
+ target_unregister_template(&usbg_ops);
}
module_exit(usb_target_gadget_exit);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 71df240a467a..5e19bb53b3a9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -131,6 +131,8 @@ struct vhost_scsi_tpg {
int tv_tpg_port_count;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count;
+ /* Used for enabling T10-PI with legacy devices */
+ int tv_fabric_prot_type;
/* list for vhost_scsi_list */
struct list_head tv_tpg_list;
/* Used to protect access for tpg_nexus */
@@ -214,9 +216,7 @@ struct vhost_scsi {
int vs_events_nr; /* num of pending events, protected by vq->mutex */
};
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
-
+static struct target_core_fabric_ops vhost_scsi_ops;
static struct workqueue_struct *vhost_scsi_workqueue;
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -431,6 +431,14 @@ vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
port_nexus_ptr);
}
+static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return tpg->tv_fabric_prot_type;
+}
+
static struct se_node_acl *
vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
@@ -1878,6 +1886,45 @@ static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
}
}
+static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+ unsigned long val;
+ int ret = kstrtoul(page, 0, &val);
+
+ if (ret) {
+ pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
+ return ret;
+ }
+ if (val != 0 && val != 1 && val != 3) {
+ pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
+ return -EINVAL;
+ }
+ tpg->tv_fabric_prot_type = val;
+
+ return count;
+}
+
+static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+ struct vhost_scsi_tpg, se_tpg);
+
+ return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
+}
+TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
+ &vhost_scsi_tpg_attrib_fabric_prot_type.attr,
+ NULL,
+};
+
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
const char *name)
{
@@ -2155,7 +2202,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&vhost_scsi_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -2277,6 +2324,8 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
};
static struct target_core_fabric_ops vhost_scsi_ops = {
+ .module = THIS_MODULE,
+ .name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
.get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident,
.tpg_get_wwn = vhost_scsi_get_fabric_wwn,
@@ -2289,6 +2338,7 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.tpg_check_demo_mode_cache = vhost_scsi_check_true,
.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+ .tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only,
.tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl,
.tpg_release_fabric_acl = vhost_scsi_release_fabric_acl,
.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
@@ -2320,70 +2370,20 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.fabric_drop_np = NULL,
.fabric_make_nodeacl = vhost_scsi_make_nodeacl,
.fabric_drop_nodeacl = vhost_scsi_drop_nodeacl,
+
+ .tfc_wwn_attrs = vhost_scsi_wwn_attrs,
+ .tfc_tpg_base_attrs = vhost_scsi_tpg_attrs,
+ .tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs,
};
-static int vhost_scsi_register_configfs(void)
+static int __init vhost_scsi_init(void)
{
- struct target_fabric_configfs *fabric;
- int ret;
+ int ret = -ENOMEM;
- pr_debug("vhost-scsi fabric module %s on %s/%s"
+ pr_debug("TCM_VHOST fabric module %s on %s/%s"
" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
- if (IS_ERR(fabric)) {
- pr_err("target_fabric_configfs_init() failed\n");
- return PTR_ERR(fabric);
- }
- /*
- * Setup fabric->tf_ops from our local vhost_scsi_ops
- */
- fabric->tf_ops = vhost_scsi_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- pr_err("target_fabric_configfs_register() failed"
- " for TCM_VHOST\n");
- return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- vhost_scsi_fabric_configfs = fabric;
- pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
- return 0;
-};
-
-static void vhost_scsi_deregister_configfs(void)
-{
- if (!vhost_scsi_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
- vhost_scsi_fabric_configfs = NULL;
- pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
-};
-static int __init vhost_scsi_init(void)
-{
- int ret = -ENOMEM;
/*
* Use our own dedicated workqueue for submitting I/O into
* target core to avoid contention within system_wq.
@@ -2396,7 +2396,7 @@ static int __init vhost_scsi_init(void)
if (ret < 0)
goto out_destroy_workqueue;
- ret = vhost_scsi_register_configfs();
+ ret = target_register_template(&vhost_scsi_ops);
if (ret < 0)
goto out_vhost_scsi_deregister;
@@ -2412,7 +2412,7 @@ out:
static void vhost_scsi_exit(void)
{
- vhost_scsi_deregister_configfs();
+ target_unregister_template(&vhost_scsi_ops);
vhost_scsi_deregister();
destroy_workqueue(vhost_scsi_workqueue);
};
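With the template API used above, a fabric module no longer hand-builds its configfs layout: the attribute arrays hang off target_core_fabric_ops and a single call registers everything. A minimal sketch of that flow, with hypothetical example_* names standing in for a real fabric:

	#include <linux/module.h>
	#include <target/target_core_fabric.h>

	static struct configfs_attribute *example_wwn_attrs[] = {
		NULL,	/* real fabrics list their wwn attributes here */
	};

	static const struct target_core_fabric_ops example_ops = {
		.module		= THIS_MODULE,
		.name		= "example",
		/* ... the usual tpg/session/cmd callbacks go here ... */
		.tfc_wwn_attrs	= example_wwn_attrs,
	};

	static int __init example_init(void)
	{
		/* Replaces target_fabric_configfs_init/register and tf_cit_tmpl setup. */
		return target_register_template(&example_ops);
	}

	static void __exit example_exit(void)
	{
		target_unregister_template(&example_ops);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");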
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index b546da5d8ea3..cab9f3f63a38 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -48,6 +48,16 @@ config VIRTIO_BALLOON
If unsure, say M.
+config VIRTIO_INPUT
+ tristate "Virtio input driver"
+ depends on VIRTIO
+ depends on INPUT
+ ---help---
+ This driver supports virtio input devices such as
+ keyboards, mice and tablets.
+
+ If unsure, say M.
+
config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index d85565b8ea46..41e30e3dc842 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 5ce2aa48fc6e..b1877d73fa56 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -278,12 +278,6 @@ static struct bus_type virtio_bus = {
.remove = virtio_dev_remove,
};
-bool virtio_device_is_legacy_only(struct virtio_device_id id)
-{
- return id.device == VIRTIO_ID_BALLOON;
-}
-EXPORT_SYMBOL_GPL(virtio_device_is_legacy_only);
-
int register_virtio_driver(struct virtio_driver *driver)
{
/* Catch this early. */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6a356e344f82..82e80e034f25 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -214,8 +214,8 @@ static inline void update_stat(struct virtio_balloon *vb, int idx,
u16 tag, u64 val)
{
BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
- vb->stats[idx].tag = tag;
- vb->stats[idx].val = val;
+ vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
+ vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}
#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
@@ -283,18 +283,27 @@ static void virtballoon_changed(struct virtio_device *vdev)
static inline s64 towards_target(struct virtio_balloon *vb)
{
- __le32 v;
s64 target;
+ u32 num_pages;
- virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+ virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
+ &num_pages);
- target = le32_to_cpu(v);
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ num_pages = le32_to_cpu((__force __le32)num_pages);
+
+ target = num_pages;
return target - vb->num_pages;
}
static void update_balloon_size(struct virtio_balloon *vb)
{
- __le32 actual = cpu_to_le32(vb->num_pages);
+ u32 actual = vb->num_pages;
+
+ /* Legacy balloon config space is LE, unlike all other devices. */
+ if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
+ actual = (__force u32)cpu_to_le32(actual);
virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
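The two hunks above follow one rule: legacy balloon config fields stay little endian, while a VIRTIO_F_VERSION_1 device uses guest-native byte order. Condensed into a sketch (helper name hypothetical):

	/* Sketch: convert a 32-bit balloon config value read from the device. */
	static u32 example_balloon32_to_cpu(struct virtio_device *vdev, u32 raw)
	{
		/* Legacy balloon config space is LE, unlike all other devices. */
		if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
			raw = le32_to_cpu((__force __le32)raw);
		return raw;
	}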
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
new file mode 100644
index 000000000000..60e2a1677563
--- /dev/null
+++ b/drivers/virtio/virtio_input.c
@@ -0,0 +1,384 @@
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/input.h>
+
+#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/virtio_input.h>
+
+struct virtio_input {
+ struct virtio_device *vdev;
+ struct input_dev *idev;
+ char name[64];
+ char serial[64];
+ char phys[64];
+ struct virtqueue *evt, *sts;
+ struct virtio_input_event evts[64];
+ spinlock_t lock;
+ bool ready;
+};
+
+static void virtinput_queue_evtbuf(struct virtio_input *vi,
+ struct virtio_input_event *evtbuf)
+{
+ struct scatterlist sg[1];
+
+ sg_init_one(sg, evtbuf, sizeof(*evtbuf));
+ virtqueue_add_inbuf(vi->evt, sg, 1, evtbuf, GFP_ATOMIC);
+}
+
+static void virtinput_recv_events(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *event;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ while ((event = virtqueue_get_buf(vi->evt, &len)) != NULL) {
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_event(vi->idev,
+ le16_to_cpu(event->type),
+ le16_to_cpu(event->code),
+ le32_to_cpu(event->value));
+ spin_lock_irqsave(&vi->lock, flags);
+ virtinput_queue_evtbuf(vi, event);
+ }
+ virtqueue_kick(vq);
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+/*
+ * On error we are losing the status update, which isn't critical as
+ * this is typically used for stuff like keyboard leds.
+ */
+static int virtinput_send_status(struct virtio_input *vi,
+ u16 type, u16 code, s32 value)
+{
+ struct virtio_input_event *stsbuf;
+ struct scatterlist sg[1];
+ unsigned long flags;
+ int rc;
+
+ stsbuf = kzalloc(sizeof(*stsbuf), GFP_ATOMIC);
+ if (!stsbuf)
+ return -ENOMEM;
+
+ stsbuf->type = cpu_to_le16(type);
+ stsbuf->code = cpu_to_le16(code);
+ stsbuf->value = cpu_to_le32(value);
+ sg_init_one(sg, stsbuf, sizeof(*stsbuf));
+
+ spin_lock_irqsave(&vi->lock, flags);
+ if (vi->ready) {
+ rc = virtqueue_add_outbuf(vi->sts, sg, 1, stsbuf, GFP_ATOMIC);
+ virtqueue_kick(vi->sts);
+ } else {
+ rc = -ENODEV;
+ }
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ if (rc != 0)
+ kfree(stsbuf);
+ return rc;
+}
+
+static void virtinput_recv_status(struct virtqueue *vq)
+{
+ struct virtio_input *vi = vq->vdev->priv;
+ struct virtio_input_event *stsbuf;
+ unsigned long flags;
+ unsigned int len;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ while ((stsbuf = virtqueue_get_buf(vi->sts, &len)) != NULL)
+ kfree(stsbuf);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_status(struct input_dev *idev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct virtio_input *vi = input_get_drvdata(idev);
+
+ return virtinput_send_status(vi, type, code, value);
+}
+
+static u8 virtinput_cfg_select(struct virtio_input *vi,
+ u8 select, u8 subsel)
+{
+ u8 size;
+
+ virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select);
+ virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel);
+ virtio_cread(vi->vdev, struct virtio_input_config, size, &size);
+ return size;
+}
+
+static void virtinput_cfg_bits(struct virtio_input *vi, int select, int subsel,
+ unsigned long *bits, unsigned int bitcount)
+{
+ unsigned int bit;
+ u8 *virtio_bits;
+ u8 bytes;
+
+ bytes = virtinput_cfg_select(vi, select, subsel);
+ if (!bytes)
+ return;
+ if (bitcount > bytes * 8)
+ bitcount = bytes * 8;
+
+ /*
+ * Bitmap in virtio config space is a simple stream of bytes,
+ * with the first byte carrying bits 0-7, second bits 8-15 and
+ * so on.
+ */
+ virtio_bits = kzalloc(bytes, GFP_KERNEL);
+ if (!virtio_bits)
+ return;
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.bitmap),
+ virtio_bits, bytes);
+ for (bit = 0; bit < bitcount; bit++) {
+ if (virtio_bits[bit / 8] & (1 << (bit % 8)))
+ __set_bit(bit, bits);
+ }
+ kfree(virtio_bits);
+
+ if (select == VIRTIO_INPUT_CFG_EV_BITS)
+ __set_bit(subsel, vi->idev->evbit);
+}
+
+static void virtinput_cfg_abs(struct virtio_input *vi, int abs)
+{
+ u32 mi, ma, re, fu, fl;
+
+ virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu);
+ virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
+ input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
+ input_abs_set_res(vi->idev, abs, re);
+}
+
+static int virtinput_init_vqs(struct virtio_input *vi)
+{
+ struct virtqueue *vqs[2];
+ vq_callback_t *cbs[] = { virtinput_recv_events,
+ virtinput_recv_status };
+ static const char *names[] = { "events", "status" };
+ int err;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names);
+ if (err)
+ return err;
+ vi->evt = vqs[0];
+ vi->sts = vqs[1];
+
+ return 0;
+}
+
+static void virtinput_fill_evt(struct virtio_input *vi)
+{
+ unsigned long flags;
+ int i, size;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ size = virtqueue_get_vring_size(vi->evt);
+ if (size > ARRAY_SIZE(vi->evts))
+ size = ARRAY_SIZE(vi->evts);
+ for (i = 0; i < size; i++)
+ virtinput_queue_evtbuf(vi, &vi->evts[i]);
+ virtqueue_kick(vi->evt);
+ spin_unlock_irqrestore(&vi->lock, flags);
+}
+
+static int virtinput_probe(struct virtio_device *vdev)
+{
+ struct virtio_input *vi;
+ unsigned long flags;
+ size_t size;
+ int abs, err;
+
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ return -ENODEV;
+
+ vi = kzalloc(sizeof(*vi), GFP_KERNEL);
+ if (!vi)
+ return -ENOMEM;
+
+ vdev->priv = vi;
+ vi->vdev = vdev;
+ spin_lock_init(&vi->lock);
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ goto err_init_vq;
+
+ vi->idev = input_allocate_device();
+ if (!vi->idev) {
+ err = -ENOMEM;
+ goto err_input_alloc;
+ }
+ input_set_drvdata(vi->idev, vi);
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_NAME, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->name, min(size, sizeof(vi->name)));
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_SERIAL, 0);
+ virtio_cread_bytes(vi->vdev, offsetof(struct virtio_input_config,
+ u.string),
+ vi->serial, min(size, sizeof(vi->serial)));
+ snprintf(vi->phys, sizeof(vi->phys),
+ "virtio%d/input0", vdev->index);
+ vi->idev->name = vi->name;
+ vi->idev->phys = vi->phys;
+ vi->idev->uniq = vi->serial;
+
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0);
+ if (size >= sizeof(struct virtio_input_devids)) {
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.bustype, &vi->idev->id.bustype);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.vendor, &vi->idev->id.vendor);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.product, &vi->idev->id.product);
+ virtio_cread(vi->vdev, struct virtio_input_config,
+ u.ids.version, &vi->idev->id.version);
+ } else {
+ vi->idev->id.bustype = BUS_VIRTUAL;
+ }
+
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_PROP_BITS, 0,
+ vi->idev->propbit, INPUT_PROP_CNT);
+ size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REP);
+ if (size)
+ __set_bit(EV_REP, vi->idev->evbit);
+
+ vi->idev->dev.parent = &vdev->dev;
+ vi->idev->event = virtinput_status;
+
+ /* device -> kernel */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_KEY,
+ vi->idev->keybit, KEY_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_REL,
+ vi->idev->relbit, REL_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_ABS,
+ vi->idev->absbit, ABS_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_MSC,
+ vi->idev->mscbit, MSC_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SW,
+ vi->idev->swbit, SW_CNT);
+
+ /* kernel -> device */
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_LED,
+ vi->idev->ledbit, LED_CNT);
+ virtinput_cfg_bits(vi, VIRTIO_INPUT_CFG_EV_BITS, EV_SND,
+ vi->idev->sndbit, SND_CNT);
+
+ if (test_bit(EV_ABS, vi->idev->evbit)) {
+ for (abs = 0; abs < ABS_CNT; abs++) {
+ if (!test_bit(abs, vi->idev->absbit))
+ continue;
+ virtinput_cfg_abs(vi, abs);
+ }
+ }
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ err = input_register_device(vi->idev);
+ if (err)
+ goto err_input_register;
+
+ virtinput_fill_evt(vi);
+ return 0;
+
+err_input_register:
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+ input_free_device(vi->idev);
+err_input_alloc:
+ vdev->config->del_vqs(vdev);
+err_init_vq:
+ kfree(vi);
+ return err;
+}
+
+static void virtinput_remove(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ input_unregister_device(vi->idev);
+ vdev->config->del_vqs(vdev);
+ kfree(vi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtinput_freeze(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vi->lock, flags);
+ vi->ready = false;
+ spin_unlock_irqrestore(&vi->lock, flags);
+
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtinput_restore(struct virtio_device *vdev)
+{
+ struct virtio_input *vi = vdev->priv;
+ int err;
+
+ err = virtinput_init_vqs(vi);
+ if (err)
+ return err;
+
+ virtio_device_ready(vdev);
+ vi->ready = true;
+ virtinput_fill_evt(vi);
+ return 0;
+}
+#endif
+
+static unsigned int features[] = {
+ /* none */
+};
+static struct virtio_device_id id_table[] = {
+ { VIRTIO_ID_INPUT, VIRTIO_DEV_ANY_ID },
+ { 0 },
+};
+
+static struct virtio_driver virtio_input_driver = {
+ .driver.name = KBUILD_MODNAME,
+ .driver.owner = THIS_MODULE,
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtinput_probe,
+ .remove = virtinput_remove,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtinput_freeze,
+ .restore = virtinput_restore,
+#endif
+};
+
+module_virtio_driver(virtio_input_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Virtio input device driver");
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 6010d7ec0a0f..7a5e60dea6c5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -581,14 +581,6 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- /* Reject legacy-only IDs for version 2 devices */
- if (vm_dev->version == 2 &&
- virtio_device_is_legacy_only(vm_dev->vdev.id)) {
- dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n",
- vm_dev->vdev.id.device);
- return -ENODEV;
- }
-
if (vm_dev->version == 1)
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 2aa38e59db2e..e88e0997a889 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -20,6 +20,50 @@
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"
+/*
+ * Type-safe wrappers for io accesses.
+ * Use these to enforce at compile time the following spec requirement:
+ *
+ * The driver MUST access each field using the “natural” access
+ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
+ * for 16-bit fields and 8-bit accesses for 8-bit fields.
+ */
+static inline u8 vp_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+static inline u16 vp_ioread16(u16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 vp_ioread32(u32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void vp_iowrite16(u16 value, u16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void vp_iowrite32(u32 value, u32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static void vp_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ vp_iowrite32((u32)val, lo);
+ vp_iowrite32(val >> 32, hi);
+}
+
static void __iomem *map_capability(struct pci_dev *dev, int off,
size_t minlen,
u32 align,
@@ -94,22 +138,16 @@ static void __iomem *map_capability(struct pci_dev *dev, int off,
return p;
}
-static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi)
-{
- iowrite32((u32)val, lo);
- iowrite32(val >> 32, hi);
-}
-
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
u64 features;
- iowrite32(0, &vp_dev->common->device_feature_select);
- features = ioread32(&vp_dev->common->device_feature);
- iowrite32(1, &vp_dev->common->device_feature_select);
- features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32);
+ vp_iowrite32(0, &vp_dev->common->device_feature_select);
+ features = vp_ioread32(&vp_dev->common->device_feature);
+ vp_iowrite32(1, &vp_dev->common->device_feature_select);
+ features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
return features;
}
@@ -128,10 +166,10 @@ static int vp_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- iowrite32(0, &vp_dev->common->guest_feature_select);
- iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
- iowrite32(1, &vp_dev->common->guest_feature_select);
- iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
+ vp_iowrite32(0, &vp_dev->common->guest_feature_select);
+ vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
+ vp_iowrite32(1, &vp_dev->common->guest_feature_select);
+ vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
return 0;
}
@@ -210,14 +248,14 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u32 vp_generation(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->config_generation);
+ return vp_ioread8(&vp_dev->common->config_generation);
}
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(&vp_dev->common->device_status);
+ return vp_ioread8(&vp_dev->common->device_status);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -225,17 +263,17 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- iowrite8(status, &vp_dev->common->device_status);
+ vp_iowrite8(status, &vp_dev->common->device_status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
- iowrite8(0, &vp_dev->common->device_status);
+ vp_iowrite8(0, &vp_dev->common->device_status);
/* Flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any. */
- ioread8(&vp_dev->common->device_status);
+ vp_ioread8(&vp_dev->common->device_status);
/* Flush pending VQ/configuration callbacks. */
vp_synchronize_vectors(vdev);
}
@@ -243,10 +281,10 @@ static void vp_reset(struct virtio_device *vdev)
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
/* Setup the vector used for configuration events */
- iowrite16(vector, &vp_dev->common->msix_config);
+ vp_iowrite16(vector, &vp_dev->common->msix_config);
/* Verify we had enough resources to assign the vector */
/* Will also flush the write out to device */
- return ioread16(&vp_dev->common->msix_config);
+ return vp_ioread16(&vp_dev->common->msix_config);
}
static size_t vring_pci_size(u16 num)
@@ -286,15 +324,15 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
u16 num, off;
int err;
- if (index >= ioread16(&cfg->num_queues))
+ if (index >= vp_ioread16(&cfg->num_queues))
return ERR_PTR(-ENOENT);
/* Select the queue we're interested in */
- iowrite16(index, &cfg->queue_select);
+ vp_iowrite16(index, &cfg->queue_select);
/* Check if queue is either not available or already active. */
- num = ioread16(&cfg->queue_size);
- if (!num || ioread16(&cfg->queue_enable))
+ num = vp_ioread16(&cfg->queue_size);
+ if (!num || vp_ioread16(&cfg->queue_enable))
return ERR_PTR(-ENOENT);
if (num & (num - 1)) {
@@ -303,7 +341,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* get offset of notification word for this vq */
- off = ioread16(&cfg->queue_notify_off);
+ off = vp_ioread16(&cfg->queue_notify_off);
info->num = num;
info->msix_vector = msix_vec;
@@ -322,13 +360,13 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* activate the queue */
- iowrite16(num, &cfg->queue_size);
- iowrite64_twopart(virt_to_phys(info->queue),
- &cfg->queue_desc_lo, &cfg->queue_desc_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
- &cfg->queue_avail_lo, &cfg->queue_avail_hi);
- iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
- &cfg->queue_used_lo, &cfg->queue_used_hi);
+ vp_iowrite16(num, &cfg->queue_size);
+ vp_iowrite64_twopart(virt_to_phys(info->queue),
+ &cfg->queue_desc_lo, &cfg->queue_desc_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)),
+ &cfg->queue_avail_lo, &cfg->queue_avail_hi);
+ vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)),
+ &cfg->queue_used_lo, &cfg->queue_used_hi);
if (vp_dev->notify_base) {
/* offset should not wrap */
@@ -357,8 +395,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(msix_vec, &cfg->queue_msix_vector);
- msix_vec = ioread16(&cfg->queue_msix_vector);
+ vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
+ msix_vec = vp_ioread16(&cfg->queue_msix_vector);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto err_assign_vector;
@@ -393,8 +431,8 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* this, there's no way to go back except reset.
*/
list_for_each_entry(vq, &vdev->vqs, list) {
- iowrite16(vq->index, &vp_dev->common->queue_select);
- iowrite16(1, &vp_dev->common->queue_enable);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(1, &vp_dev->common->queue_enable);
}
return 0;
@@ -405,13 +443,13 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- iowrite16(vq->index, &vp_dev->common->queue_select);
+ vp_iowrite16(vq->index, &vp_dev->common->queue_select);
if (vp_dev->msix_enabled) {
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- &vp_dev->common->queue_msix_vector);
+ vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
+ &vp_dev->common->queue_msix_vector);
/* Flush the write out to device */
- ioread16(&vp_dev->common->queue_msix_vector);
+ vp_ioread16(&vp_dev->common->queue_msix_vector);
}
if (!vp_dev->notify_base)
@@ -577,9 +615,6 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
}
vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
- if (virtio_device_is_legacy_only(vp_dev->vdev.id))
- return -ENODEV;
-
/* check for a common config: if not, use legacy mode (bar 0). */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
IORESOURCE_IO | IORESOURCE_MEM);
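The point of the vp_io*() wrappers introduced above is that each takes a pointer of the field's exact width, so a mismatched access is caught at compile time instead of being performed with the wrong size. A small sketch (the register struct is hypothetical):

	struct example_regs {
		u16 queue_size;
		u32 device_feature;
	};

	static u32 example_read_feature(struct example_regs __iomem *regs)
	{
		/* Width matches the field, so this compiles cleanly. */
		return vp_ioread32(&regs->device_feature);
		/*
		 * vp_ioread32(&regs->queue_size) would instead produce an
		 * incompatible-pointer-type warning, flagging the wrong-width access.
		 */
	}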
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ce4f3a7f95fd..e5e7c5505de7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -169,7 +169,6 @@ config AT91SAM9X_WATCHDOG
config CADENCE_WATCHDOG
tristate "Cadence Watchdog Timer"
- depends on ARM
select WATCHDOG_CORE
help
Say Y here if you want to include support for the watchdog
@@ -1190,6 +1189,7 @@ config OCTEON_WDT
tristate "Cavium OCTEON SOC family Watchdog Timer"
depends on CAVIUM_OCTEON_SOC
default y
+ select WATCHDOG_CORE
select EXPORT_UASM if OCTEON_WDT = m
help
Hardware driver for OCTEON's on chip watchdog timer.
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index 4e37db3539a4..22d8ae65772a 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -99,12 +99,14 @@ static int secure_register_read(struct bcm_kona_wdt *wdt, uint32_t offset)
static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
{
- int ctl_val, cur_val, ret;
+ int ctl_val, cur_val;
unsigned long flags;
struct bcm_kona_wdt *wdt = s->private;
- if (!wdt)
- return seq_puts(s, "No device pointer\n");
+ if (!wdt) {
+ seq_puts(s, "No device pointer\n");
+ return 0;
+ }
spin_lock_irqsave(&wdt->lock, flags);
ctl_val = secure_register_read(wdt, SECWDOG_CTRL_REG);
@@ -112,7 +114,7 @@ static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
spin_unlock_irqrestore(&wdt->lock, flags);
if (ctl_val < 0 || cur_val < 0) {
- ret = seq_puts(s, "Error accessing hardware\n");
+ seq_puts(s, "Error accessing hardware\n");
} else {
int ctl, cur, ctl_sec, cur_sec, res;
@@ -121,15 +123,18 @@ static int bcm_kona_wdt_dbg_show(struct seq_file *s, void *data)
cur = cur_val & SECWDOG_COUNT_MASK;
ctl_sec = TICKS_TO_SECS(ctl, wdt);
cur_sec = TICKS_TO_SECS(cur, wdt);
- ret = seq_printf(s, "Resolution: %d / %d\n"
- "Control: %d s / %d (%#x) ticks\n"
- "Current: %d s / %d (%#x) ticks\n"
- "Busy count: %lu\n", res,
- wdt->resolution, ctl_sec, ctl, ctl, cur_sec,
- cur, cur, wdt->busy_count);
+ seq_printf(s,
+ "Resolution: %d / %d\n"
+ "Control: %d s / %d (%#x) ticks\n"
+ "Current: %d s / %d (%#x) ticks\n"
+ "Busy count: %lu\n",
+ res, wdt->resolution,
+ ctl_sec, ctl, ctl,
+ cur_sec, cur, cur,
+ wdt->busy_count);
}
- return ret;
+ return 0;
}
static int bcm_kona_dbg_open(struct inode *inode, struct file *file)
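The rework above reflects that the return value of seq_puts()/seq_printf() must not be relied on (it is being removed); a debugfs show callback simply emits its output and returns 0. A sketch, with a hypothetical example_dev:

	#include <linux/seq_file.h>

	struct example_dev {
		int state;
	};

	static int example_dbg_show(struct seq_file *s, void *data)
	{
		struct example_dev *dev = s->private;

		if (!dev) {
			seq_puts(s, "No device pointer\n");
			return 0;	/* report the condition in the output, not as an error */
		}

		seq_printf(s, "state: %d\n", dev->state);
		return 0;
	}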
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 8453531545df..14521c8b3d5a 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -3,6 +3,8 @@
*
* Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks
*
+ * Converted to use WATCHDOG_CORE by Aaro Koskinen <aaro.koskinen@iki.fi>.
+ *
* Some parts derived from wdt.c
*
* (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>,
@@ -103,13 +105,10 @@ MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static unsigned long octeon_wdt_is_open;
-static char expect_close;
-
-static u32 __initdata nmi_stage1_insns[64];
+static u32 nmi_stage1_insns[64] __initdata;
/* We need one branch and therefore one relocation per target label. */
-static struct uasm_label __initdata labels[5];
-static struct uasm_reloc __initdata relocs[5];
+static struct uasm_label labels[5] __initdata;
+static struct uasm_reloc relocs[5] __initdata;
enum lable_id {
label_enter_bootloader = 1
@@ -218,7 +217,8 @@ static void __init octeon_wdt_build_stage1(void)
pr_debug("\t.set pop\n");
if (len > 32)
- panic("NMI stage 1 handler exceeds 32 instructions, was %d\n", len);
+ panic("NMI stage 1 handler exceeds 32 instructions, was %d\n",
+ len);
}
static int cpu2core(int cpu)
@@ -294,6 +294,7 @@ static void octeon_wdt_write_hex(u64 value, int digits)
{
int d;
int v;
+
for (d = 0; d < digits; d++) {
v = (value >> ((digits - d - 1) * 4)) & 0xf;
if (v >= 10)
@@ -303,7 +304,7 @@ static void octeon_wdt_write_hex(u64 value, int digits)
}
}
-const char *reg_name[] = {
+static const char reg_name[][3] = {
"$0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
"a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -444,7 +445,7 @@ static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static void octeon_wdt_ping(void)
+static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog)
{
int cpu;
int coreid;
@@ -457,10 +458,12 @@ static void octeon_wdt_ping(void)
!cpumask_test_cpu(cpu, &irq_enabled_cpus)) {
/* We have to enable the irq */
int irq = OCTEON_IRQ_WDOG0 + coreid;
+
enable_irq(irq);
cpumask_set_cpu(cpu, &irq_enabled_cpus);
}
}
+ return 0;
}
static void octeon_wdt_calc_parameters(int t)
@@ -489,7 +492,8 @@ static void octeon_wdt_calc_parameters(int t)
timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * timeout_sec) >> 8;
}
-static int octeon_wdt_set_heartbeat(int t)
+static int octeon_wdt_set_timeout(struct watchdog_device *wdog,
+ unsigned int t)
{
int cpu;
int coreid;
@@ -509,158 +513,45 @@ static int octeon_wdt_set_heartbeat(int t)
cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64);
cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1);
}
- octeon_wdt_ping(); /* Get the irqs back on. */
+ octeon_wdt_ping(wdog); /* Get the irqs back on. */
return 0;
}
-/**
- * octeon_wdt_write:
- * @file: file handle to the watchdog
- * @buf: buffer to write (unused as data does not matter here
- * @count: count of bytes
- * @ppos: pointer to the position to write. No seeks allowed
- *
- * A write to a watchdog device is defined as a keepalive signal. Any
- * write of data will do, as we we don't define content meaning.
- */
-
-static ssize_t octeon_wdt_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 1;
- }
- }
- octeon_wdt_ping();
- }
- return count;
-}
-
-/**
- * octeon_wdt_ioctl:
- * @file: file handle to the device
- * @cmd: watchdog command
- * @arg: argument pointer
- *
- * The watchdog API defines a common set of functions for all
- * watchdogs according to their available features. We only
- * actually usefully support querying capabilities and setting
- * the timeout.
- */
-
-static long octeon_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_heartbeat;
-
- static struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT|
- WDIOF_MAGICCLOSE|
- WDIOF_KEEPALIVEPING,
- .firmware_version = 1,
- .identity = "OCTEON",
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
- case WDIOC_KEEPALIVE:
- octeon_wdt_ping();
- return 0;
- case WDIOC_SETTIMEOUT:
- if (get_user(new_heartbeat, p))
- return -EFAULT;
- if (octeon_wdt_set_heartbeat(new_heartbeat))
- return -EINVAL;
- /* Fall through. */
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- default:
- return -ENOTTY;
- }
-}
-
-/**
- * octeon_wdt_open:
- * @inode: inode of device
- * @file: file handle to device
- *
- * The watchdog device has been opened. The watchdog device is single
- * open and on opening we do a ping to reset the counters.
- */
-
-static int octeon_wdt_open(struct inode *inode, struct file *file)
+static int octeon_wdt_start(struct watchdog_device *wdog)
{
- if (test_and_set_bit(0, &octeon_wdt_is_open))
- return -EBUSY;
- /*
- * Activate
- */
- octeon_wdt_ping();
+ octeon_wdt_ping(wdog);
do_coundown = 1;
- return nonseekable_open(inode, file);
+ return 0;
}
-/**
- * octeon_wdt_release:
- * @inode: inode to board
- * @file: file handle to board
- *
- * The watchdog has a configurable API. There is a religious dispute
- * between people who want their watchdog to be able to shut down and
- * those who want to be sure if the watchdog manager dies the machine
- * reboots. In the former case we disable the counters, in the latter
- * case you have to open it again very soon.
- */
-
-static int octeon_wdt_release(struct inode *inode, struct file *file)
+static int octeon_wdt_stop(struct watchdog_device *wdog)
{
- if (expect_close) {
- do_coundown = 0;
- octeon_wdt_ping();
- } else {
- pr_crit("WDT device closed unexpectedly. WDT will not stop!\n");
- }
- clear_bit(0, &octeon_wdt_is_open);
- expect_close = 0;
+ do_coundown = 0;
+ octeon_wdt_ping(wdog);
return 0;
}
-static const struct file_operations octeon_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = octeon_wdt_write,
- .unlocked_ioctl = octeon_wdt_ioctl,
- .open = octeon_wdt_open,
- .release = octeon_wdt_release,
+static struct notifier_block octeon_wdt_cpu_notifier = {
+ .notifier_call = octeon_wdt_cpu_callback,
};
-static struct miscdevice octeon_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &octeon_wdt_fops,
+static const struct watchdog_info octeon_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
+ .identity = "OCTEON",
};
-static struct notifier_block octeon_wdt_cpu_notifier = {
- .notifier_call = octeon_wdt_cpu_callback,
+static const struct watchdog_ops octeon_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = octeon_wdt_start,
+ .stop = octeon_wdt_stop,
+ .ping = octeon_wdt_ping,
+ .set_timeout = octeon_wdt_set_timeout,
};
+static struct watchdog_device octeon_wdt = {
+ .info = &octeon_wdt_info,
+ .ops = &octeon_wdt_ops,
+};
/**
* Module/ driver initialization.
@@ -685,7 +576,8 @@ static int __init octeon_wdt_init(void)
max_timeout_sec = 6;
do {
max_timeout_sec--;
- timeout_cnt = ((octeon_get_io_clock_rate() >> 8) * max_timeout_sec) >> 8;
+ timeout_cnt = ((octeon_get_io_clock_rate() >> 8) *
+ max_timeout_sec) >> 8;
} while (timeout_cnt > 65535);
BUG_ON(timeout_cnt == 0);
@@ -694,11 +586,15 @@ static int __init octeon_wdt_init(void)
pr_info("Initial granularity %d Sec\n", timeout_sec);
- ret = misc_register(&octeon_wdt_miscdev);
+ octeon_wdt.timeout = timeout_sec;
+ octeon_wdt.max_timeout = UINT_MAX;
+
+ watchdog_set_nowayout(&octeon_wdt, nowayout);
+
+ ret = watchdog_register_device(&octeon_wdt);
if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- goto out;
+ pr_err("watchdog_register_device() failed: %d\n", ret);
+ return ret;
}
/* Build the NMI handler ... */
@@ -721,8 +617,7 @@ static int __init octeon_wdt_init(void)
__register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
cpu_notifier_register_done();
-out:
- return ret;
+ return 0;
}
/**
@@ -732,7 +627,7 @@ static void __exit octeon_wdt_cleanup(void)
{
int cpu;
- misc_deregister(&octeon_wdt_miscdev);
+ watchdog_unregister_device(&octeon_wdt);
cpu_notifier_register_begin();
__unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
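For comparison, the watchdog-core pattern that replaces the open-coded misc device amounts to filling in watchdog_info, watchdog_ops and a watchdog_device and registering it. A stripped-down sketch with hypothetical example_* callbacks:

	#include <linux/module.h>
	#include <linux/watchdog.h>

	static int example_wdt_start(struct watchdog_device *wdog)
	{
		return 0;	/* arm the hardware here */
	}

	static int example_wdt_stop(struct watchdog_device *wdog)
	{
		return 0;	/* quiesce the hardware here */
	}

	static int example_wdt_ping(struct watchdog_device *wdog)
	{
		return 0;	/* pet the hardware here */
	}

	static int example_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t)
	{
		wdog->timeout = t;
		return 0;
	}

	static const struct watchdog_info example_wdt_info = {
		.options  = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
		.identity = "EXAMPLE",
	};

	static const struct watchdog_ops example_wdt_ops = {
		.owner		= THIS_MODULE,
		.start		= example_wdt_start,
		.stop		= example_wdt_stop,
		.ping		= example_wdt_ping,
		.set_timeout	= example_wdt_set_timeout,
	};

	static struct watchdog_device example_wdt = {
		.info = &example_wdt_info,
		.ops  = &example_wdt_ops,
	};

	static int __init example_wdt_init(void)
	{
		example_wdt.timeout	= 60;
		example_wdt.max_timeout	= UINT_MAX;
		watchdog_set_nowayout(&example_wdt, false);
		return watchdog_register_device(&example_wdt);
	}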
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 55e220150103..b9c6049c3e78 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -216,7 +216,7 @@ static struct platform_driver platform_wdt_driver = {
module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
module_param(heartbeat, uint, 0);
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index aa85618c4d03..aa03ca8f2d9b 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -20,9 +20,9 @@
#include <linux/reboot.h>
#include <linux/watchdog.h>
-#define WDT_RST 0x0
-#define WDT_EN 0x8
-#define WDT_BITE_TIME 0x24
+#define WDT_RST 0x38
+#define WDT_EN 0x40
+#define WDT_BITE_TIME 0x5C
struct qcom_wdt {
struct watchdog_device wdd;
@@ -117,6 +117,8 @@ static int qcom_wdt_probe(struct platform_device *pdev)
{
struct qcom_wdt *wdt;
struct resource *res;
+ struct device_node *np = pdev->dev.of_node;
+ u32 percpu_offset;
int ret;
wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
@@ -124,6 +126,14 @@ static int qcom_wdt_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* We use CPU0's DGT for the watchdog */
+ if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
+ percpu_offset = 0;
+
+ res->start += percpu_offset;
+ res->end += percpu_offset;
+
wdt->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
@@ -203,9 +213,8 @@ static int qcom_wdt_remove(struct platform_device *pdev)
}
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-wdt-msm8960", },
- { .compatible = "qcom,kpss-wdt-apq8064", },
- { .compatible = "qcom,kpss-wdt-ipq8064", },
+ { .compatible = "qcom,kpss-timer" },
+ { .compatible = "qcom,scss-timer" },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index a62b1b6decf4..e7f0d5b60d3d 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -1,7 +1,7 @@
/*
* Watchdog driver for the RTC based watchdog in STMP3xxx and i.MX23/28
*
- * Author: Wolfram Sang <w.sang@pengutronix.de>
+ * Author: Wolfram Sang <kernel@pengutronix.de>
*
* Copyright (C) 2011-12 Wolfram Sang, Pengutronix
*
@@ -129,4 +129,4 @@ module_platform_driver(stmp3xxx_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX RTC Watchdog Driver");
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a270004c9605..7cd226da15fe 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -276,4 +276,8 @@ config XEN_AUTO_XLATE
help
Support for auto-translated physmap guests.
+config XEN_ACPI
+ def_bool y
+ depends on X86 && ACPI
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 40edd1cbb60d..e293bc507cbc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -13,7 +13,7 @@ CFLAGS_efi.o += -fshort-wchar
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
-dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y)
+dom0-$(CONFIG_XEN_ACPI) += acpi.o $(xen-pad-y)
xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 07ef38325223..b7f51504f85a 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -204,8 +204,7 @@ static LIST_HEAD(scsiback_free_pages);
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);
-/* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *scsiback_fabric_configfs;
+static const struct target_core_fabric_ops scsiback_ops;
static void scsiback_get(struct vscsibk_info *info)
{
@@ -1902,7 +1901,7 @@ scsiback_make_tpg(struct se_wwn *wwn,
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
- ret = core_tpg_register(&scsiback_fabric_configfs->tf_ops, wwn,
+ ret = core_tpg_register(&scsiback_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
@@ -1944,7 +1943,9 @@ static int scsiback_check_false(struct se_portal_group *se_tpg)
return 0;
}
-static struct target_core_fabric_ops scsiback_ops = {
+static const struct target_core_fabric_ops scsiback_ops = {
+ .module = THIS_MODULE,
+ .name = "xen-pvscsi",
.get_fabric_name = scsiback_get_fabric_name,
.get_fabric_proto_ident = scsiback_get_fabric_proto_ident,
.tpg_get_wwn = scsiback_get_fabric_wwn,
@@ -1991,62 +1992,10 @@ static struct target_core_fabric_ops scsiback_ops = {
.fabric_make_nodeacl = scsiback_make_nodeacl,
.fabric_drop_nodeacl = scsiback_drop_nodeacl,
#endif
-};
-
-static int scsiback_register_configfs(void)
-{
- struct target_fabric_configfs *fabric;
- int ret;
-
- pr_debug("fabric module %s on %s/%s on "UTS_RELEASE"\n",
- VSCSI_VERSION, utsname()->sysname, utsname()->machine);
- /*
- * Register the top level struct config_item_type with TCM core
- */
- fabric = target_fabric_configfs_init(THIS_MODULE, "xen-pvscsi");
- if (IS_ERR(fabric))
- return PTR_ERR(fabric);
- /*
- * Setup fabric->tf_ops from our local scsiback_ops
- */
- fabric->tf_ops = scsiback_ops;
- /*
- * Setup default attribute lists for various fabric->tf_cit_tmpl
- */
- fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = scsiback_wwn_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = scsiback_tpg_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = scsiback_param_attrs;
- fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
- fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
- /*
- * Register the fabric for use within TCM
- */
- ret = target_fabric_configfs_register(fabric);
- if (ret < 0) {
- target_fabric_configfs_free(fabric);
- return ret;
- }
- /*
- * Setup our local pointer to *fabric
- */
- scsiback_fabric_configfs = fabric;
- pr_debug("Set fabric -> scsiback_fabric_configfs\n");
- return 0;
-};
-
-static void scsiback_deregister_configfs(void)
-{
- if (!scsiback_fabric_configfs)
- return;
-
- target_fabric_configfs_deregister(scsiback_fabric_configfs);
- scsiback_fabric_configfs = NULL;
- pr_debug("Cleared scsiback_fabric_configfs\n");
+ .tfc_wwn_attrs = scsiback_wwn_attrs,
+ .tfc_tpg_base_attrs = scsiback_tpg_attrs,
+ .tfc_tpg_param_attrs = scsiback_param_attrs,
};
static const struct xenbus_device_id scsiback_ids[] = {
@@ -2078,6 +2027,9 @@ static int __init scsiback_init(void)
if (!xen_domain())
return -ENODEV;
+ pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
+ VSCSI_VERSION, utsname()->sysname, utsname()->machine);
+
scsiback_cachep = kmem_cache_create("vscsiif_cache",
sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
if (!scsiback_cachep)
@@ -2087,7 +2039,7 @@ static int __init scsiback_init(void)
if (ret)
goto out_cache_destroy;
- ret = scsiback_register_configfs();
+ ret = target_register_template(&scsiback_ops);
if (ret)
goto out_unregister_xenbus;
@@ -2110,7 +2062,7 @@ static void __exit scsiback_exit(void)
BUG();
gnttab_free_pages(1, &page);
}
- scsiback_deregister_configfs();
+ target_unregister_template(&scsiback_ops);
xenbus_unregister_driver(&scsiback_driver);
kmem_cache_destroy(scsiback_cachep);
}