-rw-r--r--  Documentation/block/bfq-iosched.txt | 29
-rw-r--r--  Documentation/block/null_blk.txt | 4
-rw-r--r--  Documentation/process/submit-checklist.rst | 27
-rw-r--r--  Documentation/translations/ja_JP/SubmitChecklist | 22
-rw-r--r--  arch/arc/configs/haps_hs_defconfig | 1
-rw-r--r--  arch/arc/configs/haps_hs_smp_defconfig | 1
-rw-r--r--  arch/arc/configs/nsim_700_defconfig | 1
-rw-r--r--  arch/arc/configs/nsim_hs_defconfig | 1
-rw-r--r--  arch/arc/configs/nsim_hs_smp_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_hs_defconfig | 1
-rw-r--r--  arch/arc/configs/nsimosci_hs_smp_defconfig | 1
-rw-r--r--  arch/arm/configs/aspeed_g4_defconfig | 1
-rw-r--r--  arch/arm/configs/aspeed_g5_defconfig | 1
-rw-r--r--  arch/arm/configs/at91_dt_defconfig | 1
-rw-r--r--  arch/arm/configs/clps711x_defconfig | 1
-rw-r--r--  arch/arm/configs/efm32_defconfig | 1
-rw-r--r--  arch/arm/configs/ezx_defconfig | 1
-rw-r--r--  arch/arm/configs/h3600_defconfig | 1
-rw-r--r--  arch/arm/configs/imote2_defconfig | 1
-rw-r--r--  arch/arm/configs/moxart_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v4t_defconfig | 1
-rw-r--r--  arch/arm/configs/omap1_defconfig | 1
-rw-r--r--  arch/arm/configs/stm32_defconfig | 1
-rw-r--r--  arch/arm/configs/u300_defconfig | 1
-rw-r--r--  arch/arm/configs/vexpress_defconfig | 1
-rw-r--r--  arch/m68k/configs/amcore_defconfig | 1
-rw-r--r--  arch/m68k/configs/m5475evb_defconfig | 1
-rw-r--r--  arch/m68k/configs/stmark2_defconfig | 1
-rw-r--r--  arch/mips/configs/ar7_defconfig | 1
-rw-r--r--  arch/mips/configs/decstation_defconfig | 1
-rw-r--r--  arch/mips/configs/decstation_r4k_defconfig | 1
-rw-r--r--  arch/mips/configs/loongson1b_defconfig | 1
-rw-r--r--  arch/mips/configs/loongson1c_defconfig | 1
-rw-r--r--  arch/mips/configs/rb532_defconfig | 1
-rw-r--r--  arch/mips/configs/rbtx49xx_defconfig | 1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig | 1
-rw-r--r--  arch/sh/configs/apsh4ad0a_defconfig | 1
-rw-r--r--  arch/sh/configs/ecovec24-romimage_defconfig | 1
-rw-r--r--  arch/sh/configs/rsk7264_defconfig | 1
-rw-r--r--  arch/sh/configs/rsk7269_defconfig | 1
-rw-r--r--  arch/sh/configs/sh7785lcr_32bit_defconfig | 1
-rw-r--r--  block/Kconfig | 24
-rw-r--r--  block/bfq-cgroup.c | 5
-rw-r--r--  block/bfq-iosched.c | 800
-rw-r--r--  block/bfq-iosched.h | 96
-rw-r--r--  block/bfq-wf2q.c | 12
-rw-r--r--  block/bio.c | 213
-rw-r--r--  block/blk-merge.c | 135
-rw-r--r--  block/blk.h | 2
-rw-r--r--  block/elevator.c | 6
-rw-r--r--  block/genhd.c | 48
-rw-r--r--  block/opal_proto.h | 2
-rw-r--r--  block/sed-opal.c | 716
-rw-r--r--  drivers/block/amiflop.c | 1
-rw-r--r--  drivers/block/ataflop.c | 1
-rw-r--r--  drivers/block/drbd/drbd_int.h | 5
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/block/loop.c | 35
-rw-r--r--  drivers/block/paride/pcd.c | 1
-rw-r--r--  drivers/block/paride/pd.c | 1
-rw-r--r--  drivers/block/paride/pf.c | 1
-rw-r--r--  drivers/block/pktcdvd.c | 1
-rw-r--r--  drivers/block/ps3disk.c | 4
-rw-r--r--  drivers/block/swim.c | 1
-rw-r--r--  drivers/block/swim3.c | 1
-rw-r--r--  drivers/block/virtio_blk.c | 3
-rw-r--r--  drivers/block/xsysace.c | 1
-rw-r--r--  drivers/cdrom/gdrom.c | 1
-rw-r--r--  drivers/ide/ide-cd.c | 1
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 5
-rw-r--r--  drivers/ide/ide-gd.c | 6
-rw-r--r--  drivers/md/dm-exception-store.h | 28
-rw-r--r--  drivers/md/dm-integrity.c | 8
-rw-r--r--  drivers/md/md-bitmap.c | 8
-rw-r--r--  drivers/md/md.c | 195
-rw-r--r--  drivers/md/md.h | 25
-rw-r--r--  drivers/md/raid5.c | 25
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 6
-rw-r--r--  drivers/nvme/host/pci.c | 263
-rw-r--r--  drivers/nvme/target/core.c | 9
-rw-r--r--  drivers/nvme/target/fc.c | 7
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 6
-rw-r--r--  drivers/nvme/target/tcp.c | 24
-rw-r--r--  drivers/scsi/sd.c | 33
-rw-r--r--  drivers/scsi/sr.c | 1
-rw-r--r--  drivers/xen/biomerge.c | 5
-rw-r--r--  fs/ext4/resize.c | 2
-rw-r--r--  fs/ext4/super.c | 32
-rw-r--r--  fs/gfs2/Kconfig | 1
-rw-r--r--  fs/nfs/Kconfig | 1
-rw-r--r--  fs/ocfs2/super.c | 10
-rw-r--r--  fs/stack.c | 15
-rw-r--r--  fs/xfs/Kconfig | 1
-rw-r--r--  fs/xfs/xfs_super.c | 10
-rw-r--r--  include/linux/bio.h | 3
-rw-r--r--  include/linux/blk_types.h | 29
-rw-r--r--  include/linux/blkdev.h | 34
-rw-r--r--  include/linux/bvec.h | 23
-rw-r--r--  include/linux/genhd.h | 19
-rw-r--r--  include/linux/kernel.h | 14
-rw-r--r--  include/linux/types.h | 5
-rw-r--r--  include/uapi/linux/sed-opal.h | 2
-rw-r--r--  include/xen/xen.h | 4
-rw-r--r--  lib/Kconfig.debug | 1
-rw-r--r--  tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h | 4
107 files changed, 1695 insertions, 1411 deletions
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 98a8dd5ee385..1a0f2ac02eb6 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -20,13 +20,26 @@ for that device, by setting low_latency to 0. See Section 3 for
details on how to configure BFQ for the desired tradeoff between
latency and throughput, or on how to maximize throughput.
-BFQ has a non-null overhead, which limits the maximum IOPS that a CPU
-can process for a device scheduled with BFQ. To give an idea of the
-limits on slow or average CPUs, here are, first, the limits of BFQ for
-three different CPUs, on, respectively, an average laptop, an old
-desktop, and a cheap embedded system, in case full hierarchical
-support is enabled (i.e., CONFIG_BFQ_GROUP_IOSCHED is set), but
-CONFIG_DEBUG_BLK_CGROUP is not set (Section 4-2):
+As every I/O scheduler, BFQ adds some overhead to per-I/O-request
+processing. To give an idea of this overhead, the total,
+single-lock-protected, per-request processing time of BFQ---i.e., the
+sum of the execution times of the request insertion, dispatch and
+completion hooks---is, e.g., 1.9 us on an Intel Core i7-2760QM@2.40GHz
+(dated CPU for notebooks; time measured with simple code
+instrumentation, and using the throughput-sync.sh script of the S
+suite [1], in performance-profiling mode). To put this result into
+context, the total, single-lock-protected, per-request execution time
+of the lightest I/O scheduler available in blk-mq, mq-deadline, is 0.7
+us (mq-deadline is ~800 LOC, against ~10500 LOC for BFQ).
+
+Scheduling overhead further limits the maximum IOPS that a CPU can
+process (already limited by the execution of the rest of the I/O
+stack). To give an idea of the limits with BFQ, on slow or average
+CPUs, here are, first, the limits of BFQ for three different CPUs, on,
+respectively, an average laptop, an old desktop, and a cheap embedded
+system, in case full hierarchical support is enabled (i.e.,
+CONFIG_BFQ_GROUP_IOSCHED is set), but CONFIG_DEBUG_BLK_CGROUP is not
+set (Section 4-2):
- Intel i7-4850HQ: 400 KIOPS
- AMD A8-3850: 250 KIOPS
- ARM CortexTM-A53 Octa-core: 80 KIOPS
@@ -566,3 +579,5 @@ applications. Unset this tunable if you need/want to control weights.
Slightly extended version:
http://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite-
results.pdf
+
+[3] https://github.com/Algodev-github/S
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 4cad1024fff7..41f0a3d33bbd 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -93,3 +93,7 @@ zoned=[0/1]: Default: 0
zone_size=[MB]: Default: 256
Per zone size when exposed as a zoned block device. Must be a power of two.
+
+zone_nr_conv=[nr_conv]: Default: 0
+ The number of conventional zones to create when block device is zoned. If
+ zone_nr_conv >= nr_zones, it will be reduced to nr_zones - 1.
diff --git a/Documentation/process/submit-checklist.rst b/Documentation/process/submit-checklist.rst
index 367353c54949..c88867b173d9 100644
--- a/Documentation/process/submit-checklist.rst
+++ b/Documentation/process/submit-checklist.rst
@@ -72,47 +72,44 @@ and elsewhere regarding submitting Linux kernel patches.
13) Has been build- and runtime tested with and without ``CONFIG_SMP`` and
``CONFIG_PREEMPT.``
-14) If the patch affects IO/Disk, etc: has been tested with and without
- ``CONFIG_LBDAF.``
+16) All codepaths have been exercised with all lockdep features enabled.
-15) All codepaths have been exercised with all lockdep features enabled.
+17) All new ``/proc`` entries are documented under ``Documentation/``
-16) All new ``/proc`` entries are documented under ``Documentation/``
-
-17) All new kernel boot parameters are documented in
+18) All new kernel boot parameters are documented in
``Documentation/admin-guide/kernel-parameters.rst``.
-18) All new module parameters are documented with ``MODULE_PARM_DESC()``
+19) All new module parameters are documented with ``MODULE_PARM_DESC()``
-19) All new userspace interfaces are documented in ``Documentation/ABI/``.
+20) All new userspace interfaces are documented in ``Documentation/ABI/``.
See ``Documentation/ABI/README`` for more information.
Patches that change userspace interfaces should be CCed to
linux-api@vger.kernel.org.
-20) Check that it all passes ``make headers_check``.
+21) Check that it all passes ``make headers_check``.
-21) Has been checked with injection of at least slab and page-allocation
+22) Has been checked with injection of at least slab and page-allocation
failures. See ``Documentation/fault-injection/``.
If the new code is substantial, addition of subsystem-specific fault
injection might be appropriate.
-22) Newly-added code has been compiled with ``gcc -W`` (use
+23) Newly-added code has been compiled with ``gcc -W`` (use
``make EXTRA_CFLAGS=-W``). This will generate lots of noise, but is good
for finding bugs like "warning: comparison between signed and unsigned".
-23) Tested after it has been merged into the -mm patchset to make sure
+24) Tested after it has been merged into the -mm patchset to make sure
that it still works with all of the other queued patches and various
changes in the VM, VFS, and other subsystems.
-24) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
+25) All memory barriers {e.g., ``barrier()``, ``rmb()``, ``wmb()``} need a
comment in the source code that explains the logic of what they are doing
and why.
-25) If any ioctl's are added by the patch, then also update
+26) If any ioctl's are added by the patch, then also update
``Documentation/ioctl/ioctl-number.txt``.
-26) If your modified source code depends on or uses any of the kernel
+27) If your modified source code depends on or uses any of the kernel
APIs or features that are related to the following ``Kconfig`` symbols,
then test multiple builds with the related ``Kconfig`` symbols disabled
and/or ``=m`` (if that option is available) [not all of these at the
diff --git a/Documentation/translations/ja_JP/SubmitChecklist b/Documentation/translations/ja_JP/SubmitChecklist
index 60c7c35ac517..b42220d3d46c 100644
--- a/Documentation/translations/ja_JP/SubmitChecklist
+++ b/Documentation/translations/ja_JP/SubmitChecklist
@@ -74,38 +74,34 @@ Linux カーネルパッチ投稿者向けチェックリスト
13: CONFIG_SMP, CONFIG_PREEMPT を有効にした場合と無効にした場合の両方で
ビルドした上、動作確認を行ってください。
-14: もしパッチがディスクのI/O性能などに影響を与えるようであれば、
- 'CONFIG_LBDAF'オプションを有効にした場合と無効にした場合の両方で
- テストを実施してみてください。
+14: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。
-15: lockdepの機能を全て有効にした上で、全てのコードパスを評価してください。
-
-16: /proc に新しいエントリを追加した場合には、Documentation/ 配下に
+15: /proc に新しいエントリを追加した場合には、Documentation/ 配下に
必ずドキュメントを追加してください。
-17: 新しいブートパラメータを追加した場合には、
+16: 新しいブートパラメータを追加した場合には、
必ずDocumentation/admin-guide/kernel-parameters.rst に説明を追加してください。
-18: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を
+17: 新しくmoduleにパラメータを追加した場合には、MODULE_PARM_DESC()を
利用して必ずその説明を記述してください。
-19: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に
+18: 新しいuserspaceインタフェースを作成した場合には、Documentation/ABI/ に
Documentation/ABI/README を参考にして必ずドキュメントを追加してください。
-20: 'make headers_check'を実行して全く問題がないことを確認してください。
+19: 'make headers_check'を実行して全く問題がないことを確認してください。
-21: 少なくともslabアロケーションとpageアロケーションに失敗した場合の
+20: 少なくともslabアロケーションとpageアロケーションに失敗した場合の
挙動について、fault-injectionを利用して確認してください。
Documentation/fault-injection/ を参照してください。
追加したコードがかなりの量であったならば、サブシステム特有の
fault-injectionを追加したほうが良いかもしれません。
-22: 新たに追加したコードは、`gcc -W'でコンパイルしてください。
+21: 新たに追加したコードは、`gcc -W'でコンパイルしてください。
このオプションは大量の不要なメッセージを出力しますが、
"warning: comparison between signed and unsigned" のようなメッセージは、
バグを見つけるのに役に立ちます。
-23: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや
+22: 投稿したパッチが -mm パッチセットにマージされた後、全ての既存のパッチや
VM, VFS およびその他のサブシステムに関する様々な変更と、現時点でも共存
できることを確認するテストを行ってください。
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index f56cc2070c11..b117e6c16d41 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -15,7 +15,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index b6f2482c7e74..33a787c375e2 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -17,7 +17,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 318e4cd29629..de398c7b10b3 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -18,7 +18,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index c15807b0e0c1..2dbd34a9ff07 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 65e983fd942b..c7135f1e2583 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -18,7 +18,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 08c5b99ac341..385a71d3c478 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -18,7 +18,6 @@ CONFIG_PERF_EVENTS=y
CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 5b5e26d67955..248a2c3bdc12 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -17,7 +17,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 26af9b2f7fcb..1a4bc7b660fb 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -12,7 +12,6 @@ CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/aspeed_g4_defconfig b/arch/arm/configs/aspeed_g4_defconfig
index 1446262921b4..bdbade6af9c7 100644
--- a/arch/arm/configs/aspeed_g4_defconfig
+++ b/arch/arm/configs/aspeed_g4_defconfig
@@ -23,7 +23,6 @@ CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_GCC_PLUGINS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEBUG_FS is not set
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 02fa3a41add5..4bde84eae4eb 100644
--- a/arch/arm/configs/aspeed_g5_defconfig
+++ b/arch/arm/configs/aspeed_g5_defconfig
@@ -23,7 +23,6 @@ CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_GCC_PLUGINS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEBUG_FS is not set
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index e4b1be66b3f5..b7752929975c 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -9,7 +9,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/clps711x_defconfig b/arch/arm/configs/clps711x_defconfig
index fc105c9178cc..09ae750164e0 100644
--- a/arch/arm/configs/clps711x_defconfig
+++ b/arch/arm/configs/clps711x_defconfig
@@ -6,7 +6,6 @@ CONFIG_RD_LZMA=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_CLPS711X=y
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
index ee42158f41ec..10ea92513a69 100644
--- a/arch/arm/configs/efm32_defconfig
+++ b/arch/arm/configs/efm32_defconfig
@@ -11,7 +11,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 484e51fbd4a6..e3afca5bd9d6 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -13,7 +13,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index ebeca11faa48..175881b7da7c 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -4,7 +4,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index f204017c26b9..9b779e13e05d 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -12,7 +12,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 078228a19339..6a11669fa536 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -15,7 +15,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_MULTI_V4=y
diff --git a/arch/arm/configs/multi_v4t_defconfig b/arch/arm/configs/multi_v4t_defconfig
index 9a6390c172d6..eeea0c41138b 100644
--- a/arch/arm/configs/multi_v4t_defconfig
+++ b/arch/arm/configs/multi_v4t_defconfig
@@ -5,7 +5,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTI_V4T=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index cfc00b0961ec..8448a7f407a4 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -17,7 +17,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 0258ba891376..152321d2893e 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -13,7 +13,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 36d77406e31b..831ba6a9ee8b 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -9,7 +9,6 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 392ed3b3613c..484d77a7f589 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -14,7 +14,6 @@ CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 0857cdbfde0c..d5e683dd885d 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -12,7 +12,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
diff --git a/arch/m68k/configs/m5475evb_defconfig b/arch/m68k/configs/m5475evb_defconfig
index 4f4ccd13c11b..434bd3750966 100644
--- a/arch/m68k/configs/m5475evb_defconfig
+++ b/arch/m68k/configs/m5475evb_defconfig
@@ -11,7 +11,6 @@ CONFIG_SYSCTL_SYSCALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
CONFIG_MODULES=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig
index 69f23c7b0497..27fa9465d19d 100644
--- a/arch/m68k/configs/stmark2_defconfig
+++ b/arch/m68k/configs/stmark2_defconfig
@@ -17,7 +17,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BLK_CMDLINE_PARSER=y
# CONFIG_MMU is not set
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 9fbfb6e5c7d2..c83fdf649327 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -18,7 +18,6 @@ CONFIG_KEXEC=y
# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_BSD_DISKLABEL=y
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index 0c86ed86266a..30a6eafdb1d0 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -17,7 +17,6 @@ CONFIG_TC=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_OSF_PARTITION=y
# CONFIG_EFI_PARTITION is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index 0e54ab2680ce..e2b58dbf4aa9 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -16,7 +16,6 @@ CONFIG_TC=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_LBDAF is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_OSF_PARTITION=y
# CONFIG_EFI_PARTITION is not set
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
index b064d68a5424..aa7e98c5f5fc 100644
--- a/arch/mips/configs/loongson1b_defconfig
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -19,7 +19,6 @@ CONFIG_MACH_LOONGSON32=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
index 5d76559b56cd..520e7ef35383 100644
--- a/arch/mips/configs/loongson1c_defconfig
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -20,7 +20,6 @@ CONFIG_LOONGSON1_LS1C=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 7befe05fd813..ed1038f62a2c 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -19,7 +19,6 @@ CONFIG_PCI=y
# CONFIG_PCI_QUIRKS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 50a2c9ad583f..b0f0c5f9ad9d 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -17,7 +17,6 @@ CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 37ae4b57c001..a8f9bbef0975 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -14,7 +14,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PA7100LC=y
CONFIG_SMP=y
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig
index 825c641726c4..d0d9ebc7165b 100644
--- a/arch/sh/configs/apsh4ad0a_defconfig
+++ b/arch/sh/configs/apsh4ad0a_defconfig
@@ -19,7 +19,6 @@ CONFIG_SLAB=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_CPU_SUBTYPE_SH7786=y
diff --git a/arch/sh/configs/ecovec24-romimage_defconfig b/arch/sh/configs/ecovec24-romimage_defconfig
index 0c5dfccbfe37..bdb61d1d0127 100644
--- a/arch/sh/configs/ecovec24-romimage_defconfig
+++ b/arch/sh/configs/ecovec24-romimage_defconfig
@@ -7,7 +7,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
CONFIG_MEMORY_SIZE=0x10000000
diff --git a/arch/sh/configs/rsk7264_defconfig b/arch/sh/configs/rsk7264_defconfig
index 2b9b731fc86b..ad003ee469ea 100644
--- a/arch/sh/configs/rsk7264_defconfig
+++ b/arch/sh/configs/rsk7264_defconfig
@@ -16,7 +16,6 @@ CONFIG_PERF_COUNTERS=y
CONFIG_SLAB=y
CONFIG_MMAP_ALLOW_UNINITIALIZED=y
CONFIG_PROFILING=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/sh/configs/rsk7269_defconfig b/arch/sh/configs/rsk7269_defconfig
index d041f7bcb84c..27fc01d58cf8 100644
--- a/arch/sh/configs/rsk7269_defconfig
+++ b/arch/sh/configs/rsk7269_defconfig
@@ -3,7 +3,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 2ddf5ca7094e..a89ccc15af23 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -11,7 +11,6 @@ CONFIG_PROFILING=y
CONFIG_GCOV_KERNEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7785=y
CONFIG_MEMORY_START=0x40000000
diff --git a/block/Kconfig b/block/Kconfig
index 028bc085dac8..1b220101a9cb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,30 +26,6 @@ menuconfig BLOCK
if BLOCK
-config LBDAF
- bool "Support for large (2TB+) block devices and files"
- depends on !64BIT
- default y
- help
- Enable block devices or files of size 2TB and larger.
-
- This option is required to support the full capacity of large
- (2TB+) block devices, including RAID, disk, Network Block Device,
- Logical Volume Manager (LVM) and loopback.
-
- This option also enables support for single files larger than
- 2TB.
-
- The ext4 filesystem requires that this feature be enabled in
- order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount in the read-write
- mode any filesystems that use the huge_file feature, which is
- enabled by default by mke2fs.ext4.
-
- The GFS2 filesystem also requires this feature.
-
- If unsure, say Y.
-
config BLK_SCSI_REQUEST
bool
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c6113af31960..793c027ca60e 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -578,7 +578,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqg_and_blkg_get(bfqg);
if (bfq_bfqq_busy(bfqq)) {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
bfq_activate_bfqq(bfqd, bfqq);
}
@@ -1102,7 +1103,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
- /* the same statictics which cover the bfqg and its descendants */
+ /* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
.private = (unsigned long)&blkcg_policy_bfq,
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 5ba1e0d841b4..b85a4ab8b9db 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -189,7 +189,7 @@ static const int bfq_default_max_budget = 16 * 1024;
/*
* When a sync request is dispatched, the queue that contains that
* request, and all the ancestor entities of that queue, are charged
- * with the number of sectors of the request. In constrast, if the
+ * with the number of sectors of the request. In contrast, if the
* request is async, then the queue and its ancestor entities are
* charged with the number of sectors of the request, multiplied by
* the factor below. This throttles the bandwidth for async I/O,
@@ -217,7 +217,7 @@ const int bfq_timeout = HZ / 8;
* queue merging.
*
* As can be deduced from the low time limit below, queue merging, if
- * successful, happens at the very beggining of the I/O of the involved
+ * successful, happens at the very beginning of the I/O of the involved
* cooperating processes, as a consequence of the arrival of the very
* first requests from each cooperator. After that, there is very
* little chance to find cooperators.
@@ -242,6 +242,14 @@ static struct kmem_cache *bfq_pool;
blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+/*
+ * Sync random I/O is likely to be confused with soft real-time I/O,
+ * because it is characterized by limited throughput and apparently
+ * isochronous arrival pattern. To avoid false positives, queues
+ * containing only random (seeky) I/O are prevented from being tagged
+ * as soft real-time.
+ */
+#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history & -1)
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
@@ -433,7 +441,7 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
/*
* Lifted from AS - choose which of rq1 and rq2 that is best served now.
- * We choose the request that is closesr to the head right now. Distance
+ * We choose the request that is closer to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
@@ -595,7 +603,16 @@ static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
bfq_merge_time_limit);
}
-void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+/*
+ * The following function is not marked as __cold because it is
+ * actually cold, but for the same performance goal described in the
+ * comments on the likely() at the beginning of
+ * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
+ * execution time for the case where this function is not invoked, we
+ * had to add an unlikely() in each involved if().
+ */
+void __cold
+bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
@@ -629,12 +646,19 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * The following function returns true if every queue must receive the
- * same share of the throughput (this condition is used when deciding
- * whether idling may be disabled, see the comments in the function
- * bfq_better_to_idle()).
+ * The following function returns false either if every active queue
+ * must receive the same share of the throughput (symmetric scenario),
+ * or, as a special case, if bfqq must receive a share of the
+ * throughput lower than or equal to the share that every other active
+ * queue must receive. If bfqq does sync I/O, then these are the only
+ * two cases where bfqq happens to be guaranteed its share of the
+ * throughput even if I/O dispatching is not plugged when bfqq remains
+ * temporarily empty (for more details, see the comments in the
+ * function bfq_better_to_idle()). For this reason, the return value
+ * of this function is used to check whether I/O-dispatch plugging can
+ * be avoided.
*
- * Such a scenario occurs when:
+ * The above first case (symmetric scenario) occurs when:
* 1) all active queues have the same weight,
* 2) all active queues belong to the same I/O-priority class,
* 3) all active groups at the same level in the groups tree have the same
@@ -654,30 +678,36 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* support or the cgroups interface are not enabled, thus no state
* needs to be maintained in this case.
*/
-static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
+ bool smallest_weight = bfqq &&
+ bfqq->weight_counter &&
+ bfqq->weight_counter ==
+ container_of(
+ rb_first_cached(&bfqd->queue_weights_tree),
+ struct bfq_weight_counter,
+ weights_node);
+
/*
* For queue weights to differ, queue_weights_tree must contain
* at least two nodes.
*/
- bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
- (bfqd->queue_weights_tree.rb_node->rb_left ||
- bfqd->queue_weights_tree.rb_node->rb_right);
+ bool varied_queue_weights = !smallest_weight &&
+ !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
+ (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
bool multiple_classes_busy =
(bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
(bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
(bfqd->busy_queues[1] && bfqd->busy_queues[2]);
- /*
- * For queue weights to differ, queue_weights_tree must contain
- * at least two nodes.
- */
- return !(varied_queue_weights || multiple_classes_busy
+ return varied_queue_weights || multiple_classes_busy
#ifdef CONFIG_BFQ_GROUP_IOSCHED
|| bfqd->num_groups_with_pending_reqs > 0
#endif
- );
+ ;
}
/*
@@ -694,10 +724,11 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* should be low too.
*/
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
struct bfq_entity *entity = &bfqq->entity;
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
/*
* Do not insert if the queue is already associated with a
@@ -726,8 +757,10 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
if (entity->weight < __counter->weight)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
@@ -736,7 +769,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/*
* In the unlucky event of an allocation failure, we just
* exit. This will cause the weight of queue to not be
- * considered in bfq_symmetric_scenario, which, in its turn,
+ * considered in bfq_asymmetric_scenario, which, in its turn,
* causes the scenario to be deemed wrongly symmetric in case
* bfqq's weight would have been the only weight making the
* scenario asymmetric. On the bright side, no unbalance will
@@ -750,7 +783,8 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->weight_counter->weight = entity->weight;
rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
- rb_insert_color(&bfqq->weight_counter->weights_node, root);
+ rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
+ leftmost);
inc_counter:
bfqq->weight_counter->num_active++;
@@ -765,7 +799,7 @@ inc_counter:
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
if (!bfqq->weight_counter)
return;
@@ -774,7 +808,7 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&bfqq->weight_counter->weights_node, root);
+ rb_erase_cached(&bfqq->weight_counter->weights_node, root);
kfree(bfqq->weight_counter);
reset_entity_pointer:
@@ -889,7 +923,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
- !bfq_symmetric_scenario(bfqq->bfqd))
+ bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
return blk_rq_sectors(rq);
return blk_rq_sectors(rq) * bfq_async_charge_factor;
@@ -955,7 +989,7 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
* of several files
* mplayer took 23 seconds to start, if constantly weight-raised.
*
- * As for higher values than that accomodating the above bad
+ * As for higher values than that accommodating the above bad
* scenario, tests show that higher values would often yield
* the opposite of the desired result, i.e., would worsen
* responsiveness by allowing non-interactive applications to
@@ -994,6 +1028,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
else
bfq_clear_bfqq_IO_bound(bfqq);
+ bfqq->entity.new_weight = bic->saved_weight;
bfqq->ttime = bic->saved_ttime;
bfqq->wr_coeff = bic->saved_wr_coeff;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
@@ -1041,8 +1076,18 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
hlist_del_init(&item->burst_list_node);
- hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
- bfqd->burst_size = 1;
+
+ /*
+ * Start the creation of a new burst list only if there is no
+ * active queue. See comments on the conditional invocation of
+ * bfq_handle_burst().
+ */
+ if (bfq_tot_busy_queues(bfqd) == 0) {
+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+ bfqd->burst_size = 1;
+ } else
+ bfqd->burst_size = 0;
+
bfqd->burst_parent_entity = bfqq->entity.parent;
}
@@ -1098,7 +1143,8 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* many parallel threads/processes. Examples are systemd during boot,
* or git grep. To help these processes get their job done as soon as
* possible, it is usually better to not grant either weight-raising
- * or device idling to their queues.
+ * or device idling to their queues, unless these queues must be
+ * protected from the I/O flowing through other active queues.
*
* In this comment we describe, firstly, the reasons why this fact
* holds, and, secondly, the next function, which implements the main
@@ -1110,7 +1156,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* cumulatively served, the sooner the target job of these queues gets
* completed. As a consequence, weight-raising any of these queues,
* which also implies idling the device for it, is almost always
- * counterproductive. In most cases it just lowers throughput.
+ * counterproductive, unless there are other active queues to isolate
+ * these new queues from. If there no other active queues, then
+ * weight-raising these new queues just lowers throughput in most
+ * cases.
*
* On the other hand, a burst of queue creations may be caused also by
* the start of an application that does not consist of a lot of
@@ -1144,14 +1193,16 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* are very rare. They typically occur if some service happens to
* start doing I/O exactly when the interactive task starts.
*
- * Turning back to the next function, it implements all the steps
- * needed to detect the occurrence of a large burst and to properly
- * mark all the queues belonging to it (so that they can then be
- * treated in a different way). This goal is achieved by maintaining a
- * "burst list" that holds, temporarily, the queues that belong to the
- * burst in progress. The list is then used to mark these queues as
- * belonging to a large burst if the burst does become large. The main
- * steps are the following.
+ * Turning back to the next function, it is invoked only if there are
+ * no active queues (apart from active queues that would belong to the
+ * same, possible burst bfqq would belong to), and it implements all
+ * the steps needed to detect the occurrence of a large burst and to
+ * properly mark all the queues belonging to it (so that they can then
+ * be treated in a different way). This goal is achieved by
+ * maintaining a "burst list" that holds, temporarily, the queues that
+ * belong to the burst in progress. The list is then used to mark
+ * these queues as belonging to a large burst if the burst does become
+ * large. The main steps are the following.
*
* . when the very first queue is created, the queue is inserted into the
* list (as it could be the first queue in a possible burst)
@@ -1596,6 +1647,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
*/
in_burst = bfq_bfqq_in_large_burst(bfqq);
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+ !BFQQ_TOTALLY_SEEKY(bfqq) &&
!in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start) &&
bfqq->dispatched == 0;
@@ -1704,6 +1756,123 @@ static void bfq_add_request(struct request *rq)
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
+ /*
+ * Periodically reset inject limit, to make sure that
+ * the latter eventually drops in case workload
+ * changes, see step (3) in the comments on
+ * bfq_update_inject_limit().
+ */
+ if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(1000))) {
+ /* invalidate baseline total service time */
+ bfqq->last_serv_time_ns = 0;
+
+ /*
+ * Reset pointer in case we are waiting for
+ * some request completion.
+ */
+ bfqd->waited_rq = NULL;
+
+ /*
+ * If bfqq has a short think time, then start
+ * by setting the inject limit to 0
+ * prudentially, because the service time of
+ * an injected I/O request may be higher than
+ * the think time of bfqq, and therefore, if
+ * one request was injected when bfqq remains
+ * empty, this injected request might delay
+ * the service of the next I/O request for
+ * bfqq significantly. In case bfqq can
+ * actually tolerate some injection, then the
+ * adaptive update will however raise the
+ * limit soon. This lucky circumstance holds
+ * exactly because bfqq has a short think
+ * time, and thus, after remaining empty, is
+ * likely to get new I/O enqueued---and then
+ * completed---before being expired. This is
+ * the very pattern that gives the
+ * limit-update algorithm the chance to
+ * measure the effect of injection on request
+ * service times, and then to update the limit
+ * accordingly.
+ *
+ * On the opposite end, if bfqq has a long
+ * think time, then start directly by 1,
+ * because:
+ * a) on the bright side, keeping at most one
+ * request in service in the drive is unlikely
+ * to cause any harm to the latency of bfqq's
+ * requests, as the service time of a single
+ * request is likely to be lower than the
+ * think time of bfqq;
+ * b) on the downside, after becoming empty,
+ * bfqq is likely to expire before getting its
+ * next request. With this request arrival
+ * pattern, it is very hard to sample total
+ * service times and update the inject limit
+ * accordingly (see comments on
+ * bfq_update_inject_limit()). So the limit is
+ * likely to be never, or at least seldom,
+ * updated. As a consequence, by setting the
+ * limit to 1, we avoid that no injection ever
+ * occurs with bfqq. On the downside, this
+ * proactive step further reduces chances to
+ * actually compute the baseline total service
+ * time. Thus it reduces chances to execute the
+ * limit-update algorithm and possibly raise the
+ * limit to more than 1.
+ */
+ if (bfq_bfqq_has_short_ttime(bfqq))
+ bfqq->inject_limit = 0;
+ else
+ bfqq->inject_limit = 1;
+ bfqq->decrease_time_jif = jiffies;
+ }
+
+ /*
+ * The following conditions must hold to setup a new
+ * sampling of total service time, and then a new
+ * update of the inject limit:
+ * - bfqq is in service, because the total service
+ * time is evaluated only for the I/O requests of
+ * the queues in service;
+ * - this is the right occasion to compute or to
+ * lower the baseline total service time, because
+ * there are actually no requests in the drive,
+ * or
+ * the baseline total service time is available, and
+ * this is the right occasion to compute the other
+ * quantity needed to update the inject limit, i.e.,
+ * the total service time caused by the amount of
+ * injection allowed by the current value of the
+ * limit. It is the right occasion because injection
+ * has actually been performed during the service
+ * hole, and there are still in-flight requests,
+ * which are very likely to be exactly the injected
+ * requests, or part of them;
+ * - the minimum interval for sampling the total
+ * service time and updating the inject limit has
+ * elapsed.
+ */
+ if (bfqq == bfqd->in_service_queue &&
+ (bfqd->rq_in_driver == 0 ||
+ (bfqq->last_serv_time_ns > 0 &&
+ bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
+ time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(100))) {
+ bfqd->last_empty_occupied_ns = ktime_get_ns();
+ /*
+ * Start the state machine for measuring the
+ * total service time of rq: setting
+ * wait_dispatch will cause bfqd->waited_rq to
+ * be set when rq will be dispatched.
+ */
+ bfqd->wait_dispatch = true;
+ bfqd->rqs_injected = false;
+ }
+ }
+
elv_rb_add(&bfqq->sort_list, rq);
/*
@@ -1715,8 +1884,9 @@ static void bfq_add_request(struct request *rq)
/*
* Adjust priority tree position, if next_rq changes.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- if (prev != bfqq->next_rq)
+ if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
bfq_pos_tree_add_move(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
@@ -1856,7 +2026,9 @@ static void bfq_remove_request(struct request_queue *q,
bfqq->pos_root = NULL;
}
} else {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /* see comments on bfq_pos_tree_add_move() for the unlikely() */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1941,7 +2113,12 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
*/
if (prev != bfqq->next_rq) {
bfq_updated_next_req(bfqd, bfqq);
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /*
+ * See comments on bfq_pos_tree_add_move() for
+ * the unlikely().
+ */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
}
}
@@ -2224,6 +2401,46 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+ * device reaches a high speed through internal parallelism
+ * and pipelining. This means that, to reach a high
+ * throughput, it must have many requests enqueued at the same
+ * time. But, in this configuration, the internal scheduling
+ * algorithm of the device does exactly the job of queue
+ * merging: it reorders requests so as to obtain as much as
+ * possible a sequential I/O pattern. As a consequence, with
+ * the workload generated by processes doing interleaved I/O,
+ * the throughput reached by the device is likely to be the
+ * same, with and without queue merging.
+ *
+ * Disabling merging also provides a remarkable benefit in
+ * terms of throughput. Merging tends to make many workloads
+ * artificially more uneven, because of shared queues
+ * remaining non empty for incomparably more time than
+ * non-merged queues. This may accentuate workload
+ * asymmetries. For example, if one of the queues in a set of
+ * merged queues has a higher weight than a normal queue, then
+ * the shared queue may inherit such a high weight and, by
+ * staying almost always active, may force BFQ to perform I/O
+ * plugging most of the time. This evidently makes it harder
+ * for BFQ to let the device reach a high throughput.
+ *
+ * Finally, the likely() macro below is not used because one
+ * of the two branches is more likely than the other, but to
+ * have the code path after the following if() executed as
+ * fast as possible for the case of a non rotational device
+ * with queueing. We want it because this is the fastest kind
+ * of device. On the opposite end, the likely() may lengthen
+ * the execution time of BFQ for the case of slower devices
+ * (rotational or at least without queueing). But in this case
+ * the execution time of BFQ matters very little, if not at
+ * all.
+ */
+ if (likely(bfqd->nonrot_with_queueing))
+ return NULL;
+
+ /*
* Prevent bfqq from being merged if it has been created too
* long ago. The idea is that true cooperating processes, and
* thus their associated bfq_queues, are supposed to be
@@ -2286,6 +2503,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
if (!bic)
return;
+ bic->saved_weight = bfqq->entity.orig_weight;
bic->saved_ttime = bfqq->ttime;
bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
@@ -2374,6 +2592,16 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
* assignment causes no harm).
*/
new_bfqq->bic = NULL;
+ /*
+ * If the queue is shared, the pid is the pid of one of the associated
+ * processes. Which pid depends on the exact sequence of merge events
+ * the queue underwent. So printing such a pid is useless and confusing
+ * because it reports a random pid between those of the associated
+ * processes.
+ * We mark such a queue with a pid -1, and then print SHARED instead of
+ * a pid in logging messages.
+ */
+ new_bfqq->pid = -1;
bfqq->bic = NULL;
/* release process reference to bfqq */
bfq_put_queue(bfqq);
@@ -2408,8 +2636,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
/*
* bic still points to bfqq, then it has not yet been
* redirected to some other bfq_queue, and a queue
- * merge beween bfqq and new_bfqq can be safely
- * fulfillled, i.e., bic can be redirected to new_bfqq
+ * merge between bfqq and new_bfqq can be safely
+ * fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
@@ -2543,10 +2771,14 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
* queue).
*/
if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- bfq_symmetric_scenario(bfqd))
+ !bfq_asymmetric_scenario(bfqd, bfqq))
sl = min_t(u64, sl, BFQ_MIN_TT);
+ else if (bfqq->wr_coeff > 1)
+ sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
bfqd->last_idling_start = ktime_get();
+ bfqd->last_idling_start_jiffies = jiffies;
+
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
HRTIMER_MODE_REL);
bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
@@ -2848,8 +3080,10 @@ static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_requeue_bfqq(bfqd, bfqq, true);
/*
* Resort priority tree of potential close cooperators.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
/*
@@ -3223,13 +3457,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
-static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
-{
- return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- blk_queue_nonrot(bfqq->bfqd->queue) &&
- bfqq->bfqd->hw_tag;
-}
-
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3344,6 +3571,14 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
/*
+ * bfqq expired, so no total service time needs to be computed
+ * any longer: reset state machine for measuring total service
+ * times.
+ */
+ bfqd->rqs_injected = bfqd->wait_dispatch = false;
+ bfqd->waited_rq = NULL;
+
+ /*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
@@ -3352,8 +3587,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
/* bfqq is gone, no more actions on it */
return;
- bfqq->injected_service = 0;
-
/* mark bfqq as waiting a request only if a bic still points to it */
if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
@@ -3497,8 +3730,9 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
}
/*
- * There is a case where idling must be performed not for
- * throughput concerns, but to preserve service guarantees.
+ * There is a case where idling does not have to be performed for
+ * throughput concerns, but to preserve the throughput share of
+ * the process associated with bfqq.
*
* To introduce this case, we can note that allowing the drive
* to enqueue more than one request at a time, and hence
@@ -3514,77 +3748,83 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
* concern about per-process throughput distribution, and
* makes its decisions only on a per-request basis. Therefore,
* the service distribution enforced by the drive's internal
- * scheduler is likely to coincide with the desired
- * device-throughput distribution only in a completely
- * symmetric scenario where:
- * (i) each of these processes must get the same throughput as
- * the others;
- * (ii) the I/O of each process has the same properties, in
- * terms of locality (sequential or random), direction
- * (reads or writes), request sizes, greediness
- * (from I/O-bound to sporadic), and so on.
- * In fact, in such a scenario, the drive tends to treat
- * the requests of each of these processes in about the same
- * way as the requests of the others, and thus to provide
- * each of these processes with about the same throughput
- * (which is exactly the desired throughput distribution). In
- * contrast, in any asymmetric scenario, device idling is
- * certainly needed to guarantee that bfqq receives its
- * assigned fraction of the device throughput (see [1] for
- * details).
- * The problem is that idling may significantly reduce
- * throughput with certain combinations of types of I/O and
- * devices. An important example is sync random I/O, on flash
- * storage with command queueing. So, unless bfqq falls in the
- * above cases where idling also boosts throughput, it would
- * be important to check conditions (i) and (ii) accurately,
- * so as to avoid idling when not strictly needed for service
- * guarantees.
+ * scheduler is likely to coincide with the desired throughput
+ * distribution only in a completely symmetric, or favorably
+ * skewed scenario where:
+ * (i-a) each of these processes must get the same throughput as
+ * the others,
+ * (i-b) in case (i-a) does not hold, it holds that the process
+ * associated with bfqq must receive a lower or equal
+ * throughput than any of the other processes;
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on;
+ *
+ * In fact, in such a scenario, the drive tends to treat the requests
+ * of each process in about the same way as the requests of the
+ * others, and thus to provide each of these processes with about the
+ * same throughput. This is exactly the desired throughput
+ * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
+ * even more convenient distribution for (the process associated with)
+ * bfqq.
*
- * Unfortunately, it is extremely difficult to thoroughly
- * check condition (ii). And, in case there are active groups,
- * it becomes very difficult to check condition (i) too. In
- * fact, if there are active groups, then, for condition (i)
- * to become false, it is enough that an active group contains
- * more active processes or sub-groups than some other active
- * group. More precisely, for condition (i) to hold because of
- * such a group, it is not even necessary that the group is
- * (still) active: it is sufficient that, even if the group
- * has become inactive, some of its descendant processes still
- * have some request already dispatched but still waiting for
- * completion. In fact, requests have still to be guaranteed
- * their share of the throughput even after being
- * dispatched. In this respect, it is easy to show that, if a
- * group frequently becomes inactive while still having
- * in-flight requests, and if, when this happens, the group is
- * not considered in the calculation of whether the scenario
- * is asymmetric, then the group may fail to be guaranteed its
- * fair share of the throughput (basically because idling may
- * not be performed for the descendant processes of the group,
- * but it had to be). We address this issue with the
- * following bi-modal behavior, implemented in the function
- * bfq_symmetric_scenario().
+ * In contrast, in any asymmetric or unfavorable scenario, device
+ * idling (I/O-dispatch plugging) is certainly needed to guarantee
+ * that bfqq receives its assigned fraction of the device throughput
+ * (see [1] for details).
+ *
+ * The problem is that idling may significantly reduce throughput with
+ * certain combinations of types of I/O and devices. An important
+ * example is sync random I/O on flash storage with command
+ * queueing. So, unless bfqq falls in cases where idling also boosts
+ * throughput, it is important to check conditions (i-a), (i-b) and
+ * (ii) accurately, so as to avoid idling when not strictly needed for
+ * service guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly check
+ * condition (ii). And, in case there are active groups, it becomes
+ * very difficult to check conditions (i-a) and (i-b) too. In fact,
+ * if there are active groups, then, for conditions (i-a) or (i-b) to
+ * become false 'indirectly', it is enough that an active group
+ * contains more active processes or sub-groups than some other active
+ * group. More precisely, for conditions (i-a) or (i-b) to become
+ * false because of such a group, it is not even necessary that the
+ * group is (still) active: it is sufficient that, even if the group
+ * has become inactive, some of its descendant processes still have
+ * some request already dispatched but still waiting for
+ * completion. In fact, requests have still to be guaranteed their
+ * share of the throughput even after being dispatched. In this
+ * respect, it is easy to show that, if a group frequently becomes
+ * inactive while still having in-flight requests, and if, when this
+ * happens, the group is not considered in the calculation of whether
+ * the scenario is asymmetric, then the group may fail to be
+ * guaranteed its fair share of the throughput (basically because
+ * idling may not be performed for the descendant processes of the
+ * group, but it had to be). We address this issue with the following
+ * bi-modal behavior, implemented in the function
+ * bfq_asymmetric_scenario().
*
* If there are groups with requests waiting for completion
* (as commented above, some of these groups may even be
* already inactive), then the scenario is tagged as
* asymmetric, conservatively, without checking any of the
- * conditions (i) and (ii). So the device is idled for bfqq.
+ * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
* This behavior matches also the fact that groups are created
* exactly if controlling I/O is a primary concern (to
* preserve bandwidth and latency guarantees).
*
- * On the opposite end, if there are no groups with requests
- * waiting for completion, then only condition (i) is actually
- * controlled, i.e., provided that condition (i) holds, idling
- * is not performed, regardless of whether condition (ii)
- * holds. In other words, only if condition (i) does not hold,
- * then idling is allowed, and the device tends to be
- * prevented from queueing many requests, possibly of several
- * processes. Since there are no groups with requests waiting
- * for completion, then, to control condition (i) it is enough
- * to check just whether all the queues with requests waiting
- * for completion also have the same weight.
+ * On the opposite end, if there are no groups with requests waiting
+ * for completion, then only conditions (i-a) and (i-b) are actually
+ * controlled, i.e., provided that condition (i-a) or (i-b) holds,
+ * idling is not performed, regardless of whether condition (ii)
+ * holds. In other words, only if conditions (i-a) and (i-b) do not
+ * hold, then idling is allowed, and the device tends to be prevented
+ * from queueing many requests, possibly of several processes. Since
+ * there are no groups with requests waiting for completion, then, to
+ * control conditions (i-a) and (i-b) it is enough to check just
+ * whether all the queues with requests waiting for completion also
+ * have the same weight.
*
* Not checking condition (ii) evidently exposes bfqq to the
* risk of getting less throughput than its fair share.
@@ -3636,7 +3876,7 @@ static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
* compound condition that is checked below for deciding
* whether the scenario is asymmetric. To explain this
* compound condition, we need to add that the function
- * bfq_symmetric_scenario checks the weights of only
+ * bfq_asymmetric_scenario checks the weights of only
* non-weight-raised queues, for efficiency reasons (see
* comments on bfq_weights_tree_add()). Then the fact that
* bfqq is weight-raised is checked explicitly here. More
@@ -3664,7 +3904,7 @@ static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
return (bfqq->wr_coeff > 1 &&
bfqd->wr_busy_queues <
bfq_tot_busy_queues(bfqd)) ||
- !bfq_symmetric_scenario(bfqd);
+ bfq_asymmetric_scenario(bfqd, bfqq);
}
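
To make the compound condition above more concrete, here is a minimal userspace sketch of the decision it describes. The structure and all names (toy_bfqd, distinct_weights, groups_with_pending) are illustrative assumptions, not the kernel's data structures; only the shape of the check mirrors idling_needed_for_service_guarantees() and bfq_asymmetric_scenario() as discussed in the comments above.

/* Simplified, userspace model of the idling decision described above.
 * All names and fields are illustrative; the real check lives in
 * idling_needed_for_service_guarantees()/bfq_asymmetric_scenario(). */
#include <stdbool.h>
#include <stdio.h>

struct toy_bfqd {
	int wr_busy_queues;       /* weight-raised busy queues */
	int tot_busy_queues;      /* all busy queues */
	int distinct_weights;     /* distinct weights among busy queues */
	int groups_with_pending;  /* groups with requests not yet completed */
};

static bool toy_asymmetric(const struct toy_bfqd *d)
{
	/* conservatively asymmetric if any group still has pending I/O,
	 * otherwise asymmetric only if busy queues have different weights */
	return d->groups_with_pending > 0 || d->distinct_weights > 1;
}

static bool toy_idling_needed(const struct toy_bfqd *d, bool bfqq_weight_raised)
{
	return (bfqq_weight_raised && d->wr_busy_queues < d->tot_busy_queues) ||
	       toy_asymmetric(d);
}

int main(void)
{
	struct toy_bfqd d = { .wr_busy_queues = 0, .tot_busy_queues = 3,
			      .distinct_weights = 1, .groups_with_pending = 0 };

	printf("symmetric, not raised: idle=%d\n", toy_idling_needed(&d, false));
	d.distinct_weights = 2;
	printf("different weights:     idle=%d\n", toy_idling_needed(&d, false));
	return 0;
}
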
/*
@@ -3740,26 +3980,98 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
-static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+/*
+ * This function chooses the queue from which to pick the next extra
+ * I/O request to inject, if it finds a compatible queue. See the
+ * comments on bfq_update_inject_limit() for details on the injection
+ * mechanism, and for the definitions of the quantities mentioned
+ * below.
+ */
+static struct bfq_queue *
+bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
{
- struct bfq_queue *bfqq;
+ struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
+ unsigned int limit = in_serv_bfqq->inject_limit;
+ /*
+ * If
+ * - bfqq is not weight-raised and therefore does not carry
+ * time-critical I/O,
+ * or
+ * - regardless of whether bfqq is weight-raised, bfqq has
+ * however a long think time, during which it can absorb the
+ * effect of an appropriate number of extra I/O requests
+ * from other queues (see bfq_update_inject_limit for
+ * details on the computation of this number);
+ * then injection can be performed without restrictions.
+ */
+ bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
+ !bfq_bfqq_has_short_ttime(in_serv_bfqq);
/*
- * A linear search; but, with a high probability, very few
- * steps are needed to find a candidate queue, i.e., a queue
- * with enough budget left for its next request. In fact:
+ * If
+ * - the baseline total service time could not be sampled yet,
+ * so the inject limit happens to be still 0, and
+ * - a lot of time has elapsed since the plugging of I/O
+ * dispatching started, so drive speed is being wasted
+ * significantly;
+ * then temporarily raise inject limit to one request.
+ */
+ if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
+ bfq_bfqq_wait_request(in_serv_bfqq) &&
+ time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
+ bfqd->bfq_slice_idle)
+ )
+ limit = 1;
+
+ if (bfqd->rq_in_driver >= limit)
+ return NULL;
+
+ /*
+ * Linear search of the source queue for injection; but, with
+ * a high probability, very few steps are needed to find a
+ * candidate queue, i.e., a queue with enough budget left for
+ * its next request. In fact:
* - BFQ dynamically updates the budget of every queue so as
* to accommodate the expected backlog of the queue;
* - if a queue gets all its requests dispatched as injected
* service, then the queue is removed from the active list
- * (and re-added only if it gets new requests, but with
- * enough budget for its new backlog).
+ * (and re-added only if it gets new requests, but then it
+ * is assigned again enough budget for its new backlog).
*/
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ (in_serv_always_inject || bfqq->wr_coeff > 1) &&
bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
- bfq_bfqq_budget_left(bfqq))
- return bfqq;
+ bfq_bfqq_budget_left(bfqq)) {
+ /*
+ * Allow for only one large in-flight request
+ * on non-rotational devices, for the
+ * following reason. On non-rotational drives,
+ * large requests take much longer than
+ * smaller requests to be served. In addition,
+ * the drive prefers to serve large requests
+ * over small ones, if it can choose. So,
+ * having more than one large request queued
+ * in the drive may easily make the next first
+ * request of the in-service queue wait so
+ * long as to break bfqq's service guarantees. On
+ * the bright side, large requests let the
+ * drive reach a very high throughput, even if
+ * there is only one in-flight large request
+ * at a time.
+ */
+ if (blk_queue_nonrot(bfqd->queue) &&
+ blk_rq_sectors(bfqq->next_rq) >=
+ BFQQ_SECT_THR_NONROT)
+ limit = min_t(unsigned int, 1, limit);
+ else
+ limit = in_serv_bfqq->inject_limit;
+
+ if (bfqd->rq_in_driver < limit) {
+ bfqd->rqs_injected = true;
+ return bfqq;
+ }
+ }
return NULL;
}
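
The gating performed by bfq_choose_bfqq_for_injection() boils down to comparing the number of requests in the drive against a per-queue limit, with a tighter cap for large requests on non-rotational drives. The sketch below models only that comparison in userspace; TOY_SECT_THR_NONROT is a made-up stand-in for BFQQ_SECT_THR_NONROT, whose actual value is not shown in this patch.

/* Userspace sketch of the injection gating above: may one more request be
 * injected, given the in-flight count? Constants and names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_SECT_THR_NONROT 512   /* stand-in for BFQQ_SECT_THR_NONROT */

static bool toy_may_inject(unsigned int inject_limit, unsigned int rq_in_driver,
			   bool nonrot, unsigned int next_rq_sectors)
{
	unsigned int limit = inject_limit;

	/* on non-rotational drives, allow at most one large request in flight */
	if (nonrot && next_rq_sectors >= TOY_SECT_THR_NONROT && limit > 1)
		limit = 1;

	return rq_in_driver < limit;
}

int main(void)
{
	printf("%d\n", toy_may_inject(2, 1, true, 1024)); /* 0: large rq capped */
	printf("%d\n", toy_may_inject(2, 1, true, 8));    /* 1: small rq ok */
	return 0;
}
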
@@ -3846,14 +4158,32 @@ check_queue:
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
*
- * Yet, to boost throughput, inject service from other queues if
- * possible.
+ * Yet, inject service from other queues if it boosts
+ * throughput and is possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- if (bfq_bfqq_injectable(bfqq) &&
- bfqq->injected_service * bfqq->inject_coeff <
- bfqq->entity.service * 10)
+ struct bfq_queue *async_bfqq =
+ bfqq->bic && bfqq->bic->bfqq[0] &&
+ bfq_bfqq_busy(bfqq->bic->bfqq[0]) ?
+ bfqq->bic->bfqq[0] : NULL;
+
+ /*
+ * If the process associated with bfqq also has async
+ * I/O pending, then inject it
+ * unconditionally. Injecting I/O from the same
+ * process can cause no harm to the process. On the
+ * contrary, it can only increase bandwidth and reduce
+ * latency for the process.
+ */
+ if (async_bfqq &&
+ icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
+ bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
+ bfq_bfqq_budget_left(async_bfqq))
+ bfqq = bfqq->bic->bfqq[0];
+ else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
+ !bfq_bfqq_has_short_ttime(bfqq)))
bfqq = bfq_choose_bfqq_for_injection(bfqd);
else
bfqq = NULL;
@@ -3945,15 +4275,15 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
bfq_bfqq_served(bfqq, service_to_charge);
- bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
+ bfqd->wait_dispatch = false;
+ bfqd->waited_rq = rq;
+ }
- if (bfqq != bfqd->in_service_queue) {
- if (likely(bfqd->in_service_queue))
- bfqd->in_service_queue->injected_service +=
- bfq_serv_to_charge(rq, bfqq);
+ bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue)
goto return_rq;
- }
/*
* If weight raising has to terminate for bfqq, then next
@@ -4384,13 +4714,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
- /*
- * Aggressively inject a lot of service: up to 90%.
- * This coefficient remains constant during bfqq life,
- * but this behavior might be changed, after enough
- * testing and tuning.
- */
- bfqq->inject_coeff = 1;
} else
bfq_clear_bfqq_sync(bfqq);
@@ -4529,6 +4852,11 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
bfqq->seek_history <<= 1;
bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
+
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
+ BFQQ_TOTALLY_SEEKY(bfqq))
+ bfq_bfqq_end_wr(bfqq);
}
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
@@ -4823,6 +5151,9 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
+
+ bfqd->nonrot_with_queueing =
+ blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -4950,6 +5281,147 @@ static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
}
/*
+ * The processes associated with bfqq may happen to generate their
+ * cumulative I/O at a lower rate than the rate at which the device
+ * could serve the same I/O. This is rather probable, e.g., if only
+ * one process is associated with bfqq and the device is an SSD. It
+ * results in bfqq becoming often empty while in service. In this
+ * respect, if BFQ is allowed to switch to another queue when bfqq
+ * remains empty, then the device goes on being fed with I/O requests,
+ * and the throughput is not affected. In contrast, if BFQ is not
+ * allowed to switch to another queue---because bfqq is sync and
+ * I/O-dispatch needs to be plugged while bfqq is temporarily
+ * empty---then, during the service of bfqq, there will be frequent
+ * "service holes", i.e., time intervals during which bfqq gets empty
+ * and the device can only consume the I/O already queued in its
+ * hardware queues. During service holes, the device may even end up
+ * idle. In the end, during the service of bfqq, the device
+ * is driven at a lower speed than the one it can reach with the kind
+ * of I/O flowing through bfqq.
+ *
+ * To counter this loss of throughput, BFQ implements a "request
+ * injection mechanism", which tries to fill the above service holes
+ * with I/O requests taken from other queues. The hard part in this
+ * mechanism is finding the right amount of I/O to inject, so as to
+ * both boost throughput and not break bfqq's bandwidth and latency
+ * guarantees. In this respect, the mechanism maintains a per-queue
+ * inject limit, computed as below. While bfqq is empty, the injection
+ * mechanism dispatches extra I/O requests only until the total number
+ * of I/O requests in flight---i.e., already dispatched but not yet
+ * completed---remains lower than this limit.
+ *
+ * A first definition comes in handy to introduce the algorithm by
+ * which the inject limit is computed. We define as first request for
+ * bfqq, an I/O request for bfqq that arrives while bfqq is in
+ * service, and causes bfqq to switch from empty to non-empty. The
+ * algorithm updates the limit as a function of the effect of
+ * injection on the service times of only the first requests of
+ * bfqq. The reason for this restriction is that these are the
+ * requests whose service time is affected most, because they are the
+ * first to arrive after injection possibly occurred.
+ *
+ * To evaluate the effect of injection, the algorithm measures the
+ * "total service time" of first requests. We define as total service
+ * time of an I/O request, the time that elapses since when the
+ * request is enqueued into bfqq, to when it is completed. This
+ * quantity allows the whole effect of injection to be measured. It is
+ * easy to see why. Suppose that some requests of other queues are
+ * actually injected while bfqq is empty, and that a new request R
+ * then arrives for bfqq. If the device does start to serve all or
+ * part of the injected requests during the service hole, then,
+ * because of this extra service, it may delay the next invocation of
+ * the dispatch hook of BFQ. Then, even after R gets eventually
+ * dispatched, the device may delay the actual service of R if it is
+ * still busy serving the extra requests, or if it decides to serve,
+ * before R, some extra request still present in its queues. As a
+ * conclusion, the cumulative extra delay caused by injection can be
+ * easily evaluated by just comparing the total service time of first
+ * requests with and without injection.
+ *
+ * The limit-update algorithm works as follows. On the arrival of a
+ * first request of bfqq, the algorithm measures the total service time
+ * of the request only if one of the three cases below holds, and, for each
+ * case, it updates the limit as described below:
+ *
+ * (1) If there is no in-flight request. This gives a baseline for the
+ * total service time of the requests of bfqq. If the baseline has
+ * not been computed yet, then, after computing it, the limit is
+ * set to 1, to start boosting throughput, and to prepare the
+ * ground for the next case. If the baseline has already been
+ * computed, then it is updated, in case the new sample turns out
+ * to be lower than the previous value.
+ *
+ * (2) If the limit is higher than 0 and there are in-flight
+ * requests. By comparing the total service time in this case with
+ * the above baseline, it is possible to know at which extent the
+ * current value of the limit is inflating the total service
+ * time. If the inflation is below a certain threshold, then bfqq
+ * is assumed to be suffering from no perceivable loss of its
+ * service guarantees, and the limit is even tentatively
+ * increased. If the inflation is above the threshold, then the
+ * limit is decreased. Due to the lack of any hysteresis, this
+ * logic makes the limit oscillate even in steady workload
+ * conditions. Yet we opted for it, because it is fast in reaching
+ * the best value for the limit, as a function of the current I/O
+ * workload. To reduce oscillations, this step is disabled for a
+ * short time interval after the limit happens to be decreased.
+ *
+ * (3) Periodically, after resetting the limit, to make sure that the
+ * limit eventually drops in case the workload changes. This is
+ * needed because, after the limit has gone safely up for a
+ * certain workload, it is impossible to guess whether the
+ * baseline total service time may have changed, without measuring
+ * it again without injection. A more effective version of this
+ * step might be to just sample the baseline, by interrupting
+ * injection only once, and then to reset/lower the limit only if
+ * the total service time with the current limit does happen to be
+ * too large.
+ *
+ * More details on each step are provided in the comments on the
+ * pieces of code that implement these steps: the branch handling the
+ * transition from empty to non-empty in bfq_add_request(), the branch
+ * handling injection in bfq_select_queue(), and the function
+ * bfq_choose_bfqq_for_injection(). These comments also explain some
+ * exceptions, made by the injection mechanism in some special cases.
+ */
+static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+ unsigned int old_limit = bfqq->inject_limit;
+
+ if (bfqq->last_serv_time_ns > 0) {
+ u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
+
+ if (tot_time_ns >= threshold && old_limit > 0) {
+ bfqq->inject_limit--;
+ bfqq->decrease_time_jif = jiffies;
+ } else if (tot_time_ns < threshold &&
+ old_limit < bfqd->max_rq_in_driver<<1)
+ bfqq->inject_limit++;
+ }
+
+ /*
+ * Either we still have to compute the base value for the
+ * total service time, and there seem to be the right
+ * conditions to do it, or we can lower the last base value
+ * computed.
+ */
+ if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 0) ||
+ tot_time_ns < bfqq->last_serv_time_ns) {
+ bfqq->last_serv_time_ns = tot_time_ns;
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
+
+ /* update complete, not waiting for any request completion any longer */
+ bfqd->waited_rq = NULL;
+}
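
A compact way to see the three cases of the limit-update algorithm at work is to drive the same arithmetic from a toy userspace program. The model below mirrors only the comparisons in bfq_update_inject_limit() (the 1.5x threshold on the baseline, the bounded increment, the baseline refresh); the surrounding types and the call sequence in main() are assumptions made for illustration.

/* Toy, userspace model of the limit-update rule above. */
#include <stdint.h>
#include <stdio.h>

struct toy_q {
	uint64_t last_serv_time_ns;	/* baseline total service time */
	unsigned int inject_limit;
};

static void toy_update_limit(struct toy_q *q, uint64_t tot_time_ns,
			     unsigned int max_rq_in_driver, int rq_in_driver)
{
	unsigned int old_limit = q->inject_limit;

	if (q->last_serv_time_ns > 0) {
		uint64_t threshold = (q->last_serv_time_ns * 3) >> 1; /* 1.5x */

		if (tot_time_ns >= threshold && old_limit > 0)
			q->inject_limit--;		/* too much inflation */
		else if (tot_time_ns < threshold &&
			 old_limit < max_rq_in_driver << 1)
			q->inject_limit++;		/* room to grow */
	}

	/* take or refresh the baseline, and make sure injection is tried */
	if ((q->last_serv_time_ns == 0 && rq_in_driver == 0) ||
	    tot_time_ns < q->last_serv_time_ns) {
		q->last_serv_time_ns = tot_time_ns;
		q->inject_limit = old_limit > 1 ? old_limit : 1;
	}
}

int main(void)
{
	struct toy_q q = { 0, 0 };

	toy_update_limit(&q, 100000, 4, 0); /* case (1): baseline 100us, limit -> 1 */
	toy_update_limit(&q, 120000, 4, 2); /* case (2): below 150us, limit -> 2  */
	toy_update_limit(&q, 200000, 4, 2); /* case (2): above 150us, limit -> 1  */
	printf("baseline=%lluns limit=%u\n",
	       (unsigned long long)q.last_serv_time_ns, q.inject_limit);
	return 0;
}
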
+
+/*
* Handle either a requeue or a finish for rq. The things to do are
* the same in both cases: all references to rq are to be dropped. In
* particular, rq is considered completed from the point of view of
@@ -4993,6 +5465,9 @@ static void bfq_finish_requeue_request(struct request *rq)
spin_lock_irqsave(&bfqd->lock, flags);
+ if (rq == bfqd->waited_rq)
+ bfq_update_inject_limit(bfqd, bfqq);
+
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
@@ -5156,7 +5631,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
* preparation is that, after the prepare_request hook is invoked for
* rq, rq may still be transformed into a request with no icq, i.e., a
* request not associated with any queue. No bfq hook is invoked to
- * signal this tranformation. As a consequence, should these
+ * signal this transformation. As a consequence, should these
* preparation operations be performed when the prepare_request hook
* is invoked, and should rq be transformed one moment later, bfq
* would end up in an inconsistent state, because it would have
@@ -5247,7 +5722,29 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
}
}
- if (unlikely(bfq_bfqq_just_created(bfqq)))
+ /*
+ * Consider bfqq as possibly belonging to a burst of newly
+ * created queues only if:
+ * 1) A burst is actually happening (bfqd->burst_size > 0)
+ * or
+ * 2) There is no other active queue. In fact, if, in
+ * contrast, there are active queues not belonging to the
+ * possible burst bfqq may belong to, then there is no gain
+ * in considering bfqq as belonging to a burst, and
+ * therefore in not weight-raising bfqq. See comments on
+ * bfq_handle_burst().
+ *
+ * This filtering also helps eliminating false positives,
+ * occurring when bfqq does not belong to an actual large
+ * burst, but some background task (e.g., a service) happens
+ * to trigger the creation of new queues very close to when
+ * bfqq and its possible companion queues are created. See
+ * comments on bfq_handle_burst() for further details also on
+ * this issue.
+ */
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+ (bfqd->burst_size > 0 ||
+ bfq_tot_busy_queues(bfqd) == 0)))
bfq_handle_burst(bfqd, bfqq);
return bfqq;
@@ -5507,7 +6004,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
HRTIMER_MODE_REL);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
- bfqd->queue_weights_tree = RB_ROOT;
+ bfqd->queue_weights_tree = RB_ROOT_CACHED;
bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
@@ -5515,6 +6012,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
+ bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
bfqd->bfq_max_budget = bfq_default_max_budget;
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 86394e503ca9..eba7cd449ab4 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -32,6 +32,8 @@
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
+#define MAX_PID_STR_LENGTH 12
+
/*
* Soft real-time applications are extremely more latency sensitive
* than interactive ones. Over-raise the weight of the former to
@@ -89,7 +91,7 @@ struct bfq_service_tree {
* expiration. This peculiar definition allows for the following
* optimization, not yet exploited: while a given entity is still in
* service, we already know which is the best candidate for next
- * service among the other active entitities in the same parent
+ * service among the other active entities in the same parent
* entity. We can then quickly compare the timestamps of the
* in-service entity with those of such best candidate.
*
@@ -140,7 +142,7 @@ struct bfq_weight_counter {
*
* Unless cgroups are used, the weight value is calculated from the
* ioprio to export the same interface as CFQ. When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
+ * "well-behaved" queues (i.e., queues that do not spend too much
* time to consume their budget and have true sequential behavior, and
* when there are no external factors breaking anticipation) the
* relative weights at each level of the cgroups hierarchy should be
@@ -240,6 +242,13 @@ struct bfq_queue {
/* next ioprio and ioprio class if a change is in progress */
unsigned short new_ioprio, new_ioprio_class;
+ /* last total-service-time sample, see bfq_update_inject_limit() */
+ u64 last_serv_time_ns;
+ /* limit for request injection */
+ unsigned int inject_limit;
+ /* last time the inject limit has been decreased, in jiffies */
+ unsigned long decrease_time_jif;
+
/*
* Shared bfq_queue if queue is cooperating with one or more
* other queues.
@@ -357,29 +366,6 @@ struct bfq_queue {
/* max service rate measured so far */
u32 max_service_rate;
- /*
- * Ratio between the service received by bfqq while it is in
- * service, and the cumulative service (of requests of other
- * queues) that may be injected while bfqq is empty but still
- * in service. To increase precision, the coefficient is
- * measured in tenths of unit. Here are some example of (1)
- * ratios, (2) resulting percentages of service injected
- * w.r.t. to the total service dispatched while bfqq is in
- * service, and (3) corresponding values of the coefficient:
- * 1 (50%) -> 10
- * 2 (33%) -> 20
- * 10 (9%) -> 100
- * 9.9 (9%) -> 99
- * 1.5 (40%) -> 15
- * 0.5 (66%) -> 5
- * 0.1 (90%) -> 1
- *
- * So, if the coefficient is lower than 10, then
- * injected service is more than bfqq service.
- */
- unsigned int inject_coeff;
- /* amount of service injected in current service slot */
- unsigned int injected_service;
};
/**
@@ -419,6 +405,15 @@ struct bfq_io_cq {
bool was_in_burst_list;
/*
+ * Save the weight when a merge occurs, to be able
+ * to restore it in case of split. If the weight is not
+ * correctly restored when the queue is recycled,
+ * then the weight of the recycled queue could differ
+ * from the weight of the original queue.
+ */
+ unsigned int saved_weight;
+
+ /*
* Similar to previous fields: save wr information.
*/
unsigned long saved_wr_coeff;
@@ -450,7 +445,7 @@ struct bfq_data {
* weight-raised @bfq_queue (see the comments to the functions
* bfq_weights_tree_[add|remove] for further details).
*/
- struct rb_root queue_weights_tree;
+ struct rb_root_cached queue_weights_tree;
/*
* Number of groups with at least one descendant process that
@@ -513,6 +508,9 @@ struct bfq_data {
/* number of requests dispatched and waiting for completion */
int rq_in_driver;
+ /* true if the device is non-rotational and performs queueing */
+ bool nonrot_with_queueing;
+
/*
* Maximum number of requests in driver in the last
* @hw_tag_samples completed requests.
@@ -544,6 +542,26 @@ struct bfq_data {
/* time of last request completion (ns) */
u64 last_completion;
+ /* time of last transition from empty to non-empty (ns) */
+ u64 last_empty_occupied_ns;
+
+ /*
+ * Flag set to activate the sampling of the total service time
+ * of a just-arrived first I/O request (see
+ * bfq_update_inject_limit()). This will cause the setting of
+ * waited_rq when the request is finally dispatched.
+ */
+ bool wait_dispatch;
+ /*
+ * If set, then bfq_update_inject_limit() is invoked when
+ * waited_rq is eventually completed.
+ */
+ struct request *waited_rq;
+ /*
+ * True if some request has been injected during the last service hole.
+ */
+ bool rqs_injected;
+
/* time of first rq dispatch in current observation interval (ns) */
u64 first_dispatch;
/* time of last rq dispatch in current observation interval (ns) */
@@ -553,6 +571,7 @@ struct bfq_data {
ktime_t last_budget_start;
/* beginning of the last idle slice */
ktime_t last_idling_start;
+ unsigned long last_idling_start_jiffies;
/* number of samples in current observation interval */
int peak_rate_samples;
@@ -898,10 +917,10 @@ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1008,13 +1027,23 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */
/* Logging facilities. */
+static inline void bfq_pid_to_str(int pid, char *str, int len)
+{
+ if (pid != -1)
+ snprintf(str, len, "%d", pid);
+ else
+ snprintf(str, len, "SHARED-");
+}
+
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
- "bfq%d%c " fmt, (bfqq)->pid, \
+ "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
} while (0)
@@ -1025,10 +1054,13 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#else /* CONFIG_BFQ_GROUP_IOSCHED */
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- ##args)
+ ##args); \
+} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
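
Since bfq_pid_to_str() is a plain snprintf() helper, its effect on the trace prefix can be shown with a standalone program: a regular queue keeps its numeric pid, while a shared queue (pid == -1) is printed as "SHARED-". The main() driver below is illustrative only.

/* Standalone illustration of the pid-to-string helper introduced above. */
#include <stdio.h>

#define MAX_PID_STR_LENGTH 12

static void bfq_pid_to_str(int pid, char *str, int len)
{
	if (pid != -1)
		snprintf(str, len, "%d", pid);
	else
		snprintf(str, len, "SHARED-");
}

int main(void)
{
	char pid_str[MAX_PID_STR_LENGTH];

	bfq_pid_to_str(1234, pid_str, sizeof(pid_str));
	printf("bfq%sS queue for a normal process\n", pid_str);

	bfq_pid_to_str(-1, pid_str, sizeof(pid_str));
	printf("bfq%sS queue shared by cooperating processes\n", pid_str);
	return 0;
}
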
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ae4d000ac0af..48d899cfbe03 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -59,7 +59,7 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
* bfq_update_next_in_service - update sd->next_in_service
* @sd: sched_data for which to perform the update.
* @new_entity: if not NULL, pointer to the entity whose activation,
- * requeueing or repositionig triggered the invocation of
+ * requeueing or repositioning triggered the invocation of
* this function.
* @expiration: if true, this function is being invoked after the
* expiration of the in-service entity
@@ -90,7 +90,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
/*
* If this update is triggered by the activation, requeueing
- * or repositiong of an entity that does not coincide with
+ * or repositioning of an entity that does not coincide with
* sd->next_in_service, then a full lookup in the active tree
* can be avoided. In fact, it is enough to check whether the
* just-modified entity has the same priority as
@@ -737,7 +737,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned int prev_weight, new_weight;
struct bfq_data *bfqd = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_sched_data *sd;
struct bfq_group *bfqg;
@@ -1396,7 +1396,7 @@ left:
* In this first case, update the virtual time in @st too (see the
* comments on this update inside the function).
*
- * In constrast, if there is an in-service entity, then return the
+ * In contrast, if there is an in-service entity, then return the
* entity that would be set in service if not only the above
* conditions, but also the next one held true: the currently
* in-service entity, on expiration,
@@ -1479,12 +1479,12 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
* is being invoked as a part of the expiration path
* of the in-service queue. In this case, even if
* sd->in_service_entity is not NULL,
- * sd->in_service_entiy at this point is actually not
+ * sd->in_service_entity at this point is actually not
* in service any more, and, if needed, has already
* been properly queued or requeued into the right
* tree. The reason why sd->in_service_entity is still
* not NULL here, even if expiration is true, is that
- * sd->in_service_entiy is reset as a last step in the
+ * sd->in_service_entity is reset as a last step in the
* expiration path. So, if expiration is true, tell
* __bfq_lookup_next_entity that there is no
* sd->in_service_entity.
diff --git a/block/bio.c b/block/bio.c
index 716510ecd7ff..5959141d4e46 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -647,25 +647,70 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
}
EXPORT_SYMBOL(bio_clone_fast);
+static inline bool page_is_mergeable(const struct bio_vec *bv,
+ struct page *page, unsigned int len, unsigned int off,
+ bool same_page)
+{
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
+ bv->bv_offset + bv->bv_len - 1;
+ phys_addr_t page_addr = page_to_phys(page);
+
+ if (vec_end_addr + 1 != page_addr + off)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
+ return false;
+
+ if ((vec_end_addr & PAGE_MASK) != page_addr) {
+ if (same_page)
+ return false;
+ if (pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Check if @page can be added to the current segment (@bv), and make
+ * sure to call it only if page_is_mergeable(@bv, @page) is true.
+ */
+static bool can_add_page_to_seg(struct request_queue *q,
+ struct bio_vec *bv, struct page *page, unsigned len,
+ unsigned offset)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+
+ if ((addr1 | mask) != (addr2 | mask))
+ return false;
+
+ if (bv->bv_len + len > queue_max_segment_size(q))
+ return false;
+
+ return true;
+}
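
The two helpers above encode, respectively, physical contiguity with the previous bvec and the queue's segment constraints. The userspace sketch below redoes both checks on plain integer addresses with made-up values (a 64 KiB boundary mask and segment size), so the arithmetic can be followed without struct page or struct request_queue.

/* Toy model of the contiguity and segment-boundary checks above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool toy_contiguous(uint64_t vec_end_addr, uint64_t page_addr,
			   unsigned int off)
{
	return vec_end_addr + 1 == page_addr + off;
}

static bool toy_fits_segment(uint64_t addr1, uint64_t addr2_last,
			     unsigned long boundary_mask,
			     unsigned int cur_len, unsigned int add_len,
			     unsigned int max_seg_size)
{
	if ((addr1 | boundary_mask) != (addr2_last | boundary_mask))
		return false;		/* would cross the segment boundary */
	return cur_len + add_len <= max_seg_size;
}

int main(void)
{
	/* existing bvec: 4 KiB at 0x10000; candidate page chunk at 0x11000 */
	uint64_t bv_start = 0x10000, bv_end = 0x10fff, page = 0x11000;

	printf("contiguous: %d\n", toy_contiguous(bv_end, page, 0));
	printf("fits 64K segment: %d\n",
	       toy_fits_segment(bv_start, page + 0xfff, 0xffff, 4096, 4096,
				65536));
	return 0;
}
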
+
/**
- * bio_add_pc_page - attempt to add page to bio
+ * __bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @put_same_page: put the page if it is the same as the last added page
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
* limitations. The target block device must allow bio's up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
- * This should only be used by REQ_PC bios.
+ * This should only be used by passthrough bios.
*/
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
+int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ bool put_same_page)
{
- int retried_segments = 0;
struct bio_vec *bvec;
/*
@@ -683,11 +728,14 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* a consecutive offset. Optimize this special case.
*/
if (bio->bi_vcnt > 0) {
- struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-
- if (page == prev->bv_page &&
- offset == prev->bv_offset + prev->bv_len) {
- prev->bv_len += len;
+ bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+ if (page == bvec->bv_page &&
+ offset == bvec->bv_offset + bvec->bv_len) {
+ if (put_same_page)
+ put_page(page);
+ bvec_merge:
+ bvec->bv_len += len;
bio->bi_iter.bi_size += len;
goto done;
}
@@ -696,13 +744,20 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (bvec_gap_to_prev(q, prev, offset))
+ if (bvec_gap_to_prev(q, bvec, offset))
return 0;
+
+ if (page_is_mergeable(bvec, page, len, offset, false) &&
+ can_add_page_to_seg(q, bvec, page, len, offset))
+ goto bvec_merge;
}
if (bio_full(bio))
return 0;
+ if (bio->bi_phys_segments >= queue_max_segments(q))
+ return 0;
+
/*
* setup the new entry, we might clear it again later if we
* cannot add the page
@@ -712,38 +767,19 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
- bio->bi_phys_segments++;
bio->bi_iter.bi_size += len;
- /*
- * Perform a recount if the number of segments is greater
- * than queue_max_segments(q).
- */
-
- while (bio->bi_phys_segments > queue_max_segments(q)) {
-
- if (retried_segments)
- goto failed;
-
- retried_segments = 1;
- blk_recount_segments(q, bio);
- }
-
- /* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
- bio_clear_flag(bio, BIO_SEG_VALID);
-
done:
+ bio->bi_phys_segments = bio->bi_vcnt;
+ bio_set_flag(bio, BIO_SEG_VALID);
return len;
+}
+EXPORT_SYMBOL(__bio_add_pc_page);
- failed:
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- bio->bi_vcnt--;
- bio->bi_iter.bi_size -= len;
- blk_recount_segments(q, bio);
- return 0;
+int bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ return __bio_add_pc_page(q, bio, page, len, offset, false);
}
EXPORT_SYMBOL(bio_add_pc_page);
@@ -770,18 +806,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
- phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
- bv->bv_offset + bv->bv_len - 1;
- phys_addr_t page_addr = page_to_phys(page);
-
- if (vec_end_addr + 1 != page_addr + off)
- return false;
- if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
- return false;
- bv->bv_len += len;
- bio->bi_iter.bi_size += len;
- return true;
+ if (page_is_mergeable(bv, page, len, off, same_page)) {
+ bv->bv_len += len;
+ bio->bi_iter.bi_size += len;
+ return true;
+ }
}
return false;
}
@@ -836,6 +866,26 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+static void bio_get_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i, iter_all)
+ get_page(bvec->bv_page);
+}
+
+static void bio_release_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i, iter_all)
+ put_page(bvec->bv_page);
+}
+
static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
const struct bio_vec *bv = iter->bvec;
@@ -848,20 +898,10 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
size = bio_add_page(bio, bv->bv_page, len,
bv->bv_offset + iter->iov_offset);
- if (size == len) {
- if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
- struct page *page;
- int i;
-
- mp_bvec_for_each_page(page, bv, i)
- get_page(page);
- }
-
- iov_iter_advance(iter, size);
- return 0;
- }
-
- return -EINVAL;
+ if (unlikely(size != len))
+ return -EINVAL;
+ iov_iter_advance(iter, size);
+ return 0;
}
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
@@ -934,29 +974,24 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
const bool is_bvec = iov_iter_is_bvec(iter);
- unsigned short orig_vcnt = bio->bi_vcnt;
+ int ret;
- /*
- * If this is a BVEC iter, then the pages are kernel pages. Don't
- * release them on IO completion, if the caller asked us to.
- */
- if (is_bvec && iov_iter_bvec_no_ref(iter))
- bio_set_flag(bio, BIO_NO_PAGE_REF);
+ if (WARN_ON_ONCE(bio->bi_vcnt))
+ return -EINVAL;
do {
- int ret;
-
if (is_bvec)
ret = __bio_iov_bvec_add_pages(bio, iter);
else
ret = __bio_iov_iter_get_pages(bio, iter);
+ } while (!ret && iov_iter_count(iter) && !bio_full(bio));
- if (unlikely(ret))
- return bio->bi_vcnt > orig_vcnt ? 0 : ret;
-
- } while (iov_iter_count(iter) && !bio_full(bio));
+ if (iov_iter_bvec_no_ref(iter))
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ else
+ bio_get_pages(bio);
- return 0;
+ return bio->bi_vcnt ? 0 : ret;
}
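
The reworked loop above changes when page references are taken and what a partial fill means: references (or the BIO_NO_PAGE_REF flag) are applied once after the loop, and the function reports success as long as at least one vector was added. The toy model below imitates only that control flow; all types and limits are invented for the example.

/* Userspace model of the add-until-full loop and late reference handling. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_VECS 4

struct toy_bio {
	int vcnt;
	bool no_page_ref;
};

static int toy_add_pages(struct toy_bio *bio, int *remaining)
{
	if (*remaining <= 0)
		return -22;		/* nothing left to add (-EINVAL) */
	bio->vcnt++;
	(*remaining)--;
	return 0;
}

static int toy_get_pages(struct toy_bio *bio, int remaining, bool iter_no_ref)
{
	int ret;

	do {
		ret = toy_add_pages(bio, &remaining);
	} while (!ret && remaining > 0 && bio->vcnt < TOY_MAX_VECS);

	if (iter_no_ref)
		bio->no_page_ref = true;	/* caller keeps the references */
	else
		printf("taking %d page references\n", bio->vcnt);

	return bio->vcnt ? 0 : ret;	/* partial fill still succeeds */
}

int main(void)
{
	struct toy_bio bio = { 0, false };
	int ret = toy_get_pages(&bio, 6, false);

	/* 6 pages requested, bio holds 4: partial add, still returns 0 */
	printf("ret=%d vcnt=%d\n", ret, bio.vcnt);
	return 0;
}
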
static void submit_bio_wait_endio(struct bio *bio)
@@ -1388,21 +1423,14 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs))
+ if (!__bio_add_pc_page(q, bio, page, n, offs,
+ true))
break;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(page);
-
added += n;
bytes -= n;
offs = 0;
@@ -1659,16 +1687,6 @@ void bio_set_pages_dirty(struct bio *bio)
}
}
-static void bio_release_pages(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, i, iter_all)
- put_page(bvec->bv_page);
-}
-
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
* If they are, then fine. If, however, some pages are clean then they must
@@ -2203,6 +2221,9 @@ static int __init init_bio(void)
bio_slab_nr = 0;
bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
GFP_KERNEL);
+
+ BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
+
if (!bio_slabs)
panic("bio: can't allocate bios\n");
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1c9d4f0f96ea..247b17f2a0f6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -267,23 +267,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
goto split;
}
- if (bvprvp) {
- if (seg_size + bv.bv_len > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprvp, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- bvprvp = &bvprv;
- sectors += bv.bv_len >> 9;
-
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
-
- continue;
- }
-new_segment:
if (nsegs == max_segs)
goto split;
@@ -370,12 +353,12 @@ EXPORT_SYMBOL(blk_queue_split);
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
- struct bio_vec bv, bvprv = { NULL };
- int prev = 0;
+ struct bio_vec uninitialized_var(bv), bvprv = { NULL };
unsigned int seg_size, nr_phys_segs;
unsigned front_seg_size;
struct bio *fbio, *bbio;
struct bvec_iter iter;
+ bool new_bio = false;
if (!bio)
return 0;
@@ -396,7 +379,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
nr_phys_segs = 0;
for_each_bio(bio) {
bio_for_each_bvec(bv, bio, iter) {
- if (prev) {
+ if (new_bio) {
if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
@@ -404,7 +387,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
goto new_segment;
seg_size += bv.bv_len;
- bvprv = bv;
if (nr_phys_segs == 1 && seg_size >
front_seg_size)
@@ -413,12 +395,15 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
continue;
}
new_segment:
- bvprv = bv;
- prev = 1;
bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
&front_seg_size, NULL, UINT_MAX);
+ new_bio = false;
}
bbio = bio;
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bv;
+ new_bio = true;
+ }
}
fbio->bi_seg_front_size = front_seg_size;
@@ -484,79 +469,85 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
struct scatterlist **sg)
{
unsigned nbytes = bvec->bv_len;
- unsigned nsegs = 0, total = 0, offset = 0;
+ unsigned nsegs = 0, total = 0;
while (nbytes > 0) {
- unsigned seg_size;
- struct page *pg;
- unsigned idx;
+ unsigned offset = bvec->bv_offset + total;
+ unsigned len = min(get_max_segment_size(q, offset), nbytes);
*sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, bvec->bv_page, len, offset);
- seg_size = get_max_segment_size(q, bvec->bv_offset + total);
- seg_size = min(nbytes, seg_size);
-
- offset = (total + bvec->bv_offset) % PAGE_SIZE;
- idx = (total + bvec->bv_offset) / PAGE_SIZE;
- pg = bvec_nth_page(bvec->bv_page, idx);
-
- sg_set_page(*sg, pg, seg_size, offset);
-
- total += seg_size;
- nbytes -= seg_size;
+ total += len;
+ nbytes -= len;
nsegs++;
}
return nsegs;
}
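
The rewritten blk_bvec_map_sg() no longer walks pages one by one; it repeatedly carves off up to get_max_segment_size() bytes starting at the current offset. The userspace model below reproduces that loop with a fixed 64 KiB limit standing in for the real helper, which additionally honors the segment boundary mask.

/* Toy model of cutting a multi-page bvec into scatterlist entries. */
#include <stdio.h>

static unsigned toy_max_segment_size(unsigned offset)
{
	(void)offset;
	return 64 * 1024;	/* illustrative only */
}

static unsigned toy_bvec_map_sg(unsigned bv_offset, unsigned bv_len)
{
	unsigned nbytes = bv_len, total = 0, nsegs = 0;

	while (nbytes > 0) {
		unsigned offset = bv_offset + total;
		unsigned len = toy_max_segment_size(offset);

		if (len > nbytes)
			len = nbytes;
		printf("sg[%u]: offset=%u len=%u\n", nsegs, offset, len);
		total += len;
		nbytes -= len;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	/* a 150 KiB bvec maps to three scatterlist entries of 64+64+22 KiB */
	printf("nsegs=%u\n", toy_bvec_map_sg(0, 150 * 1024));
	return 0;
}
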
-static inline void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec *bvprv,
- struct scatterlist **sg, int *nsegs)
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
+ struct scatterlist *sglist, struct scatterlist **sg)
+{
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
+ return 1;
+}
+
+/* only try to merge bvecs into one sg if they are from two bios */
+static inline bool
+__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
+ struct bio_vec *bvprv, struct scatterlist **sg)
{
int nbytes = bvec->bv_len;
- if (*sg) {
- if ((*sg)->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprv, bvec))
- goto new_segment;
+ if (!*sg)
+ return false;
- (*sg)->length += nbytes;
- } else {
-new_segment:
- if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
- *sg = blk_next_sg(sg, sglist);
- sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
- (*nsegs) += 1;
- } else
- (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
- }
- *bvprv = *bvec;
-}
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ return false;
-static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
- struct scatterlist *sglist, struct scatterlist **sg)
-{
- *sg = sglist;
- sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
- return 1;
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
+ return false;
+
+ (*sg)->length += nbytes;
+
+ return true;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
- struct bio_vec bvec, bvprv = { NULL };
+ struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
struct bvec_iter iter;
int nsegs = 0;
+ bool new_bio = false;
- for_each_bio(bio)
- bio_for_each_bvec(bvec, bio, iter)
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
- &nsegs);
+ for_each_bio(bio) {
+ bio_for_each_bvec(bvec, bio, iter) {
+ /*
+ * Only try to merge bvecs from two different bios,
+ * since merging within a single bio has already
+ * been done when its pages were added
+ */
+ if (new_bio &&
+ __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
+ goto next_bvec;
+
+ if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+ nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
+ else
+ nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
+ next_bvec:
+ new_bio = false;
+ }
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bvec;
+ new_bio = true;
+ }
+ }
return nsegs;
}
@@ -572,9 +563,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs = 0;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+ nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
- nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+ nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
diff --git a/block/blk.h b/block/blk.h
index 5d636ee41663..e27fd1512e4b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
if (addr1 + vec1->bv_len != addr2)
return false;
- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
return false;
if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
return false;
diff --git a/block/elevator.c b/block/elevator.c
index d6d835a08de6..2e5399d9f40f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -509,8 +509,6 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
- char *def = "";
-
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
@@ -535,8 +533,8 @@ int elv_register(struct elevator_type *e)
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
- def);
+ printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
+
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
diff --git a/block/genhd.c b/block/genhd.c
index 703267865f14..1d0d25f7b0fe 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1628,12 +1628,11 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
/*
* If device-specific poll interval is set, always use it. If
- * the default is being used, poll iff there are events which
- * can't be monitored asynchronously.
+ * the default is being used, poll if the POLL flag is set.
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
- else if (disk->events & ~disk->async_events)
+ else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
@@ -1843,11 +1842,13 @@ static void disk_check_events(struct disk_events *ev,
/*
* Tell userland about new events. Only the events listed in
- * @disk->events are reported. Unlisted events are processed the
- * same internally but never get reported to userland.
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & disk->events & (1 << i))
+ if ((events & disk->events & (1 << i)) &&
+ (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
@@ -1860,6 +1861,7 @@ static void disk_check_events(struct disk_events *ev,
*
* events : list of all supported events
* events_async : list of events which can be detected w/o polling
+ * (always empty, only for backwards compatibility)
* events_poll_msecs : polling interval, 0: disable, -1: system default
*/
static ssize_t __disk_events_show(unsigned int events, char *buf)
@@ -1884,15 +1886,16 @@ static ssize_t disk_events_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
return __disk_events_show(disk->events, buf);
}
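
With this series, which events a driver supports (disk->events) is decoupled from how they are handled (disk->event_flags: uevent reporting and/or polling). The sketch below models that split with invented flag values; the real flags are DISK_EVENT_FLAG_UEVENT and DISK_EVENT_FLAG_POLL as used in the hunks above.

/* Minimal model of the events/event_flags split described above. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_EVENT_MEDIA_CHANGE	(1 << 0)
#define TOY_FLAG_UEVENT		(1 << 0)
#define TOY_FLAG_POLL		(1 << 1)

struct toy_disk {
	unsigned int events;
	unsigned int event_flags;
};

static bool toy_should_poll(const struct toy_disk *d)
{
	return d->event_flags & TOY_FLAG_POLL;
}

static bool toy_report_uevent(const struct toy_disk *d, unsigned int ev)
{
	return (ev & d->events) && (d->event_flags & TOY_FLAG_UEVENT);
}

int main(void)
{
	/* e.g. a removable-media driver that wants polling and uevents */
	struct toy_disk cd = { TOY_EVENT_MEDIA_CHANGE,
			       TOY_FLAG_UEVENT | TOY_FLAG_POLL };
	/* a driver that only checks events internally on open/revalidate */
	struct toy_disk internal = { TOY_EVENT_MEDIA_CHANGE, 0 };

	printf("cd: poll=%d uevent=%d\n", toy_should_poll(&cd),
	       toy_report_uevent(&cd, TOY_EVENT_MEDIA_CHANGE));
	printf("internal: poll=%d uevent=%d\n", toy_should_poll(&internal),
	       toy_report_uevent(&internal, TOY_EVENT_MEDIA_CHANGE));
	return 0;
}
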
static ssize_t disk_events_async_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gendisk *disk = dev_to_disk(dev);
-
- return __disk_events_show(disk->async_events, buf);
+ return 0;
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
@@ -1901,6 +1904,9 @@ static ssize_t disk_events_poll_msecs_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
@@ -1917,6 +1923,9 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
+ if (!disk->ev)
+ return -ENODEV;
+
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1981,7 +1990,7 @@ static void disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
- if (!disk->fops->check_events)
+ if (!disk->fops->check_events || !disk->events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -2003,14 +2012,14 @@ static void disk_alloc_events(struct gendisk *disk)
static void disk_add_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
+ if (!disk->ev)
+ return;
+
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
@@ -2024,14 +2033,13 @@ static void disk_add_events(struct gendisk *disk)
static void disk_del_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
+ if (disk->ev) {
+ disk_block_events(disk);
- disk_block_events(disk);
-
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
diff --git a/block/opal_proto.h b/block/opal_proto.h
index e20be8258854..b6e352cfe982 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -170,6 +170,8 @@ enum opal_token {
OPAL_READLOCKED = 0x07,
OPAL_WRITELOCKED = 0x08,
OPAL_ACTIVEKEY = 0x0A,
+ /* lockingsp table */
+ OPAL_LIFECYCLE = 0x06,
/* locking info table */
OPAL_MAXRANGES = 0x04,
/* mbr control */
diff --git a/block/sed-opal.c b/block/sed-opal.c
index e0de4dd448b3..b1aa0cc25803 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -85,7 +85,6 @@ struct opal_dev {
void *data;
sec_send_recv *send_recv;
- const struct opal_step *steps;
struct mutex dev_lock;
u16 comid;
u32 hsn;
@@ -157,7 +156,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
/* C_PIN_TABLE object ID's */
- [OPAL_C_PIN_MSID] =
+ [OPAL_C_PIN_MSID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x84, 0x02},
[OPAL_C_PIN_SID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01},
@@ -181,7 +180,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-static const u8 opalmethod[][OPAL_UID_LENGTH] = {
+static const u8 opalmethod[][OPAL_METHOD_LENGTH] = {
[OPAL_PROPERTIES] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01 },
[OPAL_STARTSESSION] =
@@ -217,6 +216,7 @@ static const u8 opalmethod[][OPAL_UID_LENGTH] = {
};
static int end_opal_session_error(struct opal_dev *dev);
+static int opal_discovery0_step(struct opal_dev *dev);
struct opal_suspend_data {
struct opal_lock_unlock unlk;
@@ -382,37 +382,50 @@ static void check_geometry(struct opal_dev *dev, const void *data)
dev->lowest_lba = geo->lowest_aligned_lba;
}
-static int next(struct opal_dev *dev)
+static int execute_step(struct opal_dev *dev,
+ const struct opal_step *step, size_t stepIndex)
{
- const struct opal_step *step;
- int state = 0, error = 0;
+ int error = step->fn(dev, step->data);
- do {
- step = &dev->steps[state];
- if (!step->fn)
- break;
+ if (error) {
+ pr_debug("Step %zu (%pS) failed with error %d: %s\n",
+ stepIndex, step->fn, error,
+ opal_error_to_human(error));
+ }
+
+ return error;
+}
- error = step->fn(dev, step->data);
- if (error) {
- pr_debug("Error on step function: %d with error %d: %s\n",
- state, error,
- opal_error_to_human(error));
-
- /* For each OPAL command we do a discovery0 then we
- * start some sort of session.
- * If we haven't passed state 1 then there was an error
- * on discovery0 or during the attempt to start a
- * session. Therefore we shouldn't attempt to terminate
- * a session, as one has not yet been created.
- */
- if (state > 1) {
- end_opal_session_error(dev);
- return error;
- }
+static int execute_steps(struct opal_dev *dev,
+ const struct opal_step *steps, size_t n_steps)
+{
+ size_t state = 0;
+ int error;
- }
- state++;
- } while (!error);
+ /* first do a discovery0 */
+ error = opal_discovery0_step(dev);
+ if (error)
+ return error;
+
+ for (state = 0; state < n_steps; state++) {
+ error = execute_step(dev, &steps[state], state);
+ if (error)
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ /*
+ * For each OPAL command the first step in steps starts some sort of
+ * session. If an error occurred in the initial discovery0 or if an
+ * error occurred in the first step (and thus stopping the loop with
+ * state == 0) then there was an error before or during the attempt to
+ * start a session. Therefore we shouldn't attempt to terminate a
+ * session, as one has not yet been created.
+ */
+ if (state > 0)
+ end_opal_session_error(dev);
return error;
}
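
Note: the rework above drops the NULL-terminated dev->steps walker in favour of an explicit array-plus-count executor that always runs discovery0 first and only tears a session down once at least one step has executed. Below is a minimal standalone C sketch of that control flow; run_steps, discovery0 and end_session are illustrative stand-ins for execute_steps(), opal_discovery0_step() and end_opal_session_error(), not kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct step {
    int (*fn)(void *data);
    void *data;
};

static int discovery0(void)  { return 0; }  /* stand-in for opal_discovery0_step() */
static int end_session(void) { return 0; }  /* stand-in for end_opal_session_error() */

/* Run all steps; on failure after the first step, close the session. */
static int run_steps(const struct step *steps, size_t n_steps)
{
    size_t i;
    int error = discovery0();

    if (error)
        return error;

    for (i = 0; i < n_steps; i++) {
        error = steps[i].fn(steps[i].data);
        if (error) {
            fprintf(stderr, "step %zu failed: %d\n", i, error);
            /* a session exists only once step 0 succeeded */
            if (i > 0)
                end_session();
            return error;
        }
    }
    return 0;
}

static int start(void *d) { (void)d; return 0; }
static int work(void *d)  { (void)d; return -1; }

int main(void)
{
    const struct step steps[] = {
        { start, NULL },
        { work,  NULL },  /* fails; session is closed because i > 0 */
    };
    return run_steps(steps, sizeof(steps) / sizeof(steps[0])) ? 1 : 0;
}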
@@ -510,15 +523,32 @@ static int opal_discovery0(struct opal_dev *dev, void *data)
return opal_discovery0_end(dev);
}
-static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+static int opal_discovery0_step(struct opal_dev *dev)
+{
+ const struct opal_step discovery0_step = {
+ opal_discovery0,
+ };
+ return execute_step(dev, &discovery0_step, 0);
+}
+
+static bool can_add(int *err, struct opal_dev *cmd, size_t len)
{
if (*err)
- return;
- if (cmd->pos >= IO_BUFFER_LENGTH - 1) {
- pr_debug("Error adding u8: end of buffer.\n");
+ return false;
+
+ if (len > IO_BUFFER_LENGTH || cmd->pos > IO_BUFFER_LENGTH - len) {
+ pr_debug("Error adding %zu bytes: end of buffer.\n", len);
*err = -ERANGE;
- return;
+ return false;
}
+
+ return true;
+}
+
+static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+{
+ if (!can_add(err, cmd, 1))
+ return;
cmd->cmd[cmd->pos++] = tok;
}
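
Note: can_add() centralizes the remaining-space test and phrases it so the arithmetic cannot wrap: a length larger than the buffer is rejected outright, then pos is compared against the remaining room instead of computing pos + len. A small userspace sketch of the same idiom follows; BUF_LEN and the struct names are chosen for illustration only.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BUF_LEN 2048  /* stands in for IO_BUFFER_LENGTH */

struct cmd {
    unsigned char buf[BUF_LEN];
    size_t pos;
};

/* true if len more bytes fit; written so pos + len cannot overflow */
static bool can_add(int *err, const struct cmd *c, size_t len)
{
    if (*err)
        return false;
    if (len > BUF_LEN || c->pos > BUF_LEN - len) {
        *err = -ERANGE;
        return false;
    }
    return true;
}

static void add_bytes(int *err, struct cmd *c, const void *p, size_t len)
{
    if (!can_add(err, c, len))
        return;
    memcpy(c->buf + c->pos, p, len);
    c->pos += len;
}

int main(void)
{
    struct cmd c = { .pos = 0 };
    int err = 0;

    add_bytes(&err, &c, "hdr", 3);
    add_bytes(&err, &c, (char[BUF_LEN]){0}, BUF_LEN);  /* no longer fits once pos > 0 */
    printf("err=%d pos=%zu\n", err, c.pos);
    return 0;
}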
@@ -551,7 +581,6 @@ static void add_medium_atom_header(struct opal_dev *cmd, bool bytestring,
static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
{
-
size_t len;
int msb;
@@ -563,9 +592,8 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
msb = fls64(number);
len = DIV_ROUND_UP(msb, 8);
- if (cmd->pos >= IO_BUFFER_LENGTH - len - 1) {
+ if (!can_add(err, cmd, len + 1)) {
pr_debug("Error adding u64: end of buffer.\n");
- *err = -ERANGE;
return;
}
add_short_atom_header(cmd, false, false, len);
@@ -573,24 +601,19 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
add_token_u8(err, cmd, number >> (len * 8));
}
-static void add_token_bytestring(int *err, struct opal_dev *cmd,
- const u8 *bytestring, size_t len)
+static u8 *add_bytestring_header(int *err, struct opal_dev *cmd, size_t len)
{
size_t header_len = 1;
bool is_short_atom = true;
- if (*err)
- return;
-
if (len & ~SHORT_ATOM_LEN_MASK) {
header_len = 2;
is_short_atom = false;
}
- if (len >= IO_BUFFER_LENGTH - cmd->pos - header_len) {
+ if (!can_add(err, cmd, header_len + len)) {
pr_debug("Error adding bytestring: end of buffer.\n");
- *err = -ERANGE;
- return;
+ return NULL;
}
if (is_short_atom)
@@ -598,9 +621,19 @@ static void add_token_bytestring(int *err, struct opal_dev *cmd,
else
add_medium_atom_header(cmd, true, false, len);
- memcpy(&cmd->cmd[cmd->pos], bytestring, len);
- cmd->pos += len;
+ return &cmd->cmd[cmd->pos];
+}
+
+static void add_token_bytestring(int *err, struct opal_dev *cmd,
+ const u8 *bytestring, size_t len)
+{
+ u8 *start;
+ start = add_bytestring_header(err, cmd, len);
+ if (!start)
+ return;
+ memcpy(start, bytestring, len);
+ cmd->pos += len;
}
static int build_locking_range(u8 *buffer, size_t length, u8 lr)
@@ -623,7 +656,7 @@ static int build_locking_range(u8 *buffer, size_t length, u8 lr)
static int build_locking_user(u8 *buffer, size_t length, u8 lr)
{
if (length > OPAL_UID_LENGTH) {
- pr_debug("Can't build locking range user, Length OOB\n");
+ pr_debug("Can't build locking range user. Length OOB\n");
return -ERANGE;
}
@@ -649,6 +682,9 @@ static int cmd_finalize(struct opal_dev *cmd, u32 hsn, u32 tsn)
struct opal_header *hdr;
int err = 0;
+ /* close the parameter list opened from cmd_start */
+ add_token_u8(&err, cmd, OPAL_ENDLIST);
+
add_token_u8(&err, cmd, OPAL_ENDOFDATA);
add_token_u8(&err, cmd, OPAL_STARTLIST);
add_token_u8(&err, cmd, 0);
@@ -687,6 +723,11 @@ static const struct opal_resp_tok *response_get_token(
{
const struct opal_resp_tok *tok;
+ if (!resp) {
+ pr_debug("Response is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (n >= resp->num) {
pr_debug("Token number doesn't exist: %d, resp: %d\n",
n, resp->num);
@@ -869,27 +910,19 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
const char **store)
{
u8 skip;
- const struct opal_resp_tok *token;
+ const struct opal_resp_tok *tok;
*store = NULL;
- if (!resp) {
- pr_debug("Response is NULL\n");
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
-
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
- return 0;
- }
- token = &resp->toks[n];
- if (token->type != OPAL_DTA_TOKENID_BYTESTRING) {
+ if (tok->type != OPAL_DTA_TOKENID_BYTESTRING) {
pr_debug("Token is not a byte string!\n");
return 0;
}
- switch (token->width) {
+ switch (tok->width) {
case OPAL_WIDTH_TINY:
case OPAL_WIDTH_SHORT:
skip = 1;
@@ -905,37 +938,29 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
return 0;
}
- *store = token->pos + skip;
- return token->len - skip;
+ *store = tok->pos + skip;
+ return tok->len - skip;
}
static u64 response_get_u64(const struct parsed_resp *resp, int n)
{
- if (!resp) {
- pr_debug("Response is NULL\n");
- return 0;
- }
+ const struct opal_resp_tok *tok;
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- if (resp->toks[n].type != OPAL_DTA_TOKENID_UINT) {
- pr_debug("Token is not unsigned it: %d\n",
- resp->toks[n].type);
+ if (tok->type != OPAL_DTA_TOKENID_UINT) {
+ pr_debug("Token is not unsigned int: %d\n", tok->type);
return 0;
}
- if (!(resp->toks[n].width == OPAL_WIDTH_TINY ||
- resp->toks[n].width == OPAL_WIDTH_SHORT)) {
- pr_debug("Atom is not short or tiny: %d\n",
- resp->toks[n].width);
+ if (tok->width != OPAL_WIDTH_TINY && tok->width != OPAL_WIDTH_SHORT) {
+ pr_debug("Atom is not short or tiny: %d\n", tok->width);
return 0;
}
- return resp->toks[n].stored.u;
+ return tok->stored.u;
}
static bool response_token_matches(const struct opal_resp_tok *token, u8 match)
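
Note: both the string and integer accessors now funnel through response_get_token(), which returns an ERR_PTR-encoded pointer so a NULL response and an out-of-range index are reported the same way. The snippet below is a tiny standalone illustration of that encoding; the kernel's real ERR_PTR()/IS_ERR() live in <linux/err.h>, and err_ptr/is_err/get_token here are simplified stand-ins.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095  /* mirrors the kernel convention */

static void *err_ptr(long error) { return (void *)(intptr_t)error; }

static bool is_err(const void *ptr)
{
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct tok  { int type; long value; };
struct resp { int num; struct tok toks[4]; };

/* One validating accessor; callers no longer repeat the NULL/range checks. */
static const struct tok *get_token(const struct resp *resp, int n)
{
    if (!resp)
        return err_ptr(-EINVAL);
    if (n >= resp->num)
        return err_ptr(-EINVAL);
    return &resp->toks[n];
}

int main(void)
{
    struct resp r = { .num = 1, .toks = { { 1, 42 } } };
    const struct tok *t;

    t = get_token(&r, 0);
    printf("tok 0: %s\n", is_err(t) ? "error" : "ok");
    t = get_token(&r, 3);
    printf("tok 3: %s\n", is_err(t) ? "error" : "ok");
    return 0;
}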
@@ -991,6 +1016,27 @@ static void clear_opal_cmd(struct opal_dev *dev)
memset(dev->cmd, 0, IO_BUFFER_LENGTH);
}
+static int cmd_start(struct opal_dev *dev, const u8 *uid, const u8 *method)
+{
+ int err = 0;
+
+ clear_opal_cmd(dev);
+ set_comid(dev, dev->comid);
+
+ add_token_u8(&err, dev, OPAL_CALL);
+ add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
+ add_token_bytestring(&err, dev, method, OPAL_METHOD_LENGTH);
+
+ /*
+ * Every method call is followed by its parameters enclosed within
+ * OPAL_STARTLIST and OPAL_ENDLIST tokens. We automatically open the
+ * parameter list here and close it later in cmd_finalize.
+ */
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ return err;
+}
+
static int start_opal_session_cont(struct opal_dev *dev)
{
u32 hsn, tsn;
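
Note: with cmd_start() emitting the method header plus the opening OPAL_STARTLIST, and cmd_finalize() appending the matching OPAL_ENDLIST, each command builder only has to add its own parameters, which is why the trailing ENDLIST tokens disappear from every builder in the rest of this patch. A toy standalone sketch of that bracketing follows, using printable token names rather than the real binary OPAL atoms.

#include <stdio.h>

/* Toy token stream mirroring the cmd_start()/cmd_finalize() bracket. */
static void emit(int *err, const char *tok)
{
    if (*err)
        return;
    printf("%s ", tok);
}

static int cmd_start(int *err, const char *uid, const char *method)
{
    emit(err, "CALL");
    emit(err, uid);
    emit(err, method);
    emit(err, "STARTLIST");  /* parameter list opened here ... */
    return *err;
}

static void cmd_finalize(int *err)
{
    emit(err, "ENDLIST");    /* ... and closed here, once, for every builder */
    emit(err, "ENDOFDATA");
    printf("\n");
}

int main(void)
{
    int err = 0;

    cmd_start(&err, "MBRCONTROL", "SET");
    emit(&err, "STARTNAME");  /* the builder contributes only its payload */
    emit(&err, "MBRENABLE");
    emit(&err, "1");
    emit(&err, "ENDNAME");
    cmd_finalize(&err);
    return err;
}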
@@ -1050,24 +1096,47 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
return opal_send_recv(dev, cont);
}
+/*
+ * request @column from table @table on device @dev. On success, the column
+ * data will be available in dev->resp->tok[4]
+ */
+static int generic_get_column(struct opal_dev *dev, const u8 *table,
+ u64 column)
+{
+ int err;
+
+ err = cmd_start(dev, table, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err)
+ return err;
+
+ return finalize_and_send(dev, parse_and_check_status);
+}
+
static int gen_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
kfree(dev->prev_data);
dev->prev_data = NULL;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GENKEY],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_GENKEY]);
if (err) {
pr_debug("Error building gen key command\n");
@@ -1105,62 +1174,39 @@ static int get_active_key_cont(struct opal_dev *dev)
static int get_active_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
+ int err;
u8 *lr = data;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
err = build_locking_range(uid, sizeof(uid), *lr);
if (err)
return err;
- err = 0;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* startCloumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* endColumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- if (err) {
- pr_debug("Error building get active key command\n");
+ err = generic_get_column(dev, uid, OPAL_ACTIVEKEY);
+ if (err)
return err;
- }
- return finalize_and_send(dev, get_active_key_cont);
+ return get_active_key_cont(dev);
}
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
{
- int err = 0;
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /* ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u8(&err, dev, rle);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /* WriteLockEnabled */
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u8(&err, dev, wle);
add_token_u8(&err, dev, OPAL_ENDNAME);
@@ -1176,7 +1222,6 @@ static int generic_lr_enable_disable(struct opal_dev *dev,
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1197,10 +1242,7 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
u8 uid[OPAL_UID_LENGTH];
struct opal_user_lr_setup *setup = data;
u8 lr;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
lr = setup->session.opal_key.lr;
err = build_locking_range(uid, sizeof(uid), lr);
@@ -1210,40 +1252,34 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
if (lr == 0)
err = enable_global_lr(dev, uid, setup);
else {
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Ranges Start */
+ add_token_u8(&err, dev, OPAL_RANGESTART);
add_token_u64(&err, dev, setup->range_start);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* Ranges length */
+ add_token_u8(&err, dev, OPAL_RANGELENGTH);
add_token_u64(&err, dev, setup->range_length);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /*ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u64(&err, dev, !!setup->RLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /*WriteLockEnabled*/
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u64(&err, dev, !!setup->WLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
}
if (err) {
pr_debug("Error building Setup Locking range command.\n");
@@ -1261,29 +1297,21 @@ static int start_generic_opal_session(struct opal_dev *dev,
u8 key_len)
{
u32 hsn;
- int err = 0;
+ int err;
if (key == NULL && auth != OPAL_ANYBODY_UID)
return OPAL_INVAL_PARAM;
- clear_opal_cmd(dev);
-
- set_comid(dev, dev->comid);
hsn = GENERIC_HOST_SESSION_NUM;
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[sp_type], OPAL_UID_LENGTH);
add_token_u8(&err, dev, 1);
switch (auth) {
case OPAL_ANYBODY_UID:
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
case OPAL_ADMIN1_UID:
case OPAL_SID_UID:
@@ -1296,7 +1324,6 @@ static int start_generic_opal_session(struct opal_dev *dev,
add_token_bytestring(&err, dev, opaluid[auth],
OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
default:
pr_debug("Cannot start Admin SP session with auth %d\n", auth);
@@ -1324,6 +1351,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
if (!key) {
const struct opal_key *okey = data;
+
ret = start_generic_opal_session(dev, OPAL_SID_UID,
OPAL_ADMINSP_UID,
okey->key,
@@ -1341,6 +1369,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
static int start_admin1LSP_opal_session(struct opal_dev *dev, void *data)
{
struct opal_key *key = data;
+
return start_generic_opal_session(dev, OPAL_ADMIN1_UID,
OPAL_LOCKINGSP_UID,
key->key, key->key_len);
@@ -1356,30 +1385,21 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
u8 *key = session->opal_key.key;
u32 hsn = GENERIC_HOST_SESSION_NUM;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- if (session->sum) {
+ if (session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->opal_key.lr);
- if (err)
- return err;
-
- } else if (session->who != OPAL_ADMIN1 && !session->sum) {
+ else if (session->who != OPAL_ADMIN1 && !session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->who - 1);
- if (err)
- return err;
- } else
+ else
memcpy(lk_ul_user, opaluid[OPAL_ADMIN1_UID], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
+ if (err)
+ return err;
+
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
OPAL_UID_LENGTH);
@@ -1392,7 +1412,6 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, 3);
add_token_bytestring(&err, dev, lk_ul_user, OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building STARTSESSION command.\n");
@@ -1404,18 +1423,10 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
static int revert_tper(struct opal_dev *dev, void *data)
{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_ADMINSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_REVERT],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, opaluid[OPAL_ADMINSP_UID],
+ opalmethod[OPAL_REVERT]);
if (err) {
pr_debug("Error building REVERT TPER command.\n");
return err;
@@ -1428,18 +1439,12 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, opaluid[OPAL_USER1_UID], OPAL_UID_LENGTH);
uid[7] = session->who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1449,7 +1454,6 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building Activate UserN command.\n");
@@ -1463,20 +1467,12 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
if (build_locking_range(uid, sizeof(uid), session->opal_key.lr) < 0)
return -ERANGE;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ERASE],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_ERASE]);
if (err) {
pr_debug("Error building Erase Locking Range Command.\n");
@@ -1488,26 +1484,20 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
static int set_mbr_done(struct opal_dev *dev, void *data)
{
u8 *mbr_done_tf = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 2); /* Done */
+ add_token_u8(&err, dev, OPAL_MBRDONE);
add_token_u8(&err, dev, *mbr_done_tf); /* Done T or F */
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR Done command\n");
@@ -1520,26 +1510,20 @@ static int set_mbr_done(struct opal_dev *dev, void *data)
static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
{
u8 *mbr_en_dis = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 1);
+ add_token_u8(&err, dev, OPAL_MBRENABLE);
add_token_u8(&err, dev, *mbr_en_dis);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR done command\n");
@@ -1552,26 +1536,19 @@ static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
struct opal_dev *dev)
{
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, cpin_uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, cpin_uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* PIN */
+ add_token_u8(&err, dev, OPAL_PIN);
add_token_bytestring(&err, dev, key, key_len);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1619,10 +1596,7 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
u8 lr_buffer[OPAL_UID_LENGTH];
u8 user_uid[OPAL_UID_LENGTH];
struct opal_lock_unlock *lkul = data;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(lr_buffer, opaluid[OPAL_LOCKINGRANGE_ACE_RDLOCKED],
OPAL_UID_LENGTH);
@@ -1637,12 +1611,8 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
user_uid[7] = lkul->session.who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
@@ -1680,7 +1650,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building add user to locking range command.\n");
@@ -1697,9 +1666,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
u8 read_locked = 1, write_locked = 1;
int err = 0;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -1714,17 +1680,15 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state... returning to uland\n");
return OPAL_INVAL_PARAM;
}
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
+
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1741,7 +1705,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building SET command.\n");
@@ -1775,7 +1738,7 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state.\n");
@@ -1796,17 +1759,10 @@ static int activate_lsp(struct opal_dev *dev, void *data)
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
u8 uint_3 = 0x83;
- int err = 0, i;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ACTIVATE],
- OPAL_UID_LENGTH);
+ int err, i;
+ err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
+ opalmethod[OPAL_ACTIVATE]);
if (opal_act->sum) {
err = build_locking_range(user_lr, sizeof(user_lr),
@@ -1814,7 +1770,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
if (err)
return err;
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, uint_3);
add_token_u8(&err, dev, 6);
@@ -1829,11 +1784,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
}
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- } else {
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
}
if (err) {
@@ -1844,17 +1794,19 @@ static int activate_lsp(struct opal_dev *dev, void *data)
return finalize_and_send(dev, parse_and_check_status);
}
-static int get_lsp_lifecycle_cont(struct opal_dev *dev)
+/* Determine if we're in the Manufactured Inactive or Active state */
+static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
{
u8 lc_status;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_LOCKINGSP_UID],
+ OPAL_LIFECYCLE);
+ if (err)
+ return err;
lc_status = response_get_u64(&dev->parsed, 4);
- /* 0x08 is Manufacured Inactive */
+ /* 0x08 is Manufactured Inactive */
/* 0x09 is Manufactured */
if (lc_status != OPAL_MANUFACTURED_INACTIVE) {
pr_debug("Couldn't determine the status of the Lifecycle state\n");
@@ -1864,56 +1816,19 @@ static int get_lsp_lifecycle_cont(struct opal_dev *dev)
return 0;
}
-/* Determine if we're in the Manufactured Inactive or Active state */
-static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error Building GET Lifecycle Status command\n");
- return err;
- }
-
- return finalize_and_send(dev, get_lsp_lifecycle_cont);
-}
-
-static int get_msid_cpin_pin_cont(struct opal_dev *dev)
+static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
{
const char *msid_pin;
size_t strlen;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_C_PIN_MSID], OPAL_PIN);
+ if (err)
+ return err;
strlen = response_get_string(&dev->parsed, 4, &msid_pin);
if (!msid_pin) {
- pr_debug("%s: Couldn't extract PIN from response\n", __func__);
+ pr_debug("Couldn't extract MSID_CPIN from response\n");
return OPAL_INVAL_PARAM;
}
@@ -1926,42 +1841,6 @@ static int get_msid_cpin_pin_cont(struct opal_dev *dev)
return 0;
}
-static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_C_PIN_MSID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 3); /* PIN */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 3); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error building Get MSID CPIN PIN command.\n");
- return err;
- }
-
- return finalize_and_send(dev, get_msid_cpin_pin_cont);
-}
-
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -1977,18 +1856,14 @@ static int end_opal_session(struct opal_dev *dev, void *data)
static int end_opal_session_error(struct opal_dev *dev)
{
- const struct opal_step error_end_session[] = {
- { end_opal_session, },
- { NULL, }
+ const struct opal_step error_end_session = {
+ end_opal_session,
};
- dev->steps = error_end_session;
- return next(dev);
+ return execute_step(dev, &error_end_session, 0);
}
-static inline void setup_opal_dev(struct opal_dev *dev,
- const struct opal_step *steps)
+static inline void setup_opal_dev(struct opal_dev *dev)
{
- dev->steps = steps;
dev->tsn = 0;
dev->hsn = 0;
dev->prev_data = NULL;
@@ -1996,15 +1871,11 @@ static inline void setup_opal_dev(struct opal_dev *dev,
static int check_opal_support(struct opal_dev *dev)
{
- const struct opal_step steps[] = {
- { opal_discovery0, },
- { NULL, }
- };
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = opal_discovery0_step(dev);
dev->supported = !ret;
mutex_unlock(&dev->dev_lock);
return ret;
@@ -2057,18 +1928,16 @@ static int opal_secure_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ get_active_key, &opal_session->opal_key.lr },
{ gen_key, },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2077,17 +1946,15 @@ static int opal_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ erase_locking_range, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2095,15 +1962,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
struct opal_mbr_data *opal_mbr)
{
+ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+ OPAL_TRUE : OPAL_FALSE;
+
const struct opal_step mbr_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_done, &opal_mbr->enable_disable },
+ { set_mbr_done, &enable_disable },
{ end_opal_session, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_enable_disable, &opal_mbr->enable_disable },
- { end_opal_session, },
- { NULL, }
+ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, }
};
int ret;
@@ -2112,8 +1980,8 @@ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, mbr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2130,7 +1998,7 @@ static int opal_save(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk)
suspend->lr = lk_unlk->session.opal_key.lr;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
add_suspend_info(dev, suspend);
mutex_unlock(&dev->dev_lock);
return 0;
@@ -2140,11 +2008,9 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &lk_unlk->session.opal_key },
{ add_user_to_lr, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2166,8 +2032,8 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, steps, ARRAY_SIZE(steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2175,16 +2041,14 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step revert_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, opal },
- { revert_tper, }, /* controller will terminate session */
- { NULL, }
+ { revert_tper, } /* controller will terminate session */
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, revert_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, revert_steps, ARRAY_SIZE(revert_steps));
mutex_unlock(&dev->dev_lock);
/*
@@ -2201,37 +2065,34 @@ static int __opal_lock_unlock(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step unlock_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
const struct opal_step unlock_sum_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range_sum, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = lk_unlk->session.sum ? unlock_sum_steps : unlock_steps;
- return next(dev);
+ if (lk_unlk->session.sum)
+ return execute_steps(dev, unlock_sum_steps,
+ ARRAY_SIZE(unlock_sum_steps));
+ else
+ return execute_steps(dev, unlock_steps,
+ ARRAY_SIZE(unlock_steps));
}
static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
{
- u8 mbr_done_tf = 1;
- const struct opal_step mbrdone_step [] = {
- { opal_discovery0, },
+ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step[] = {
{ start_admin1LSP_opal_session, key },
{ set_mbr_done, &mbr_done_tf },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = mbrdone_step;
- return next(dev);
+ return execute_steps(dev, mbrdone_step, ARRAY_SIZE(mbrdone_step));
}
static int opal_lock_unlock(struct opal_dev *dev,
@@ -2252,14 +2113,12 @@ static int opal_lock_unlock(struct opal_dev *dev,
static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step owner_steps[] = {
- { opal_discovery0, },
{ start_anybodyASP_opal_session, },
{ get_msid_cpin_pin, },
{ end_opal_session, },
{ start_SIDASP_opal_session, opal },
{ set_sid_cpin_pin, opal },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2267,21 +2126,20 @@ static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
return -ENODEV;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, owner_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, owner_steps, ARRAY_SIZE(owner_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
-static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_act)
+static int opal_activate_lsp(struct opal_dev *dev,
+ struct opal_lr_act *opal_lr_act)
{
const struct opal_step active_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, &opal_lr_act->key },
{ get_lsp_lifecycle, },
{ activate_lsp, opal_lr_act },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2289,8 +2147,8 @@ static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_a
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, active_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, active_steps, ARRAY_SIZE(active_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2299,17 +2157,15 @@ static int opal_setup_locking_range(struct opal_dev *dev,
struct opal_user_lr_setup *opal_lrs)
{
const struct opal_step lr_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_lrs->session },
{ setup_locking_range, opal_lrs },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, lr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, lr_steps, ARRAY_SIZE(lr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2317,11 +2173,9 @@ static int opal_setup_locking_range(struct opal_dev *dev,
static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
{
const struct opal_step pw_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_pw->session },
{ set_new_pw, &opal_pw->new_user_pw },
- { end_opal_session, },
- { NULL }
+ { end_opal_session, }
};
int ret;
@@ -2332,8 +2186,8 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, pw_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, pw_steps, ARRAY_SIZE(pw_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2342,11 +2196,9 @@ static int opal_activate_user(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step act_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_session->opal_key },
{ internal_activate_user, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2358,8 +2210,8 @@ static int opal_activate_user(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, act_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, act_steps, ARRAY_SIZE(act_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2376,7 +2228,7 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
return false;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
list_for_each_entry(suspend, &dev->unlk_lst, node) {
dev->tsn = 0;
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 0903e0803ec8..92b930cb3b72 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1829,6 +1829,7 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index b0dbbdfeb33e..c7b5c4671f05 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -2028,6 +2028,7 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
+ unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 000a2f4c0e92..acd7af3630e9 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1317,10 +1317,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FIXED_BM \
((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
-#if !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
-#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
-#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
-#else
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
@@ -1333,7 +1329,6 @@ struct bm_extent {
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
/* corresponds to (1UL << 38) bits right now. */
#endif
-#endif
/* Estimate max bio size as 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 95f608d1a098..8072bd9881e6 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4540,6 +4540,7 @@ static int __init do_floppy_init(void)
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
+ disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bf1c61cab8eb..102d79575895 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -264,12 +264,20 @@ lo_do_transfer(struct loop_device *lo, int cmd,
return ret;
}
+static inline void loop_iov_iter_bvec(struct iov_iter *i,
+ unsigned int direction, const struct bio_vec *bvec,
+ unsigned long nr_segs, size_t count)
+{
+ iov_iter_bvec(i, direction, bvec, nr_segs, count);
+ i->type |= ITER_BVEC_FLAG_NO_REF;
+}
+
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;
- iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
+ loop_iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
file_start_write(file);
bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +355,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
+ loop_iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;
@@ -388,7 +396,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
b.bv_offset = 0;
b.bv_len = bvec.bv_len;
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
+ loop_iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) {
ret = len;
@@ -555,7 +563,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
+ loop_iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
@@ -900,6 +908,24 @@ static int loop_prepare_queue(struct loop_device *lo)
return 0;
}
+static void loop_update_rotational(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *file_inode = file->f_mapping->host;
+ struct block_device *file_bdev = file_inode->i_sb->s_bdev;
+ struct request_queue *q = lo->lo_queue;
+ bool nonrot = true;
+
+ /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
+ if (file_bdev)
+ nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+
+ if (nonrot)
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
@@ -963,6 +989,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_write_cache(lo->lo_queue, true, false);
+ loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
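
Note: loop_update_rotational(), added a few lines above, propagates the rotational hint of the backing file's block device to the loop queue and defaults to non-rotational when the filesystem has no s_bdev (tmpfs, for instance). The decision itself reduces to the small sketch below, where struct bdev and backing_is_nonrot() are illustrative stand-ins rather than kernel interfaces.

#include <stdbool.h>
#include <stdio.h>

struct bdev { bool nonrot; };

/* Mirror of the decision in loop_update_rotational(): a missing backing
 * block device is treated as non-rotational. */
static bool backing_is_nonrot(const struct bdev *file_bdev)
{
    return file_bdev ? file_bdev->nonrot : true;
}

int main(void)
{
    struct bdev hdd = { .nonrot = false };

    printf("tmpfs-backed: nonrot=%d\n", backing_is_nonrot(NULL));
    printf("hdd-backed:   nonrot=%d\n", backing_is_nonrot(&hdd));
    return 0;
}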
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 6d415b20fb70..001dbdcbf355 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -343,6 +343,7 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 0ff9b12d0e35..6f9ad3fc716f 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -897,6 +897,7 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 35e6e271b219..1e9c50a7256c 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -319,6 +319,7 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index f5a71023f76c..024060165afa 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2761,7 +2761,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- disk->async_events = pd->bdev->bd_disk->async_events;
add_disk(disk);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 4e1d9b31f60c..cc61c5ce3ad5 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -102,7 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
rq_for_each_segment(bvec, req, iter) {
unsigned long flags;
- dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+ dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %llu\n",
__func__, __LINE__, i, bio_sectors(iter.bio),
iter.bio->bi_iter.bi_sector);
@@ -496,7 +496,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
dev->regions[dev->region_idx].size*priv->blocking_factor);
dev_info(&dev->sbd.core,
- "%s is a %s (%llu MiB total, %lu MiB for OtherOS)\n",
+ "%s is a %s (%llu MiB total, %llu MiB for OtherOS)\n",
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 3fa6fcc34790..67b5ec281c6d 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -862,6 +862,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1e2ae90d7715..cf42729c788e 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1216,6 +1216,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
disk->private_data = fs;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 2a7ca4a1e6f7..f1d90cd3dc47 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -693,7 +693,8 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+ vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 32a21b8d1d85..464c9092bc8b 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1032,6 +1032,7 @@ static int ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
+ ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index f8b7345fe1cb..5cf3bade0d57 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -786,6 +786,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
+ gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 1f03884a6808..3b15adc6ce98 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1797,6 +1797,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 4a6e1a413ead..46f2df288c6a 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -82,8 +82,9 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
/*
* ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so disk->events
- * is cleared to 0 and the following function is used only to trigger
+ * makes it impossible to use for proper event reporting, so
+ * DISK_EVENT_FLAG_UEVENT is cleared in disk->event_flags
+ * and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 04e008e8f6f9..f233b34ea0c0 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -299,8 +299,9 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
/*
* The following is used to force revalidation on the first open on
* removeable devices, and never gets reported to userland as
- * genhd->events is 0. This is intended as removeable ide disk
- * can't really detect MEDIA_CHANGE events.
+ * DISK_EVENT_FLAG_UEVENT isn't set in genhd->event_flags.
+ * This is intended as removable ide disk can't really detect
+ * MEDIA_CHANGE events.
*/
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -416,6 +417,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g, NULL);
return 0;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 12b5216c2cfe..721efc493942 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -135,9 +135,8 @@ struct dm_dev *dm_snap_cow(struct dm_snapshot *snap);
/*
* Funtions to manipulate consecutive chunks
*/
-# if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64)
-# define DM_CHUNK_CONSECUTIVE_BITS 8
-# define DM_CHUNK_NUMBER_BITS 56
+#define DM_CHUNK_CONSECUTIVE_BITS 8
+#define DM_CHUNK_NUMBER_BITS 56
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
@@ -163,29 +162,6 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
e->new_chunk -= (1ULL << DM_CHUNK_NUMBER_BITS);
}
-# else
-# define DM_CHUNK_CONSECUTIVE_BITS 0
-
-static inline chunk_t dm_chunk_number(chunk_t chunk)
-{
- return chunk;
-}
-
-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
-{
- return 0;
-}
-
-static inline void dm_consecutive_chunk_count_inc(struct dm_exception *e)
-{
-}
-
-static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
-{
-}
-
-# endif
-
/*
* Return the number of sectors in the device.
*/
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7c678f50aaa3..05075613af93 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -88,14 +88,10 @@ struct journal_entry {
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
-#elif defined(CONFIG_LBDAF)
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
-#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
-#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
-#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
+#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
+#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 1cd4f991792c..3a62a46b75c7 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -490,10 +490,10 @@ void md_bitmap_print_sb(struct bitmap *bitmap)
pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic));
pr_debug(" version: %d\n", le32_to_cpu(sb->version));
pr_debug(" uuid: %08x.%08x.%08x.%08x\n",
- le32_to_cpu(*(__u32 *)(sb->uuid+0)),
- le32_to_cpu(*(__u32 *)(sb->uuid+4)),
- le32_to_cpu(*(__u32 *)(sb->uuid+8)),
- le32_to_cpu(*(__u32 *)(sb->uuid+12)));
+ le32_to_cpu(*(__le32 *)(sb->uuid+0)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+4)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+8)),
+ le32_to_cpu(*(__le32 *)(sb->uuid+12)));
pr_debug(" events: %llu\n",
(unsigned long long) le64_to_cpu(sb->events));
pr_debug("events cleared: %llu\n",
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 05ffffb8b769..541015373f6a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -88,8 +88,7 @@ static struct kobj_type md_ktype;
struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
-struct module *md_cluster_mod;
-EXPORT_SYMBOL(md_cluster_mod);
+static struct module *md_cluster_mod;
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
@@ -132,24 +131,6 @@ static inline int speed_max(struct mddev *mddev)
mddev->sync_speed_max : sysctl_speed_limit_max;
}
-static void * flush_info_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_info), gfp_flags);
-}
-static void flush_info_free(void *flush_info, void *data)
-{
- kfree(flush_info);
-}
-
-static void * flush_bio_alloc(gfp_t gfp_flags, void *data)
-{
- return kzalloc(sizeof(struct flush_bio), gfp_flags);
-}
-static void flush_bio_free(void *flush_bio, void *data)
-{
- kfree(flush_bio);
-}
-
static struct ctl_table_header *raid_table_header;
static struct ctl_table raid_table[] = {
@@ -423,54 +404,31 @@ static int md_congested(void *data, int bits)
/*
* Generic flush handling for md
*/
-static void submit_flushes(struct work_struct *ws)
-{
- struct flush_info *fi = container_of(ws, struct flush_info, flush_work);
- struct mddev *mddev = fi->mddev;
- struct bio *bio = fi->bio;
-
- bio->bi_opf &= ~REQ_PREFLUSH;
- md_handle_request(mddev, bio);
-
- mempool_free(fi, mddev->flush_pool);
-}
-static void md_end_flush(struct bio *fbio)
+static void md_end_flush(struct bio *bio)
{
- struct flush_bio *fb = fbio->bi_private;
- struct md_rdev *rdev = fb->rdev;
- struct flush_info *fi = fb->fi;
- struct bio *bio = fi->bio;
- struct mddev *mddev = fi->mddev;
+ struct md_rdev *rdev = bio->bi_private;
+ struct mddev *mddev = rdev->mddev;
rdev_dec_pending(rdev, mddev);
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
- /* an empty barrier - all done */
- bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
- }
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ /* The pre-request flush has finished */
+ queue_work(md_wq, &mddev->flush_work);
}
-
- mempool_free(fb, mddev->flush_bio_pool);
- bio_put(fbio);
+ bio_put(bio);
}
-void md_flush_request(struct mddev *mddev, struct bio *bio)
+static void md_submit_flush_data(struct work_struct *ws);
+
+static void submit_flushes(struct work_struct *ws)
{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
struct md_rdev *rdev;
- struct flush_info *fi;
-
- fi = mempool_alloc(mddev->flush_pool, GFP_NOIO);
-
- fi->bio = bio;
- fi->mddev = mddev;
- atomic_set(&fi->flush_pending, 1);
+ mddev->start_flush = ktime_get_boottime();
+ INIT_WORK(&mddev->flush_work, md_submit_flush_data);
+ atomic_set(&mddev->flush_pending, 1);
rcu_read_lock();
rdev_for_each_rcu(rdev, mddev)
if (rdev->raid_disk >= 0 &&
@@ -480,37 +438,74 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
* we reclaim rcu_read_lock
*/
struct bio *bi;
- struct flush_bio *fb;
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
-
- fb = mempool_alloc(mddev->flush_bio_pool, GFP_NOIO);
- fb->fi = fi;
- fb->rdev = rdev;
-
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
- bio_set_dev(bi, rdev->bdev);
bi->bi_end_io = md_end_flush;
- bi->bi_private = fb;
+ bi->bi_private = rdev;
+ bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
- atomic_inc(&fi->flush_pending);
+ atomic_inc(&mddev->flush_pending);
submit_bio(bi);
-
rcu_read_lock();
rdev_dec_pending(rdev, mddev);
}
rcu_read_unlock();
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ queue_work(md_wq, &mddev->flush_work);
+}
+
+static void md_submit_flush_data(struct work_struct *ws)
+{
+ struct mddev *mddev = container_of(ws, struct mddev, flush_work);
+ struct bio *bio = mddev->flush_bio;
+
+ /*
+	 * must reset flush_bio before calling into md_handle_request to avoid a
+	 * deadlock: other bios that already passed the md_handle_request suspend
+	 * check could wait for this flush_bio, while the md_handle_request call
+	 * below could wait for those bios because of the same suspend check
+ */
+ mddev->last_flush = mddev->start_flush;
+ mddev->flush_bio = NULL;
+ wake_up(&mddev->sb_wait);
+
+ if (bio->bi_iter.bi_size == 0) {
+ /* an empty barrier - all done */
+ bio_endio(bio);
+ } else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ md_handle_request(mddev, bio);
+ }
+}
- if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0) {
+void md_flush_request(struct mddev *mddev, struct bio *bio)
+{
+ ktime_t start = ktime_get_boottime();
+ spin_lock_irq(&mddev->lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->flush_bio ||
+ ktime_after(mddev->last_flush, start),
+ mddev->lock);
+ if (!ktime_after(mddev->last_flush, start)) {
+ WARN_ON(mddev->flush_bio);
+ mddev->flush_bio = bio;
+ bio = NULL;
+ }
+ spin_unlock_irq(&mddev->lock);
+
+ if (!bio) {
+ INIT_WORK(&mddev->flush_work, submit_flushes);
+ queue_work(md_wq, &mddev->flush_work);
+ } else {
+ /* flush was performed for some other bio while we waited. */
+ if (bio->bi_iter.bi_size == 0)
/* an empty barrier - all done */
bio_endio(bio);
- mempool_free(fi, mddev->flush_pool);
- } else {
- INIT_WORK(&fi->flush_work, submit_flushes);
- queue_work(md_wq, &fi->flush_work);
+ else {
+ bio->bi_opf &= ~REQ_PREFLUSH;
+ mddev->pers->make_request(mddev, bio);
}
}
}
@@ -560,6 +555,7 @@ void mddev_init(struct mddev *mddev)
atomic_set(&mddev->openers, 0);
atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
+ atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
init_waitqueue_head(&mddev->recovery_wait);
mddev->reshape_position = MaxSector;
@@ -1109,8 +1105,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
* (not needed for Linear and RAID0 as metadata doesn't
* record this size)
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
- sb->level >= 1)
+ if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
rdev->sectors = (sector_t)(2ULL << 32) - 2;
if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
@@ -1408,8 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
/* Limit to 4TB as metadata cannot record more than that.
* 4TB == 2^32 KB, or 2*2^32 sectors.
*/
- if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
- rdev->mddev->level >= 1)
+ if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -1553,7 +1547,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
s32 offset;
sector_t bb_sector;
- u64 *bbp;
+ __le64 *bbp;
int i;
int sectors = le16_to_cpu(sb->bblog_size);
if (sectors > (PAGE_SIZE / 512))
@@ -1565,7 +1559,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
if (!sync_page_io(rdev, bb_sector, sectors << 9,
rdev->bb_page, REQ_OP_READ, 0, true))
return -EIO;
- bbp = (u64 *)page_address(rdev->bb_page);
+ bbp = (__le64 *)page_address(rdev->bb_page);
rdev->badblocks.shift = sb->bblog_shift;
for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
u64 bb = le64_to_cpu(*bbp);
@@ -1877,7 +1871,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
md_error(mddev, rdev);
else {
struct badblocks *bb = &rdev->badblocks;
- u64 *bbp = (u64 *)page_address(rdev->bb_page);
+ __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
u64 *p = bb->page;
sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
if (bb->changed) {
@@ -2855,8 +2849,10 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
- rdev->saved_raid_disk >= 0) {
+ if (!rdev->mddev->pers)
+ err = -EINVAL;
+ else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -5511,22 +5507,6 @@ int md_run(struct mddev *mddev)
if (err)
return err;
}
- if (mddev->flush_pool == NULL) {
- mddev->flush_pool = mempool_create(NR_FLUSH_INFOS, flush_info_alloc,
- flush_info_free, mddev);
- if (!mddev->flush_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
- if (mddev->flush_bio_pool == NULL) {
- mddev->flush_bio_pool = mempool_create(NR_FLUSH_BIOS, flush_bio_alloc,
- flush_bio_free, mddev);
- if (!mddev->flush_bio_pool) {
- err = -ENOMEM;
- goto abort;
- }
- }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -5686,11 +5666,8 @@ int md_run(struct mddev *mddev)
return 0;
abort:
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
-
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -5894,14 +5871,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- if (mddev->flush_bio_pool) {
- mempool_destroy(mddev->flush_bio_pool);
- mddev->flush_bio_pool = NULL;
- }
- if (mddev->flush_pool) {
- mempool_destroy(mddev->flush_pool);
- mddev->flush_pool = NULL;
- }
}
void md_stop(struct mddev *mddev)
@@ -9257,7 +9226,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
* reshape is happening in the remote node, we need to
* update reshape_position and call start_reshape.
*/
- mddev->reshape_position = sb->reshape_position;
+ mddev->reshape_position = le64_to_cpu(sb->reshape_position);
if (mddev->pers->update_reshape_pos)
mddev->pers->update_reshape_pos(mddev);
if (mddev->pers->start_reshape)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index c52afb52c776..257cb4c9e22b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -252,19 +252,6 @@ enum mddev_sb_flags {
MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */
};
-#define NR_FLUSH_INFOS 8
-#define NR_FLUSH_BIOS 64
-struct flush_info {
- struct bio *bio;
- struct mddev *mddev;
- struct work_struct flush_work;
- atomic_t flush_pending;
-};
-struct flush_bio {
- struct flush_info *fi;
- struct md_rdev *rdev;
-};
-
struct mddev {
void *private;
struct md_personality *pers;
@@ -470,8 +457,16 @@ struct mddev {
* metadata and bitmap writes
*/
- mempool_t *flush_pool;
- mempool_t *flush_bio_pool;
+ /* Generic flush handling.
+ * The last to finish preflush schedules a worker to submit
+ * the rest of the request (without the REQ_PREFLUSH flag).
+ */
+ struct bio *flush_bio;
+ atomic_t flush_pending;
+ ktime_t start_flush, last_flush; /* last_flush is when the last completed
+ * flush was started.
+ */
+ struct work_struct flush_work;
struct work_struct event_work; /* used by dm to report failure event */
void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
struct md_cluster_info *cluster_info;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c033bfcb209e..2b0a715e70c9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -711,6 +711,8 @@ static bool is_full_stripe_write(struct stripe_head *sh)
}
static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __acquires(&sh1->stripe_lock)
+ __acquires(&sh2->stripe_lock)
{
if (sh1 > sh2) {
spin_lock_irq(&sh2->stripe_lock);
@@ -722,6 +724,8 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
}
static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ __releases(&sh1->stripe_lock)
+ __releases(&sh2->stripe_lock)
{
spin_unlock(&sh1->stripe_lock);
spin_unlock_irq(&sh2->stripe_lock);
@@ -4223,26 +4227,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
case check_state_check_result:
sh->check_state = check_state_idle;
+ if (s->failed > 1)
+ break;
/* handle a successful check operation, if parity is correct
* we are done. Otherwise update the mismatch count and repair
* parity if !MD_RECOVERY_CHECK
*/
if (sh->ops.zero_sum_result == 0) {
- /* both parities are correct */
- if (!s->failed)
- set_bit(STRIPE_INSYNC, &sh->state);
- else {
- /* in contrast to the raid5 case we can validate
- * parity, but still have a failure to write
- * back
- */
- sh->check_state = check_state_compute_result;
- /* Returning at this point means that we may go
- * off and bring p and/or q uptodate again so
- * we make sure to check zero_sum_result again
- * to verify if p or q need writeback
- */
- }
+ /* Any parity checked was correct */
+ set_bit(STRIPE_INSYNC, &sh->state);
} else {
atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches);
if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
@@ -6166,6 +6159,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
static int handle_active_stripes(struct r5conf *conf, int group,
struct r5worker *worker,
struct list_head *temp_inactive_list)
+ __releases(&conf->device_lock)
+ __acquires(&conf->device_lock)
{
struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;
int i, batch_size = 0, hash;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index d271bd731af7..01f40672507f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -391,7 +391,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
bb_present = badblocks_check(&nd_region->bb, meta_start,
meta_num, &first_bad, &num_bad);
if (bb_present) {
- dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
+ (first_bad << 9)) - nsio->res.start,
@@ -410,7 +410,7 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
}
if (rc) {
dev_err(&nd_pfn->dev,
- "error clearing %x badblocks at %lx\n",
+ "error clearing %x badblocks at %llx\n",
num_bad, first_bad);
return rc;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2c43e12b70af..248ff3b48041 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1105,7 +1105,7 @@ static struct nvme_id_ns *nvme_identify_ns(struct nvme_ctrl *ctrl,
error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
if (error) {
- dev_warn(ctrl->device, "Identify namespace failed\n");
+ dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error);
kfree(id);
return NULL;
}
@@ -1588,7 +1588,7 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
static void nvme_update_disk_info(struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id)
{
- sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+ sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9);
unsigned short bs = 1 << ns->lba_shift;
blk_mq_freeze_queue(disk->queue);
@@ -2549,7 +2549,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->crdt[2] = le16_to_cpu(id->crdt3);
ctrl->oacs = le16_to_cpu(id->oacs);
- ctrl->oncs = le16_to_cpup(&id->oncs);
+ ctrl->oncs = le16_to_cpu(id->oncs);
ctrl->oaes = le32_to_cpu(id->oaes);
atomic_set(&ctrl->abort_limit, id->acl + 1);
ctrl->vwc = id->vwc;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a90cf5d63aac..c1eecde6b853 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -177,7 +177,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
* commands and one for I/O commands).
*/
struct nvme_queue {
- struct device *q_dmadev;
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
@@ -189,7 +188,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- s16 cq_vector;
+ u16 cq_vector;
u16 sq_tail;
u16 last_sq_tail;
u16 cq_head;
@@ -200,6 +199,7 @@ struct nvme_queue {
#define NVMEQ_ENABLED 0
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
+#define NVMEQ_POLLED 3
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
@@ -208,10 +208,10 @@ struct nvme_queue {
};
/*
- * The nvme_iod describes the data in an I/O, including the list of PRP
- * entries. You can't see it in this data structure because C doesn't let
- * me express that. Use nvme_init_iod to ensure there's enough space
- * allocated to store the PRP list.
+ * The nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP/SGL chunk allocations in addition
+ * to the actual struct scatterlist.
*/
struct nvme_iod {
struct nvme_request req;
@@ -220,11 +220,10 @@ struct nvme_iod {
int aborted;
int npages; /* In the PRP list. 0 means small pool in use */
int nents; /* Used in scatterlist */
- int length; /* Of data, in bytes */
dma_addr_t first_dma;
- struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
+ unsigned int dma_len; /* length of single DMA segment mapping */
+ dma_addr_t meta_dma;
struct scatterlist *sg;
- struct scatterlist inline_sg[0];
};
/*
@@ -372,12 +371,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
}
/*
- * Max size of iod being embedded in the request payload
- */
-#define NVME_INT_PAGES 2
-#define NVME_INT_BYTES(dev) (NVME_INT_PAGES * (dev)->ctrl.page_size)
-
-/*
* Will slightly overestimate the number of pages needed. This is OK
* as it only leads to a small amount of wasted memory for the lifetime of
* the I/O.
@@ -411,15 +404,6 @@ static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
return alloc_size + sizeof(struct scatterlist) * nseg;
}
-static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
-{
- unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
- NVME_INT_BYTES(dev), NVME_INT_PAGES,
- use_sgl);
-
- return sizeof(struct nvme_iod) + alloc_size;
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -584,37 +568,26 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
return true;
}
-static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
- int nseg = blk_rq_nr_phys_segments(rq);
- unsigned int size = blk_rq_payload_bytes(rq);
-
- iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ enum dma_data_direction dma_dir = rq_data_dir(req) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+ int i;
- if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- } else {
- iod->sg = iod->inline_sg;
+ if (iod->dma_len) {
+ dma_unmap_page(dev->dev, dma_addr, iod->dma_len, dma_dir);
+ return;
}
- iod->aborted = 0;
- iod->npages = -1;
- iod->nents = 0;
- iod->length = size;
+ WARN_ON_ONCE(!iod->nents);
- return BLK_STS_OK;
-}
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- const int last_prp = dev->ctrl.page_size / sizeof(__le64) - 1;
- dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
-
- int i;
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
@@ -638,8 +611,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
dma_addr = next_dma_addr;
}
- if (iod->sg != iod->inline_sg)
- mempool_free(iod->sg, dev->iod_mempool);
+ mempool_free(iod->sg, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -829,80 +801,103 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_OK;
}
+static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+ if (bv->bv_len > first_prp_len)
+ cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+ return 0;
+}
+
+static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
+ struct request *req, struct nvme_rw_command *cmnd,
+ struct bio_vec *bv)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->first_dma))
+ return BLK_STS_RESOURCE;
+ iod->dma_len = bv->bv_len;
+
+ cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
+ cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
+ cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+ return 0;
+}
+
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct request_queue *q = req->q;
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
- blk_status_t ret = BLK_STS_IOERR;
+ blk_status_t ret = BLK_STS_RESOURCE;
int nr_mapped;
+ if (blk_rq_nr_phys_segments(req) == 1) {
+ struct bio_vec bv = req_bvec(req);
+
+ if (!is_pci_p2pdma_page(bv.bv_page)) {
+ if (bv.bv_offset + bv.bv_len <= dev->ctrl.page_size * 2)
+ return nvme_setup_prp_simple(dev, req,
+ &cmnd->rw, &bv);
+
+ if (iod->nvmeq->qid &&
+ dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
+ return nvme_setup_sgl_simple(dev, req,
+ &cmnd->rw, &bv);
+ }
+ }
+
+ iod->dma_len = 0;
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+ if (!iod->sg)
+ return BLK_STS_RESOURCE;
sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nents = blk_rq_map_sg(q, req, iod->sg);
+ iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto out;
- ret = BLK_STS_RESOURCE;
-
if (is_pci_p2pdma_page(sg_page(iod->sg)))
nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
- dma_dir);
+ rq_dma_dir(req));
else
nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
- dma_dir, DMA_ATTR_NO_WARN);
+ rq_dma_dir(req), DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
+ iod->use_sgl = nvme_pci_use_sgls(dev, req);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-
- if (ret != BLK_STS_OK)
- goto out_unmap;
-
- ret = BLK_STS_IOERR;
- if (blk_integrity_rq(req)) {
- if (blk_rq_count_integrity_sg(q, req->bio) != 1)
- goto out_unmap;
-
- sg_init_table(&iod->meta_sg, 1);
- if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
- goto out_unmap;
-
- if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
- goto out_unmap;
-
- cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
- }
-
- return BLK_STS_OK;
-
-out_unmap:
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
+ if (ret != BLK_STS_OK)
+ nvme_unmap_data(dev, req);
return ret;
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- if (iod->nents) {
- /* P2PDMA requests do not need to be unmapped */
- if (!is_pci_p2pdma_page(sg_page(iod->sg)))
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
- if (blk_integrity_rq(req))
- dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
- }
-
- nvme_cleanup_cmd(req);
- nvme_free_iod(dev, req);
+ iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+ rq_dma_dir(req), 0);
+ if (dma_mapping_error(dev->dev, iod->meta_dma))
+ return BLK_STS_IOERR;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ return 0;
}
/*
@@ -915,9 +910,14 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_queue *nvmeq = hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_command cmnd;
blk_status_t ret;
+ iod->aborted = 0;
+ iod->npages = -1;
+ iod->nents = 0;
+
/*
* We should not need to do this, but we're still using this to
* ensure we can drain requests on a dying queue.
@@ -929,21 +929,23 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- ret = nvme_init_iod(req, dev);
- if (ret)
- goto out_free_cmd;
-
if (blk_rq_nr_phys_segments(req)) {
ret = nvme_map_data(dev, req, &cmnd);
if (ret)
- goto out_cleanup_iod;
+ goto out_free_cmd;
+ }
+
+ if (blk_integrity_rq(req)) {
+ ret = nvme_map_metadata(dev, req, &cmnd);
+ if (ret)
+ goto out_unmap_data;
}
blk_mq_start_request(req);
nvme_submit_cmd(nvmeq, &cmnd, bd->last);
return BLK_STS_OK;
-out_cleanup_iod:
- nvme_free_iod(dev, req);
+out_unmap_data:
+ nvme_unmap_data(dev, req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -952,8 +954,14 @@ out_free_cmd:
static void nvme_pci_complete_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_dev *dev = iod->nvmeq->dev;
- nvme_unmap_data(iod->nvmeq->dev, req);
+ nvme_cleanup_cmd(req);
+ if (blk_integrity_rq(req))
+ dma_unmap_page(dev->dev, iod->meta_dma,
+ rq_integrity_vec(req)->bv_len, rq_data_dir(req));
+ if (blk_rq_nr_phys_segments(req))
+ nvme_unmap_data(dev, req);
nvme_complete_rq(req);
}
@@ -1088,7 +1096,7 @@ static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
* using the CQ lock. For normal interrupt driven threads we have
* to disable the interrupt to avoid racing with it.
*/
- if (nvmeq->cq_vector == -1) {
+ if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) {
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, tag);
spin_unlock(&nvmeq->cq_poll_lock);
@@ -1148,7 +1156,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG;
- if (vector != -1)
+ if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
flags |= NVME_CQ_IRQ_ENABLED;
/*
@@ -1161,10 +1169,7 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
- if (vector != -1)
- c.create_cq.irq_vector = cpu_to_le16(vector);
- else
- c.create_cq.irq_vector = 0;
+ c.create_cq.irq_vector = cpu_to_le16(vector);
return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
@@ -1371,16 +1376,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
static void nvme_free_queue(struct nvme_queue *nvmeq)
{
- dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
if (!nvmeq->sq_cmds)
return;
if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
- pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
} else {
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}
}
@@ -1410,10 +1415,8 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
- if (nvmeq->cq_vector == -1)
- return 0;
- pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
- nvmeq->cq_vector = -1;
+ if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
}
@@ -1498,7 +1501,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
goto free_cqdma;
- nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
spin_lock_init(&nvmeq->sq_lock);
spin_lock_init(&nvmeq->cq_poll_lock);
@@ -1507,7 +1509,6 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->qid = qid;
- nvmeq->cq_vector = -1;
dev->ctrl.queue_count++;
return 0;
@@ -1552,7 +1553,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
int result;
- s16 vector;
+ u16 vector = 0;
clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
@@ -1563,7 +1564,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
if (!polled)
vector = dev->num_vecs == 1 ? 0 : qid;
else
- vector = -1;
+ set_bit(NVMEQ_POLLED, &nvmeq->flags);
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result)
@@ -1578,7 +1579,8 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
- if (vector != -1) {
+ if (!polled) {
+ nvmeq->cq_vector = vector;
result = queue_request_irq(nvmeq);
if (result < 0)
goto release_sq;
@@ -1588,7 +1590,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
return result;
release_sq:
- nvmeq->cq_vector = -1;
dev->online_queues--;
adapter_delete_sq(dev, qid);
release_cq:
@@ -1639,7 +1640,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
dev->admin_tagset.numa_node = dev_to_node(dev->dev);
- dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+ dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1730,7 +1731,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
nvme_init_queue(nvmeq, 0);
result = queue_request_irq(nvmeq);
if (result) {
- nvmeq->cq_vector = -1;
+ dev->online_queues--;
return result;
}
@@ -2171,10 +2172,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* number of interrupts.
*/
result = queue_request_irq(adminq);
- if (result) {
- adminq->cq_vector = -1;
+ if (result)
return result;
- }
set_bit(NVMEQ_ENABLED, &adminq->flags);
result = nvme_create_io_queues(dev);
@@ -2286,11 +2285,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
- dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
- if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
- dev->tagset.cmd_size = max(dev->tagset.cmd_size,
- nvme_pci_cmd_size(dev, true));
- }
+ dev->tagset.cmd_size = sizeof(struct nvme_iod);
dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
dev->tagset.driver_data = dev;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b3e765a95af8..4d8dd29479c0 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -214,6 +214,8 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&subsys->lock);
+
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
@@ -494,13 +496,14 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret;
mutex_lock(&subsys->lock);
- ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = 0;
if (ns->enabled)
goto out_unlock;
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 98b7b1f4ee96..9369a11fe7a9 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -128,12 +128,12 @@ struct nvmet_fc_tgt_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
- struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
struct list_head pending_cmd_list;
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
@@ -588,9 +588,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (qid > NVMET_NR_QUEUES)
return NULL;
- queue = kzalloc((sizeof(*queue) +
- (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
- GFP_KERNEL);
+ queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
if (!queue)
return NULL;
@@ -603,7 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue->work_q)
goto out_a_put;
- queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index a065dbfc43b1..3efc52f9c309 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -196,7 +196,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
- return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ return errno_to_nvme_status(req, ret);
}
return NVME_SC_SUCCESS;
}
@@ -252,7 +252,6 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
struct bio *bio = NULL;
- u16 status = NVME_SC_SUCCESS;
sector_t sector;
sector_t nr_sector;
int ret;
@@ -264,13 +263,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0);
- status = blk_to_nvme_status(req, errno_to_blk_status(ret));
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
submit_bio(bio);
} else {
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
}
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ad0df786fe93..0a941abf56ec 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -371,7 +371,8 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DATA_PDU;
pdu->hdr.type = nvme_tcp_c2h_data;
- pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+ NVME_TCP_F_DATA_SUCCESS : 0);
pdu->hdr.hlen = sizeof(*pdu);
pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
pdu->hdr.plen =
@@ -542,8 +543,19 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DDGST;
cmd->offset = 0;
} else {
- nvmet_setup_response_pdu(cmd);
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
}
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ }
+
return 1;
}
@@ -619,7 +631,13 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
return ret;
cmd->offset += ret;
- nvmet_setup_response_pdu(cmd);
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
return 1;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2b2bc4b49d78..ebc80354714c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2256,22 +2256,6 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
#define READ_CAPACITY_RETRIES_ON_RESET 10
-/*
- * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
- * and the reported logical block size is bigger than 512 bytes. Note
- * that last_sector is a u64 and therefore logical_to_sectors() is not
- * applicable.
- */
-static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
-{
- u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
-
- if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
- return false;
-
- return true;
-}
-
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2337,14 +2321,6 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
return -ENODEV;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
/* Logical blocks per physical block exponent */
sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;
@@ -2426,14 +2402,6 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
return sector_size;
}
- if (!sd_addressable_capacity(lba, sector_size)) {
- sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
- "kernel compiled with support for large block "
- "devices.\n");
- sdkp->capacity = 0;
- return -EOVERFLOW;
- }
-
sdkp->capacity = lba + 1;
sdkp->physical_block_size = sector_size;
return sector_size;
@@ -3325,6 +3293,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
}
blk_pm_runtime_init(sdp->request_queue, dev);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 039c27c2d7b3..c3f443d5aea8 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -716,6 +716,7 @@ static int sr_probe(struct device *dev)
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index f3fbb700f569..05a286d24f14 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -4,12 +4,13 @@
#include <xen/xen.h>
#include <xen/page.h>
+/* check if @page can be merged with 'vec1' */
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2)
+ const struct page *page)
{
#if XEN_PAGE_SIZE == PAGE_SIZE
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+ unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e7ae26e36c9c..38faf661e237 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1760,8 +1760,6 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
ext4_msg(sb, KERN_ERR,
"filesystem too large to resize to %llu blocks safely",
n_blocks_count);
- if (sizeof(sector_t) < 8)
- ext4_warning(sb, "CONFIG_LBDAF not enabled");
return -EINVAL;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6ed4eb81e674..d10e9e724bdd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2706,13 +2706,9 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
loff_t res;
loff_t upper_limit = MAX_LFS_FILESIZE;
- /* small i_blocks in vfs inode? */
- if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
- /*
- * CONFIG_LBDAF is not enabled implies the inode
- * i_block represent total blocks in 512 bytes
- * 32 == size of vfs inode i_blocks * 8
- */
+ BUILD_BUG_ON(sizeof(blkcnt_t) < sizeof(u64));
+
+ if (!has_huge_files) {
upper_limit = (1LL << 32) - 1;
/* total blocks in file system block size */
@@ -2753,11 +2749,11 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
* number of 512-byte sectors of the file.
*/
- if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
+ if (!has_huge_files) {
/*
- * !has_huge_files or CONFIG_LBDAF not enabled implies that
- * the inode i_block field represents total file blocks in
- * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
+	 * !has_huge_files implies that the inode i_block field
+ * represents total file blocks in 2^32 512-byte sectors ==
+ * size of vfs inode i_blocks * 8
*/
upper_limit = (1LL << 32) - 1;
@@ -2897,18 +2893,6 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
~EXT4_FEATURE_RO_COMPAT_SUPP));
return 0;
}
- /*
- * Large file size enabled file system can only be mounted
- * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
- */
- if (ext4_has_feature_huge_file(sb)) {
- if (sizeof(blkcnt_t) < sizeof(u64)) {
- ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
- "cannot be mounted RDWR without "
- "CONFIG_LBDAF");
- return 0;
- }
- }
if (ext4_has_feature_bigalloc(sb) && !ext4_has_feature_extents(sb)) {
ext4_msg(sb, KERN_ERR,
"Can't support bigalloc feature without "
@@ -4057,8 +4041,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (err) {
ext4_msg(sb, KERN_ERR, "filesystem"
" too large to mount safely on this system");
- if (sizeof(sector_t) < 8)
- ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
goto failed_mount;
}
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 3ed2b088dcfd..6a1e499543f5 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,5 @@
config GFS2_FS
tristate "GFS2 file system support"
- depends on (64BIT || LBDAF)
select FS_POSIX_ACL
select CRC32
select LIBCRC32C
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 5f93cfacb3d1..69d02cf8cf37 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -121,7 +121,6 @@ config PNFS_FILE_LAYOUT
config PNFS_BLOCK
tristate
depends on NFS_V4_1 && BLK_DEV_DM
- depends on 64BIT || LBDAF
default NFS_V4
config PNFS_FLEXFILE_LAYOUT
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 96ae7cedd487..fc3d29eceb2f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -600,7 +600,6 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
*/
#if BITS_PER_LONG == 32
-# if defined(CONFIG_LBDAF)
BUILD_BUG_ON(sizeof(sector_t) != 8);
/*
* We might be limited by page cache size.
@@ -614,15 +613,6 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
*/
bitshift = 31;
}
-# else
- /*
- * We are limited by the size of sector_t. Use block size, as
- * that's what we expose to the VFS.
- */
- bytes = 1 << bbits;
- trim = 1;
- bitshift = 31;
-# endif
#endif
/*
diff --git a/fs/stack.c b/fs/stack.c
index a54e33ed10f1..664ed35558bd 100644
--- a/fs/stack.c
+++ b/fs/stack.c
@@ -21,11 +21,10 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
i_size = i_size_read(src);
/*
- * But if CONFIG_LBDAF (on 32-bit), we ought to make an effort to
- * keep the two halves of i_blocks in sync despite SMP or PREEMPT -
- * though stat's generic_fillattr() doesn't bother, and we won't be
- * applying quotas (where i_blocks does become important) at the
- * upper level.
+ * But on 32-bit, we ought to make an effort to keep the two halves of
+ * i_blocks in sync despite SMP or PREEMPT - though stat's
+ * generic_fillattr() doesn't bother, and we won't be applying quotas
+ * (where i_blocks does become important) at the upper level.
*
* We don't actually know what locking is used at the lower level;
* but if it's a filesystem that supports quotas, it will be using
@@ -44,9 +43,9 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
* include/linux/fs.h). We don't necessarily hold i_mutex when this
* is called, so take i_lock for that case.
*
- * And if CONFIG_LBDAF (on 32-bit), continue our effort to keep the
- * two halves of i_blocks in sync despite SMP or PREEMPT: use i_lock
- * for that case too, and do both at once by combining the tests.
+ * And if on 32-bit, continue our effort to keep the two halves of
+ * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case
+ * too, and do both at once by combining the tests.
*
* There is none of this locking overhead in the 64-bit case.
*/
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 457ac9f97377..99af5e5bda9f 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -1,7 +1,6 @@
config XFS_FS
tristate "XFS filesystem support"
depends on BLOCK
- depends on (64BIT || LBDAF)
select EXPORTFS
select LIBCRC32C
select FS_IOMAP
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f093ea244849..703b6be063ef 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -539,26 +539,18 @@ xfs_max_file_offset(
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
- * __block_write_begin does this in an [unsigned] long...
+ * __block_write_begin does this in an [unsigned] long long...
* page->index << (PAGE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
* (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
* but for smaller blocksizes it is less (bbits = log2 bsize).
- * Note1: get_block_t takes a long (implicit cast from above)
- * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
- * can optionally convert the [unsigned] long from above into
- * an [unsigned] long long.
*/
#if BITS_PER_LONG == 32
-# if defined(CONFIG_LBDAF)
ASSERT(sizeof(sector_t) == 8);
pagefactor = PAGE_SIZE;
bitshift = BITS_PER_LONG;
-# else
- pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
-# endif
#endif
return (((uint64_t)pagefactor) << bitshift) - 1;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index e584673c1881..9577ad8f6e28 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -436,6 +436,9 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+extern int __bio_add_pc_page(struct request_queue *, struct bio *,
+ struct page *, unsigned int, unsigned int,
+ bool);
bool __bio_try_merge_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off, bool same_page);
void __bio_add_page(struct bio *bio, struct page *page,
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 791fee35df88..be418275763c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,21 +215,24 @@ struct bio {
/*
* bio flags
*/
-#define BIO_NO_PAGE_REF 0 /* don't put release vec pages */
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
-#define BIO_CLONED 2 /* doesn't own data */
-#define BIO_BOUNCED 3 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 4 /* contains user pages */
-#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
-#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
-#define BIO_THROTTLED 9 /* This bio has already been subjected to
+enum {
+ BIO_NO_PAGE_REF, /* don't put release vec pages */
+ BIO_SEG_VALID, /* bi_phys_segments valid */
+ BIO_CLONED, /* doesn't own data */
+ BIO_BOUNCED, /* bio is a bounce bio */
+ BIO_USER_MAPPED, /* contains user pages */
+ BIO_NULL_MAPPED, /* contains invalid user pages */
+ BIO_QUIET, /* Make BIO Quiet */
+ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
+ BIO_REFFED, /* bio has elevated ->bi_cnt */
+ BIO_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
-#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
-#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */
+ BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */
+ BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_FLAG_LAST
+};
/* See BVEC_POOL_OFFSET below before adding new flags */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 317ab30d2904..99aa98f60b9e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -640,6 +640,13 @@ static inline bool blk_account_rq(struct request *rq)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define dma_map_bvec(dev, bv, dir, attrs) \
+ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+ (dir), (attrs))
+
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
@@ -931,6 +938,17 @@ static inline unsigned int blk_rq_payload_bytes(struct request *rq)
return blk_rq_bytes(rq);
}
+/*
+ * Return the first full biovec in the request. The caller needs to check that
+ * there are any bvecs before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1547,6 +1565,17 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1621,6 +1650,11 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return 0;
}
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ff13cbc1887d..a4811410e4fc 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -51,11 +51,6 @@ struct bvec_iter_all {
unsigned done;
};
-static inline struct page *bvec_nth_page(struct page *page, int idx)
-{
- return idx == 0 ? page : nth_page(page, idx);
-}
-
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
@@ -92,8 +87,8 @@ static inline struct page *bvec_nth_page(struct page *page, int idx)
PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
#define bvec_iter_page(bvec, iter) \
- bvec_nth_page(mp_bvec_iter_page((bvec), (iter)), \
- mp_bvec_iter_page_idx((bvec), (iter)))
+ (mp_bvec_iter_page((bvec), (iter)) + \
+ mp_bvec_iter_page_idx((bvec), (iter)))
#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
@@ -151,13 +146,18 @@ static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
return &iter_all->bv;
}
+static inline struct page *bvec_nth_page(struct page *page, int idx)
+{
+ return idx == 0 ? page : nth_page(page, idx);
+}
+
static inline void bvec_advance(const struct bio_vec *bvec,
struct bvec_iter_all *iter_all)
{
struct bio_vec *bv = &iter_all->bv;
if (iter_all->done) {
- bv->bv_page = nth_page(bv->bv_page, 1);
+ bv->bv_page++;
bv->bv_offset = 0;
} else {
bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset /
@@ -184,7 +184,7 @@ static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
unsigned total = bvec->bv_offset + bvec->bv_len;
unsigned last_page = (total - 1) / PAGE_SIZE;
- seg->bv_page = bvec_nth_page(bvec->bv_page, last_page);
+ seg->bv_page = bvec->bv_page + last_page;
/* the whole segment is inside the last page */
if (bvec->bv_offset >= last_page * PAGE_SIZE) {
@@ -196,9 +196,4 @@ static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
}
}
-#define mp_bvec_for_each_page(pg, bv, i) \
- for (i = (bv)->bv_offset / PAGE_SIZE; \
- (i <= (((bv)->bv_offset + (bv)->bv_len - 1) / PAGE_SIZE)) && \
- (pg = bvec_nth_page((bv)->bv_page, i)); i += 1)
-
#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 06c0fd594097..6547c9256d5c 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -150,6 +150,13 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+};
+
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
@@ -184,8 +191,8 @@ struct gendisk {
char disk_name[DISK_NAME_LEN]; /* name of major driver */
char *(*devnode)(struct gendisk *gd, umode_t *mode);
- unsigned int events; /* supported events */
- unsigned int async_events; /* async events, subset of all */
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
/* Array of pointers to partitions indexed by partno.
* Protected with matching bdev lock but stat and other
@@ -714,7 +721,7 @@ static inline void hd_free_part(struct hd_struct *part)
*/
static inline sector_t part_nr_sects_read(struct hd_struct *part)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
sector_t nr_sects;
unsigned seq;
do {
@@ -722,7 +729,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
nr_sects = part->nr_sects;
} while (read_seqcount_retry(&part->nr_sects_seq, seq));
return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
sector_t nr_sects;
preempt_disable();
@@ -741,11 +748,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
*/
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_begin(&part->nr_sects_seq);
part->nr_sects = size;
write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
preempt_disable();
part->nr_sects = size;
preempt_enable();
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2d14e21c16c0..a3b59d143afb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -17,6 +17,7 @@
#include <asm/byteorder.h>
#include <asm/div64.h>
#include <uapi/linux/kernel.h>
+#include <asm/div64.h>
#define STACK_MAGIC 0xdeadbeef
@@ -175,18 +176,7 @@
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#ifdef CONFIG_LBDAF
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
- int _res; \
- _res = (n) % (b); \
- (n) /= (b); \
- _res; \
-} \
-)
-#endif
+#define sector_div(a, b) do_div(a, b)
/**
* upper_32_bits - return bits 32-63 of a number
diff --git a/include/linux/types.h b/include/linux/types.h
index cc0dbbe551d5..231114ae38f4 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -127,13 +127,8 @@ typedef s64 int64_t;
*
* blkcnt_t is the type of the inode's block count.
*/
-#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
-#else
-typedef unsigned long sector_t;
-typedef unsigned long blkcnt_t;
-#endif
/*
* The type of an index into the pagecache.
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 627624d35030..e092e124dd16 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -58,7 +58,7 @@ struct opal_key {
struct opal_lr_act {
struct opal_key key;
__u32 sum;
- __u8 num_lrs;
+ __u8 num_lrs;
__u8 lr[OPAL_MAX_LRS];
__u8 align[2]; /* Align to 8 byte boundary */
};
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19d032373de5..19a72f591e2b 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -43,8 +43,10 @@ extern struct hvm_start_info pvh_start_info;
#endif /* CONFIG_XEN_DOM0 */
struct bio_vec;
+struct page;
+
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
+ const struct page *page);
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
extern u64 xen_saved_max_mem_size;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 00dbcdbc9a0d..caedd54fa545 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1927,7 +1927,6 @@ config TEST_STATIC_KEYS
config TEST_KMOD
tristate "kmod stress tester"
depends on m
- depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
select TEST_LKM
select XFS_FS
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
index d27285f8ee82..8bc960e5e713 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
@@ -59,11 +59,7 @@ typedef __u32 uint32_t;
*
* blkcnt_t is the type of the inode's block count.
*/
-#ifdef CONFIG_LBDAF
typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
/*
* The type of an index into the pagecache.