author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-07-29 20:50:17 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-07-29 20:50:17 +0200
commit     9c5891bd4342349a200676d33f742dd1b864822c (patch)
tree       b14c1698f2d8ce5276e1befd562f6398a46b48b9 /drivers
parent     pcmcia: synclink_cs: replace sum of bitmasks with OR operation. (diff)
parent     Linux 3.11-rc3 (diff)
Merge 3.11-rc3 into char-misc-next.
This resolves a merge issue with: drivers/misc/mei/init.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/internal.h | 2
-rw-r--r--  drivers/acpi/video.c | 67
-rw-r--r--  drivers/acpi/video_detect.c | 15
-rw-r--r--  drivers/ata/Kconfig | 11
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 9
-rw-r--r--  drivers/ata/ahci_imx.c | 236
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libata-scsi.c | 6
-rw-r--r--  drivers/ata/sata_inic162x.c | 14
-rw-r--r--  drivers/block/Kconfig | 4
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 21
-rw-r--r--  drivers/block/drbd/drbd_int.h | 15
-rw-r--r--  drivers/block/drbd/drbd_main.c | 61
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 185
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 12
-rw-r--r--  drivers/block/drbd/drbd_state.c | 4
-rw-r--r--  drivers/block/rsxx/core.c | 359
-rw-r--r--  drivers/block/rsxx/cregs.c | 14
-rw-r--r--  drivers/block/rsxx/dev.c | 33
-rw-r--r--  drivers/block/rsxx/dma.c | 185
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h | 10
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 872
-rw-r--r--  drivers/block/xen-blkback/common.h | 147
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 85
-rw-r--r--  drivers/block/xen-blkfront.c | 532
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 12
-rw-r--r--  drivers/crypto/caam/caamhash.c | 2
-rw-r--r--  drivers/edac/edac_mc.c | 9
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 28
-rw-r--r--  drivers/edac/i5100_edac.c | 2
-rw-r--r--  drivers/gpio/gpio-msm-v2.c | 2
-rw-r--r--  drivers/gpio/gpio-omap.c | 84
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/falcon.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/falcon.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 69
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 46
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 263
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 76
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 184
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c | 10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 111
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 319
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 212
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 43
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/r600_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 159
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv6xx_dpm.c | 41
-rw-r--r--  drivers/hv/hv_balloon.c | 21
-rw-r--r--  drivers/hv/vmbus_drv.c | 8
-rw-r--r--  drivers/md/bcache/alloc.c | 46
-rw-r--r--  drivers/md/bcache/bcache.h | 61
-rw-r--r--  drivers/md/bcache/bset.c | 56
-rw-r--r--  drivers/md/bcache/bset.h | 4
-rw-r--r--  drivers/md/bcache/btree.c | 451
-rw-r--r--  drivers/md/bcache/btree.h | 35
-rw-r--r--  drivers/md/bcache/closure.c | 6
-rw-r--r--  drivers/md/bcache/debug.c | 178
-rw-r--r--  drivers/md/bcache/debug.h | 11
-rw-r--r--  drivers/md/bcache/io.c | 68
-rw-r--r--  drivers/md/bcache/journal.c | 25
-rw-r--r--  drivers/md/bcache/movinggc.c | 24
-rw-r--r--  drivers/md/bcache/request.c | 197
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 171
-rw-r--r--  drivers/md/bcache/sysfs.c | 68
-rw-r--r--  drivers/md/bcache/trace.c | 47
-rw-r--r--  drivers/md/bcache/util.c | 17
-rw-r--r--  drivers/md/bcache/util.h | 6
-rw-r--r--  drivers/md/bcache/writeback.c | 133
-rw-r--r--  drivers/md/bcache/writeback.h | 64
-rw-r--r--  drivers/md/raid10.c | 8
-rw-r--r--  drivers/md/raid5.c | 15
-rw-r--r--  drivers/md/raid5.h | 1
-rw-r--r--  drivers/misc/atmel-ssc.c | 11
-rw-r--r--  drivers/misc/mei/hbm.c | 2
-rw-r--r--  drivers/misc/mei/hw-me.c | 14
-rw-r--r--  drivers/misc/mei/init.c | 5
-rw-r--r--  drivers/mmc/host/pxamci.c | 2
-rw-r--r--  drivers/of/irq.c | 6
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 1
-rw-r--r--  drivers/pinctrl/core.c | 1
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-sh73a0.c | 1
-rw-r--r--  drivers/pinctrl/sirf/pinctrl-atlas6.c | 24
-rw-r--r--  drivers/scsi/isci/request.c | 2
-rw-r--r--  drivers/scsi/isci/task.c | 9
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 11
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 11
-rw-r--r--  drivers/scsi/sd.c | 22
-rw-r--r--  drivers/staging/android/logger.c | 4
-rw-r--r--  drivers/staging/comedi/TODO | 2
-rw-r--r--  drivers/staging/comedi/comedi_fops.c | 32
-rw-r--r--  drivers/staging/frontier/alphatrack.c | 2
-rw-r--r--  drivers/staging/gdm72xx/gdm_qos.c | 2
-rw-r--r--  drivers/staging/imx-drm/Kconfig | 1
-rw-r--r--  drivers/staging/tidspbridge/pmgr/dbll.c | 7
-rw-r--r--  drivers/staging/zram/zram_drv.c | 6
-rw-r--r--  drivers/thermal/x86_pkg_temp_thermal.c | 18
-rw-r--r--  drivers/tty/serial/8250/8250_early.c | 3
-rw-r--r--  drivers/tty/serial/Kconfig | 2
-rw-r--r--  drivers/tty/synclinkmp.c | 2
-rw-r--r--  drivers/usb/core/hub.c | 48
-rw-r--r--  drivers/usb/core/hub.h | 3
-rw-r--r--  drivers/usb/dwc3/Kconfig | 2
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/core.h | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/Kconfig | 5
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 16
-rw-r--r--  drivers/usb/gadget/f_ecm.c | 7
-rw-r--r--  drivers/usb/gadget/f_eem.c | 7
-rw-r--r--  drivers/usb/gadget/f_ncm.c | 7
-rw-r--r--  drivers/usb/gadget/f_phonet.c | 7
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 7
-rw-r--r--  drivers/usb/gadget/f_subset.c | 7
-rw-r--r--  drivers/usb/gadget/fotg210-udc.c | 4
-rw-r--r--  drivers/usb/gadget/mv_u3d_core.c | 4
-rw-r--r--  drivers/usb/gadget/udc-core.c | 6
-rw-r--r--  drivers/usb/host/ehci-hub.c | 1
-rw-r--r--  drivers/usb/host/pci-quirks.h | 1
-rw-r--r--  drivers/usb/host/xhci-pci.c | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 17
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb.c | 1
-rw-r--r--  drivers/usb/phy/phy-omap-usb3.c | 2
-rw-r--r--  drivers/usb/phy/phy-samsung-usb2.c | 2
-rw-r--r--  drivers/usb/renesas_usbhs/mod_gadget.c | 4
-rw-r--r--  drivers/usb/serial/cp210x.c | 4
-rw-r--r--  drivers/usb/serial/mos7840.c | 25
-rw-r--r--  drivers/usb/serial/option.c | 23
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/vhost/net.c | 37
-rw-r--r--  drivers/vhost/scsi.c | 17
-rw-r--r--  drivers/vhost/test.c | 6
-rw-r--r--  drivers/vhost/vhost.h | 10
-rw-r--r--  drivers/video/backlight/max8925_bl.c | 41
171 files changed, 4810 insertions(+), 2526 deletions(-)
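
To regenerate this summary locally, a minimal sketch (assuming the commit is available in a mainline kernel clone, and assuming cgit's convention of diffing a merge against its first parent):

    # Hypothetical reproduction of the drivers/-limited diffstat above.
    git diff --stat 9c5891bd4342349a200676d33f742dd1b864822c^1 \
                    9c5891bd4342349a200676d33f742dd1b864822c -- drivers/
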
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 227aca77ee1e..5da44e81dd4d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -169,10 +169,8 @@ int acpi_create_platform_device(struct acpi_device *adev,
-------------------------------------------------------------------------- */
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
bool acpi_video_backlight_quirks(void);
-bool acpi_video_verify_backlight_support(void);
#else
static inline bool acpi_video_backlight_quirks(void) { return false; }
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6dd237e79b4f..0ec434d2586d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -911,7 +911,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (acpi_video_init_brightness(device))
return;
- if (acpi_video_verify_backlight_support()) {
+ if (acpi_video_backlight_support()) {
struct backlight_properties props;
struct pci_dev *pdev;
acpi_handle acpi_parent;
@@ -1366,8 +1366,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
- /* no warning message if acpi_backlight=vendor or a quirk is used */
- if (!acpi_video_verify_backlight_support())
+ /* no warning message if acpi_backlight=vendor is used */
+ if (!acpi_video_backlight_support())
return 0;
if (!device->brightness)
@@ -1875,46 +1875,6 @@ static int acpi_video_bus_remove(struct acpi_device *device)
return 0;
}
-static acpi_status video_unregister_backlight(acpi_handle handle, u32 lvl,
- void *context, void **rv)
-{
- struct acpi_device *acpi_dev;
- struct acpi_video_bus *video;
- struct acpi_video_device *dev, *next;
-
- if (acpi_bus_get_device(handle, &acpi_dev))
- return AE_OK;
-
- if (acpi_match_device_ids(acpi_dev, video_device_ids))
- return AE_OK;
-
- video = acpi_driver_data(acpi_dev);
- if (!video)
- return AE_OK;
-
- acpi_video_bus_stop_devices(video);
- mutex_lock(&video->device_list_lock);
- list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
- if (dev->backlight) {
- backlight_device_unregister(dev->backlight);
- dev->backlight = NULL;
- kfree(dev->brightness->levels);
- kfree(dev->brightness);
- }
- if (dev->cooling_dev) {
- sysfs_remove_link(&dev->dev->dev.kobj,
- "thermal_cooling");
- sysfs_remove_link(&dev->cooling_dev->device.kobj,
- "device");
- thermal_cooling_device_unregister(dev->cooling_dev);
- dev->cooling_dev = NULL;
- }
- }
- mutex_unlock(&video->device_list_lock);
- acpi_video_bus_start_devices(video);
- return AE_OK;
-}
-
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
@@ -1946,25 +1906,14 @@ static int __init intel_opregion_present(void)
return opregion;
}
-int __acpi_video_register(bool backlight_quirks)
+int acpi_video_register(void)
{
- bool no_backlight;
- int result;
-
- no_backlight = backlight_quirks ? acpi_video_backlight_quirks() : false;
-
+ int result = 0;
if (register_count) {
/*
- * If acpi_video_register() has been called already, don't try
- * to register acpi_video_bus, but unregister backlight devices
- * if no backlight support is requested.
+ * if the function of acpi_video_register is already called,
+ * don't register the acpi_vide_bus again and return no error.
*/
- if (no_backlight)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- video_unregister_backlight,
- NULL, NULL, NULL);
-
return 0;
}
@@ -1980,7 +1929,7 @@ int __acpi_video_register(bool backlight_quirks)
return 0;
}
-EXPORT_SYMBOL(__acpi_video_register);
+EXPORT_SYMBOL(acpi_video_register);
void acpi_video_unregister(void)
{
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 826e52def080..c3397748ba46 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -238,12 +238,7 @@ static void acpi_video_caps_check(void)
bool acpi_video_backlight_quirks(void)
{
- if (acpi_gbl_osi_data >= ACPI_OSI_WIN_8) {
- acpi_video_caps_check();
- acpi_video_support |= ACPI_VIDEO_SKIP_BACKLIGHT;
- return true;
- }
- return false;
+ return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_video_backlight_quirks);
@@ -291,14 +286,6 @@ int acpi_video_backlight_support(void)
}
EXPORT_SYMBOL(acpi_video_backlight_support);
-/* For the ACPI video driver use only. */
-bool acpi_video_verify_backlight_support(void)
-{
- return (acpi_video_support & ACPI_VIDEO_SKIP_BACKLIGHT) ?
- false : acpi_video_backlight_support();
-}
-EXPORT_SYMBOL(acpi_video_verify_backlight_support);
-
/*
* Use acpi_backlight=vendor/video to force that backlight switching
* is processed by vendor specific acpi drivers or video.ko driver.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e4..4e737728aee2 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
+config AHCI_IMX
+ tristate "Freescale i.MX AHCI SATA support"
+ depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+ help
+ This option enables support for the Freescale i.MX SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
If unsure, say N.
config SATA_INIC162X
- tristate "Initio 162x SATA support"
+ tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
help
This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..46518c622460 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
+obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f1..db4380d70031 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
return rc;
for (i = 0; i < host->n_ports; i++) {
+ const char* desc;
struct ahci_port_priv *pp = host->ports[i]->private_data;
+ /* pp is NULL for dummy ports */
+ if (pp)
+ desc = pp->irq_desc;
+ else
+ desc = dev_driver_string(host->dev);
+
rc = devm_request_threaded_irq(host->dev,
irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
- pp->irq_desc, host->ports[i]);
+ desc, host->ports[i]);
if (rc)
goto out_free_irqs;
}
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 000000000000..58debb0acc3a
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
+/*
+ * Freescale IMX AHCI SATA platform driver
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include "ahci.h"
+
+enum {
+ HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+};
+
+struct imx_ahci_priv {
+ struct platform_device *ahci_pdev;
+ struct clk *sata_ref_clk;
+ struct clk *ahb_clk;
+ struct regmap *gpr;
+};
+
+static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+{
+ int ret = 0;
+ unsigned int reg_val;
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ imxpriv->gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
+ return PTR_ERR(imxpriv->gpr);
+ }
+
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+ if (ret < 0) {
+ dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
+ return ret;
+ }
+
+ /*
+ * set PHY Paremeters, two steps to configure the GPR13,
+ * one write for rest of parameters, mask of first write
+ * is 0x07fffffd, and the other one write for setting
+ * the mpll_clk_en.
+ */
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
+ | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
+ | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
+ | IMX6Q_GPR13_SATA_SPD_MODE_MASK
+ | IMX6Q_GPR13_SATA_MPLL_SS_EN
+ | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
+ | IMX6Q_GPR13_SATA_TX_BOOST_MASK
+ | IMX6Q_GPR13_SATA_TX_LVL_MASK
+ | IMX6Q_GPR13_SATA_TX_EDGE_RATE
+ , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
+ | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
+ | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
+ | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
+ | IMX6Q_GPR13_SATA_MPLL_SS_EN
+ | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
+ | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
+ | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ usleep_range(100, 200);
+
+ /*
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+ * and IP vendor specific register HOST_TIMER1MS.
+ * Configure CAP_SSS (support stagered spin up).
+ * Implement the port0.
+ * Get the ahb clock rate, and configure the TIMER1MS register.
+ */
+ reg_val = readl(mmio + HOST_CAP);
+ if (!(reg_val & HOST_CAP_SSS)) {
+ reg_val |= HOST_CAP_SSS;
+ writel(reg_val, mmio + HOST_CAP);
+ }
+ reg_val = readl(mmio + HOST_PORTS_IMPL);
+ if (!(reg_val & 0x1)) {
+ reg_val |= 0x1;
+ writel(reg_val, mmio + HOST_PORTS_IMPL);
+ }
+
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, mmio + HOST_TIMER1MS);
+
+ return 0;
+}
+
+static void imx6q_sata_exit(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+}
+
+static struct ahci_platform_data imx6q_sata_pdata = {
+ .init = imx6q_sata_init,
+ .exit = imx6q_sata_exit,
+};
+
+static const struct of_device_id imx_ahci_of_match[] = {
+ { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+static int imx_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem, *irq, res[2];
+ const struct of_device_id *of_id;
+ const struct ahci_platform_data *pdata = NULL;
+ struct imx_ahci_priv *imxpriv;
+ struct device *ahci_dev;
+ struct platform_device *ahci_pdev;
+ int ret;
+
+ imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+ if (!imxpriv) {
+ dev_err(dev, "can't alloc ahci_host_priv\n");
+ return -ENOMEM;
+ }
+
+ ahci_pdev = platform_device_alloc("ahci", -1);
+ if (!ahci_pdev)
+ return -ENODEV;
+
+ ahci_dev = &ahci_pdev->dev;
+ ahci_dev->parent = dev;
+
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "can't get ahb clock.\n");
+ ret = PTR_ERR(imxpriv->ahb_clk);
+ goto err_out;
+ }
+
+ imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+ if (IS_ERR(imxpriv->sata_ref_clk)) {
+ dev_err(dev, "can't get sata_ref clock.\n");
+ ret = PTR_ERR(imxpriv->sata_ref_clk);
+ goto err_out;
+ }
+
+ imxpriv->ahci_pdev = ahci_pdev;
+ platform_set_drvdata(pdev, imxpriv);
+
+ of_id = of_match_device(imx_ahci_of_match, dev);
+ if (of_id) {
+ pdata = of_id->data;
+ } else {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ dev_err(dev, "no mmio/irq resource\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ res[0] = *mem;
+ res[1] = *irq;
+
+ ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
+ ahci_dev->of_node = dev->of_node;
+
+ ret = platform_device_add_resources(ahci_pdev, res, 2);
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add(ahci_pdev);
+ if (ret) {
+err_out:
+ platform_device_put(ahci_pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_ahci_remove(struct platform_device *pdev)
+{
+ struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
+ struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+
+ platform_device_unregister(ahci_pdev);
+ return 0;
+}
+
+static struct platform_driver imx_ahci_driver = {
+ .probe = imx_ahci_probe,
+ .remove = imx_ahci_remove,
+ .driver = {
+ .name = "ahci-imx",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_ahci_of_match,
+ },
+};
+module_platform_driver(imx_ahci_driver);
+
+MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
+MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb9..513ad7ed0c99 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e042..b1e880a3c3da 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -206,8 +206,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
unsigned long flags;
int rc;
- rc = strict_strtol(buf, 10, &input);
- if (rc || input < -2)
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if (input < -2)
return -EINVAL;
if (input > ATA_TMOUT_MAX_PARK) {
rc = -EOVERFLOW;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e45131748248..5c54d957370a 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
*
* This file is released under GPL v2.
*
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common. There isn't anyone working on the driver and there's
+ * no support from the vendor. Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
* This controller is eccentric and easily locks up if something isn't
* right. Documentation is available at initio's website but it only
* documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ata_print_version_once(&pdev->dev, DRV_VERSION);
+ dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index b81ddfea1da0..e07a5fd58ad7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
If unsure, say N.
config BLK_DEV_RSXX
- tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
+ tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
help
Device driver for IBM's high speed PCIe SSD
- storage devices: FlashSystem-70 and FlashSystem-80.
+ storage device: Flash Adapter 900GB Full Height.
To compile this driver as a module, choose M here: the
module will be called rsxx.
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 6608076dc39e..28c73ca320a8 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -659,6 +659,27 @@ void drbd_al_shrink(struct drbd_conf *mdev)
wake_up(&mdev->al_wait);
}
+int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
+{
+ struct al_transaction_on_disk *al = buffer;
+ struct drbd_md *md = &mdev->ldev->md;
+ sector_t al_base = md->md_offset + md->al_offset;
+ int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
+ int i;
+
+ memset(al, 0, 4096);
+ al->magic = cpu_to_be32(DRBD_AL_MAGIC);
+ al->transaction_type = cpu_to_be16(AL_TR_INITIALIZED);
+ al->crc32c = cpu_to_be32(crc32c(0, al, 4096));
+
+ for (i = 0; i < al_size_4k; i++) {
+ int err = drbd_md_sync_page_io(mdev, mdev->ldev, al_base + i * 8, WRITE);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index f943aacfdad8..2d7f608d181c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -832,6 +832,7 @@ struct drbd_tconn { /* is a resource from the config file */
unsigned susp_nod:1; /* IO suspended because no data */
unsigned susp_fen:1; /* IO suspended because fence peer handler runs */
struct mutex cstate_mutex; /* Protects graceful disconnects */
+ unsigned int connect_cnt; /* Inc each time a connection is established */
unsigned long flags;
struct net_conf *net_conf; /* content protected by rcu */
@@ -1132,6 +1133,7 @@ extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
extern void conn_md_sync(struct drbd_tconn *tconn);
+extern void drbd_md_write(struct drbd_conf *mdev, void *buffer);
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
@@ -1466,8 +1468,16 @@ extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, sector_t, int);
-enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
+enum determine_dev_size {
+ DS_ERROR_SHRINK = -3,
+ DS_ERROR_SPACE_MD = -2,
+ DS_ERROR = -1,
+ DS_UNCHANGED = 0,
+ DS_SHRUNK = 1,
+ DS_GREW = 2
+};
+extern enum determine_dev_size
+drbd_determine_dev_size(struct drbd_conf *, enum dds_flags, struct resize_parms *) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev);
extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
@@ -1633,6 +1643,7 @@ extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
#define drbd_set_out_of_sync(mdev, sector, size) \
__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_shrink(struct drbd_conf *mdev);
+extern int drbd_initialize_al(struct drbd_conf *, void *);
/* drbd_nl.c */
/* state info broadcast */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a5dca6affcbb..55635edf563b 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2762,8 +2762,6 @@ int __init drbd_init(void)
/*
* allocate all necessary structs
*/
- err = -ENOMEM;
-
init_waitqueue_head(&drbd_pp_wait);
drbd_proc = NULL; /* play safe for drbd_cleanup */
@@ -2773,6 +2771,7 @@ int __init drbd_init(void)
if (err)
goto fail;
+ err = -ENOMEM;
drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
if (!drbd_proc) {
printk(KERN_ERR "drbd: unable to register proc file\n");
@@ -2803,7 +2802,6 @@ int __init drbd_init(void)
fail:
drbd_cleanup();
if (err == -ENOMEM)
- /* currently always the case */
printk(KERN_ERR "drbd: ran out of memory\n");
else
printk(KERN_ERR "drbd: initialization failure\n");
@@ -2881,34 +2879,14 @@ struct meta_data_on_disk {
u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
-/**
- * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
- * @mdev: DRBD device.
- */
-void drbd_md_sync(struct drbd_conf *mdev)
+
+
+void drbd_md_write(struct drbd_conf *mdev, void *b)
{
- struct meta_data_on_disk *buffer;
+ struct meta_data_on_disk *buffer = b;
sector_t sector;
int i;
- /* Don't accidentally change the DRBD meta data layout. */
- BUILD_BUG_ON(UI_SIZE != 4);
- BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
-
- del_timer(&mdev->md_sync_timer);
- /* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
- return;
-
- /* We use here D_FAILED and not D_ATTACHING because we try to write
- * metadata even if we detach due to a disk failure! */
- if (!get_ldev_if_state(mdev, D_FAILED))
- return;
-
- buffer = drbd_md_get_buffer(mdev);
- if (!buffer)
- goto out;
-
memset(buffer, 0, sizeof(*buffer));
buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -2937,6 +2915,35 @@ void drbd_md_sync(struct drbd_conf *mdev)
dev_err(DEV, "meta data update failed!\n");
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
}
+}
+
+/**
+ * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
+ * @mdev: DRBD device.
+ */
+void drbd_md_sync(struct drbd_conf *mdev)
+{
+ struct meta_data_on_disk *buffer;
+
+ /* Don't accidentally change the DRBD meta data layout. */
+ BUILD_BUG_ON(UI_SIZE != 4);
+ BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
+
+ del_timer(&mdev->md_sync_timer);
+ /* timer may be rearmed by drbd_md_mark_dirty() now. */
+ if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ return;
+
+ /* We use here D_FAILED and not D_ATTACHING because we try to write
+ * metadata even if we detach due to a disk failure! */
+ if (!get_ldev_if_state(mdev, D_FAILED))
+ return;
+
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
+
+ drbd_md_write(mdev, buffer);
/* Update mdev->ldev->md.la_size_sect,
* since we updated it on metadata. */
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 9e3f441e7e84..8cc1e640f485 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -417,6 +417,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
+ unsigned int connect_cnt;
union drbd_state mask = { };
union drbd_state val = { };
enum drbd_fencing_p fp;
@@ -428,6 +429,10 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
return false;
}
+ spin_lock_irq(&tconn->req_lock);
+ connect_cnt = tconn->connect_cnt;
+ spin_unlock_irq(&tconn->req_lock);
+
fp = highest_fencing_policy(tconn);
switch (fp) {
case FP_NOT_AVAIL:
@@ -492,8 +497,14 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
here, because we might were able to re-establish the connection in the
meantime. */
spin_lock_irq(&tconn->req_lock);
- if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
- _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
+ if (tconn->connect_cnt != connect_cnt)
+ /* In case the connection was established and droped
+ while the fence-peer handler was running, ignore it */
+ conn_info(tconn, "Ignoring fence-peer exit code\n");
+ else
+ _conn_request_state(tconn, mask, val, CS_VERBOSE);
+ }
spin_unlock_irq(&tconn->req_lock);
return conn_highest_pdsk(tconn) <= D_OUTDATED;
@@ -816,15 +827,20 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
+enum determine_dev_size
+drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size_sect, u_size;
+ struct drbd_md *md = &mdev->ldev->md;
+ u32 prev_al_stripe_size_4k;
+ u32 prev_al_stripes;
sector_t size;
char ppb[10];
+ void *buffer;
int md_moved, la_size_changed;
- enum determine_dev_size rv = unchanged;
+ enum determine_dev_size rv = DS_UNCHANGED;
/* race:
* application request passes inc_ap_bio,
@@ -836,6 +852,11 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
* still lock the act_log to not trigger ASSERTs there.
*/
drbd_suspend_io(mdev);
+ buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
+ if (!buffer) {
+ drbd_resume_io(mdev);
+ return DS_ERROR;
+ }
/* no wait necessary anymore, actually we could assert that */
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
@@ -844,7 +865,17 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
prev_size = mdev->ldev->md.md_size_sect;
la_size_sect = mdev->ldev->md.la_size_sect;
- /* TODO: should only be some assert here, not (re)init... */
+ if (rs) {
+ /* rs is non NULL if we should change the AL layout only */
+
+ prev_al_stripes = md->al_stripes;
+ prev_al_stripe_size_4k = md->al_stripe_size_4k;
+
+ md->al_stripes = rs->al_stripes;
+ md->al_stripe_size_4k = rs->al_stripe_size / 4;
+ md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
+ }
+
drbd_md_set_sector_offsets(mdev, mdev->ldev);
rcu_read_lock();
@@ -852,6 +883,21 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
rcu_read_unlock();
size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
+ if (size < la_size_sect) {
+ if (rs && u_size == 0) {
+ /* Remove "rs &&" later. This check should always be active, but
+ right now the receiver expects the permissive behavior */
+ dev_warn(DEV, "Implicit shrink not allowed. "
+ "Use --size=%llus for explicit shrink.\n",
+ (unsigned long long)size);
+ rv = DS_ERROR_SHRINK;
+ }
+ if (u_size > size)
+ rv = DS_ERROR_SPACE_MD;
+ if (rv != DS_UNCHANGED)
+ goto err_out;
+ }
+
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
int err;
@@ -867,7 +913,7 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
"Leaving size unchanged at size = %lu KB\n",
(unsigned long)size);
}
- rv = dev_size_error;
+ rv = DS_ERROR;
}
/* racy, see comments above. */
drbd_set_my_capacity(mdev, size);
@@ -875,38 +921,57 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
(unsigned long long)size>>1);
}
- if (rv == dev_size_error)
- goto out;
+ if (rv <= DS_ERROR)
+ goto err_out;
la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
|| prev_size != mdev->ldev->md.md_size_sect;
- if (la_size_changed || md_moved) {
- int err;
+ if (la_size_changed || md_moved || rs) {
+ u32 prev_flags;
drbd_al_shrink(mdev); /* All extents inactive. */
+
+ prev_flags = md->flags;
+ md->flags &= ~MDF_PRIMARY_IND;
+ drbd_md_write(mdev, buffer);
+
dev_info(DEV, "Writing the whole bitmap, %s\n",
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
- err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
- "size changed", BM_LOCKED_MASK);
- if (err) {
- rv = dev_size_error;
- goto out;
- }
- drbd_md_mark_dirty(mdev);
+ drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+ "size changed", BM_LOCKED_MASK);
+ drbd_initialize_al(mdev, buffer);
+
+ md->flags = prev_flags;
+ drbd_md_write(mdev, buffer);
+
+ if (rs)
+ dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
+ md->al_stripes, md->al_stripe_size_4k * 4);
}
if (size > la_size_sect)
- rv = grew;
+ rv = DS_GREW;
if (size < la_size_sect)
- rv = shrunk;
-out:
+ rv = DS_SHRUNK;
+
+ if (0) {
+ err_out:
+ if (rs) {
+ md->al_stripes = prev_al_stripes;
+ md->al_stripe_size_4k = prev_al_stripe_size_4k;
+ md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
+
+ drbd_md_set_sector_offsets(mdev, mdev->ldev);
+ }
+ }
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
+ drbd_md_put_buffer(mdev);
drbd_resume_io(mdev);
return rv;
@@ -1607,11 +1672,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
set_bit(USE_DEGR_WFC_T, &mdev->flags);
- dd = drbd_determine_dev_size(mdev, 0);
- if (dd == dev_size_error) {
+ dd = drbd_determine_dev_size(mdev, 0, NULL);
+ if (dd <= DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
- } else if (dd == grew)
+ } else if (dd == DS_GREW)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
@@ -2305,6 +2370,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
struct drbd_conf *mdev;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
+ bool change_al_layout = false;
enum dds_flags ddsf;
sector_t u_size;
int err;
@@ -2315,31 +2381,33 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR)
goto fail;
+ mdev = adm_ctx.mdev;
+ if (!get_ldev(mdev)) {
+ retcode = ERR_NO_DISK;
+ goto fail;
+ }
+
memset(&rs, 0, sizeof(struct resize_parms));
+ rs.al_stripes = mdev->ldev->md.al_stripes;
+ rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
err = resize_parms_from_attrs(&rs, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
- goto fail;
+ goto fail_ldev;
}
}
- mdev = adm_ctx.mdev;
if (mdev->state.conn > C_CONNECTED) {
retcode = ERR_RESIZE_RESYNC;
- goto fail;
+ goto fail_ldev;
}
if (mdev->state.role == R_SECONDARY &&
mdev->state.peer == R_SECONDARY) {
retcode = ERR_NO_PRIMARY;
- goto fail;
- }
-
- if (!get_ldev(mdev)) {
- retcode = ERR_NO_DISK;
- goto fail;
+ goto fail_ldev;
}
if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
@@ -2358,6 +2426,28 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
}
+ if (mdev->ldev->md.al_stripes != rs.al_stripes ||
+ mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
+ u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
+
+ if (al_size_k > (16 * 1024 * 1024)) {
+ retcode = ERR_MD_LAYOUT_TOO_BIG;
+ goto fail_ldev;
+ }
+
+ if (al_size_k < MD_32kB_SECT/2) {
+ retcode = ERR_MD_LAYOUT_TOO_SMALL;
+ goto fail_ldev;
+ }
+
+ if (mdev->state.conn != C_CONNECTED) {
+ retcode = ERR_MD_LAYOUT_CONNECTED;
+ goto fail_ldev;
+ }
+
+ change_al_layout = true;
+ }
+
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
@@ -2373,16 +2463,22 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
}
ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
- dd = drbd_determine_dev_size(mdev, ddsf);
+ dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
drbd_md_sync(mdev);
put_ldev(mdev);
- if (dd == dev_size_error) {
+ if (dd == DS_ERROR) {
retcode = ERR_NOMEM_BITMAP;
goto fail;
+ } else if (dd == DS_ERROR_SPACE_MD) {
+ retcode = ERR_MD_LAYOUT_NO_FIT;
+ goto fail;
+ } else if (dd == DS_ERROR_SHRINK) {
+ retcode = ERR_IMPLICIT_SHRINK;
+ goto fail;
}
if (mdev->state.conn == C_CONNECTED) {
- if (dd == grew)
+ if (dd == DS_GREW)
set_bit(RESIZE_PENDING, &mdev->flags);
drbd_send_uuids(mdev);
@@ -2658,7 +2754,6 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
const struct sib_info *sib)
{
struct state_info *si = NULL; /* for sizeof(si->member); */
- struct net_conf *nc;
struct nlattr *nla;
int got_ldev;
int err = 0;
@@ -2688,13 +2783,19 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
goto nla_put_failure;
rcu_read_lock();
- if (got_ldev)
- if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
- goto nla_put_failure;
+ if (got_ldev) {
+ struct disk_conf *disk_conf;
- nc = rcu_dereference(mdev->tconn->net_conf);
- if (nc)
- err = net_conf_to_skb(skb, nc, exclude_sensitive);
+ disk_conf = rcu_dereference(mdev->ldev->disk_conf);
+ err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
+ }
+ if (!err) {
+ struct net_conf *nc;
+
+ nc = rcu_dereference(mdev->tconn->net_conf);
+ if (nc)
+ err = net_conf_to_skb(skb, nc, exclude_sensitive);
+ }
rcu_read_unlock();
if (err)
goto nla_put_failure;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 4222affff488..cc29cd3bf78b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1039,6 +1039,8 @@ randomize:
rcu_read_lock();
idr_for_each_entry(&tconn->volumes, mdev, vnr) {
kref_get(&mdev->kref);
+ rcu_read_unlock();
+
/* Prevent a race between resync-handshake and
* being promoted to Primary.
*
@@ -1049,8 +1051,6 @@ randomize:
mutex_lock(mdev->state_mutex);
mutex_unlock(mdev->state_mutex);
- rcu_read_unlock();
-
if (discard_my_data)
set_bit(DISCARD_MY_DATA, &mdev->flags);
else
@@ -3545,7 +3545,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
{
struct drbd_conf *mdev;
struct p_sizes *p = pi->data;
- enum determine_dev_size dd = unchanged;
+ enum determine_dev_size dd = DS_UNCHANGED;
sector_t p_size, p_usize, my_usize;
int ldsc = 0; /* local disk size changed */
enum dds_flags ddsf;
@@ -3617,9 +3617,9 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
ddsf = be16_to_cpu(p->dds_flags);
if (get_ldev(mdev)) {
- dd = drbd_determine_dev_size(mdev, ddsf);
+ dd = drbd_determine_dev_size(mdev, ddsf, NULL);
put_ldev(mdev);
- if (dd == dev_size_error)
+ if (dd == DS_ERROR)
return -EIO;
drbd_md_sync(mdev);
} else {
@@ -3647,7 +3647,7 @@ static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
drbd_send_sizes(mdev, 0, ddsf);
}
if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
- (dd == grew && mdev->state.conn == C_CONNECTED)) {
+ (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
mdev->state.disk >= D_INCONSISTENT) {
if (ddsf & DDSF_NO_RESYNC)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index 90c5be2b1d30..216d47b7e88b 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -1115,8 +1115,10 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
drbd_thread_restart_nowait(&mdev->tconn->receiver);
/* Resume AL writing if we get a connection */
- if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+ if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
drbd_resume_al(mdev);
+ mdev->tconn->connect_cnt++;
+ }
/* remember last attach time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 5af21f2db29c..6e85e21445eb 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -31,6 +31,8 @@
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@@ -39,8 +41,9 @@
#include "rsxx_cfg.h"
#define NO_LEGACY 0
+#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
-MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
@@ -49,9 +52,282 @@ static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
+static unsigned int sync_start = 1;
+module_param(sync_start, uint, 0444);
+MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
+ "until the card startup has completed.");
+
static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
+/* --------------------Debugfs Setup ------------------- */
+
+struct rsxx_cram {
+ u32 f_pos;
+ u32 offset;
+ void *i_private;
+};
+
+static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
+{
+ struct rsxx_cardinfo *card = m->private;
+
+ seq_printf(m, "HWID 0x%08x\n",
+ ioread32(card->regmap + HWID));
+ seq_printf(m, "SCRATCH 0x%08x\n",
+ ioread32(card->regmap + SCRATCH));
+ seq_printf(m, "IER 0x%08x\n",
+ ioread32(card->regmap + IER));
+ seq_printf(m, "IPR 0x%08x\n",
+ ioread32(card->regmap + IPR));
+ seq_printf(m, "CREG_CMD 0x%08x\n",
+ ioread32(card->regmap + CREG_CMD));
+ seq_printf(m, "CREG_ADD 0x%08x\n",
+ ioread32(card->regmap + CREG_ADD));
+ seq_printf(m, "CREG_CNT 0x%08x\n",
+ ioread32(card->regmap + CREG_CNT));
+ seq_printf(m, "CREG_STAT 0x%08x\n",
+ ioread32(card->regmap + CREG_STAT));
+ seq_printf(m, "CREG_DATA0 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA0));
+ seq_printf(m, "CREG_DATA1 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA1));
+ seq_printf(m, "CREG_DATA2 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA2));
+ seq_printf(m, "CREG_DATA3 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA3));
+ seq_printf(m, "CREG_DATA4 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA4));
+ seq_printf(m, "CREG_DATA5 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA5));
+ seq_printf(m, "CREG_DATA6 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA6));
+ seq_printf(m, "CREG_DATA7 0x%08x\n",
+ ioread32(card->regmap + CREG_DATA7));
+ seq_printf(m, "INTR_COAL 0x%08x\n",
+ ioread32(card->regmap + INTR_COAL));
+ seq_printf(m, "HW_ERROR 0x%08x\n",
+ ioread32(card->regmap + HW_ERROR));
+ seq_printf(m, "DEBUG0 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG0));
+ seq_printf(m, "DEBUG1 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG1));
+ seq_printf(m, "DEBUG2 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG2));
+ seq_printf(m, "DEBUG3 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG3));
+ seq_printf(m, "DEBUG4 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG4));
+ seq_printf(m, "DEBUG5 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG5));
+ seq_printf(m, "DEBUG6 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG6));
+ seq_printf(m, "DEBUG7 0x%08x\n",
+ ioread32(card->regmap + PCI_DEBUG7));
+ seq_printf(m, "RECONFIG 0x%08x\n",
+ ioread32(card->regmap + PCI_RECONFIG));
+
+ return 0;
+}
+
+static int rsxx_attr_stats_show(struct seq_file *m, void *p)
+{
+ struct rsxx_cardinfo *card = m->private;
+ int i;
+
+ for (i = 0; i < card->n_targets; i++) {
+ seq_printf(m, "Ctrl %d CRC Errors = %d\n",
+ i, card->ctrl[i].stats.crc_errors);
+ seq_printf(m, "Ctrl %d Hard Errors = %d\n",
+ i, card->ctrl[i].stats.hard_errors);
+ seq_printf(m, "Ctrl %d Soft Errors = %d\n",
+ i, card->ctrl[i].stats.soft_errors);
+ seq_printf(m, "Ctrl %d Writes Issued = %d\n",
+ i, card->ctrl[i].stats.writes_issued);
+ seq_printf(m, "Ctrl %d Writes Failed = %d\n",
+ i, card->ctrl[i].stats.writes_failed);
+ seq_printf(m, "Ctrl %d Reads Issued = %d\n",
+ i, card->ctrl[i].stats.reads_issued);
+ seq_printf(m, "Ctrl %d Reads Failed = %d\n",
+ i, card->ctrl[i].stats.reads_failed);
+ seq_printf(m, "Ctrl %d Reads Retried = %d\n",
+ i, card->ctrl[i].stats.reads_retried);
+ seq_printf(m, "Ctrl %d Discards Issued = %d\n",
+ i, card->ctrl[i].stats.discards_issued);
+ seq_printf(m, "Ctrl %d Discards Failed = %d\n",
+ i, card->ctrl[i].stats.discards_failed);
+ seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
+ i, card->ctrl[i].stats.dma_sw_err);
+ seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
+ i, card->ctrl[i].stats.dma_hw_fault);
+ seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
+ i, card->ctrl[i].stats.dma_cancelled);
+ seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
+ i, card->ctrl[i].stats.sw_q_depth);
+ seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
+ i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
+ }
+
+ return 0;
+}
+
+static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rsxx_attr_stats_show, inode->i_private);
+}
+
+static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
+}
+
+static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct rsxx_cram *info = fp->private_data;
+ struct rsxx_cardinfo *card = info->i_private;
+ char *buf;
+ int st;
+
+ buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ info->f_pos = (u32)*ppos + info->offset;
+
+ st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+ if (st)
+ return st;
+
+ st = copy_to_user(ubuf, buf, cnt);
+ if (st)
+ return st;
+
+ info->offset += cnt;
+
+ kfree(buf);
+
+ return cnt;
+}
+
+static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct rsxx_cram *info = fp->private_data;
+ struct rsxx_cardinfo *card = info->i_private;
+ char *buf;
+ int st;
+
+ buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ st = copy_from_user(buf, ubuf, cnt);
+ if (st)
+ return st;
+
+ info->f_pos = (u32)*ppos + info->offset;
+
+ st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+ if (st)
+ return st;
+
+ info->offset += cnt;
+
+ kfree(buf);
+
+ return cnt;
+}
+
+static int rsxx_cram_open(struct inode *inode, struct file *file)
+{
+ struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->i_private = inode->i_private;
+ info->f_pos = file->f_pos;
+ file->private_data = info;
+
+ return 0;
+}
+
+static int rsxx_cram_release(struct inode *inode, struct file *file)
+{
+ struct rsxx_cram *info = file->private_data;
+
+ if (!info)
+ return 0;
+
+ kfree(info);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations debugfs_cram_fops = {
+ .owner = THIS_MODULE,
+ .open = rsxx_cram_open,
+ .read = rsxx_cram_read,
+ .write = rsxx_cram_write,
+ .release = rsxx_cram_release,
+};
+
+static const struct file_operations debugfs_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = rsxx_attr_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debugfs_pci_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = rsxx_attr_pci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
+{
+ struct dentry *debugfs_stats;
+ struct dentry *debugfs_pci_regs;
+ struct dentry *debugfs_cram;
+
+ card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
+ if (IS_ERR_OR_NULL(card->debugfs_dir))
+ goto failed_debugfs_dir;
+
+ debugfs_stats = debugfs_create_file("stats", S_IRUGO,
+ card->debugfs_dir, card,
+ &debugfs_stats_fops);
+ if (IS_ERR_OR_NULL(debugfs_stats))
+ goto failed_debugfs_stats;
+
+ debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
+ card->debugfs_dir, card,
+ &debugfs_pci_regs_fops);
+ if (IS_ERR_OR_NULL(debugfs_pci_regs))
+ goto failed_debugfs_pci_regs;
+
+ debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
+ card->debugfs_dir, card,
+ &debugfs_cram_fops);
+ if (IS_ERR_OR_NULL(debugfs_cram))
+ goto failed_debugfs_cram;
+
+ return;
+failed_debugfs_cram:
+ debugfs_remove(debugfs_pci_regs);
+failed_debugfs_pci_regs:
+ debugfs_remove(debugfs_stats);
+failed_debugfs_stats:
+ debugfs_remove(card->debugfs_dir);
+failed_debugfs_dir:
+ card->debugfs_dir = NULL;
+}
+
/*----------------- Interrupt Control & Handling -------------------*/
static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
@@ -163,12 +439,13 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
}
if (isr & CR_INTR_CREG) {
- schedule_work(&card->creg_ctrl.done_work);
+ queue_work(card->creg_ctrl.creg_wq,
+ &card->creg_ctrl.done_work);
handled++;
}
if (isr & CR_INTR_EVENT) {
- schedule_work(&card->event_work);
+ queue_work(card->event_wq, &card->event_work);
rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
handled++;
}
@@ -329,7 +606,7 @@ static int rsxx_eeh_frozen(struct pci_dev *dev)
int i;
int st;
- dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+ dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
card->eeh_state = 1;
rsxx_mask_interrupts(card);
@@ -367,15 +644,26 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
int i;
+ int cnt = 0;
- dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+ dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
card->eeh_state = 1;
+ card->halt = 1;
- for (i = 0; i < card->n_targets; i++)
- del_timer_sync(&card->ctrl[i].activity_timer);
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_bh(&card->ctrl[i].queue_lock);
+ cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
+ &card->ctrl[i].queue);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
+
+ cnt += rsxx_dma_cancel(&card->ctrl[i]);
- rsxx_eeh_cancel_dmas(card);
+ if (cnt)
+ dev_info(CARD_TO_DEV(card),
+ "Freed %d queued DMAs on channel %d\n",
+ cnt, card->ctrl[i].id);
+ }
}
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
@@ -432,7 +720,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
int st;
dev_warn(&dev->dev,
- "IBM FlashSystem PCI: recovering from slot reset.\n");
+ "IBM Flash Adapter PCI: recovering from slot reset.\n");
st = pci_enable_device(dev);
if (st)
@@ -485,7 +773,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
&card->ctrl[i].issue_dma_work);
}
- dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+ dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
return PCI_ERS_RESULT_RECOVERED;
@@ -528,6 +816,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
{
struct rsxx_cardinfo *card;
int st;
+ unsigned int sync_timeout;
dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
@@ -610,7 +899,11 @@ static int rsxx_pci_probe(struct pci_dev *dev,
}
/************* Setup Processor Command Interface *************/
- rsxx_creg_setup(card);
+ st = rsxx_creg_setup(card);
+ if (st) {
+ dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
+ goto failed_creg_setup;
+ }
spin_lock_irq(&card->irq_lock);
rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
@@ -650,6 +943,12 @@ static int rsxx_pci_probe(struct pci_dev *dev,
}
/************* Setup Card Event Handler *************/
+ card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
+ if (!card->event_wq) {
+ dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+ goto failed_event_handler;
+ }
+
INIT_WORK(&card->event_work, card_event_handler);
st = rsxx_setup_dev(card);
@@ -676,6 +975,33 @@ static int rsxx_pci_probe(struct pci_dev *dev,
if (st)
dev_crit(CARD_TO_DEV(card),
"Failed issuing card startup\n");
+ if (sync_start) {
+ sync_timeout = SYNC_START_TIMEOUT;
+
+ dev_info(CARD_TO_DEV(card),
+ "Waiting for card to startup\n");
+
+ do {
+ ssleep(1);
+ sync_timeout--;
+
+ rsxx_get_card_state(card, &card->state);
+ } while (sync_timeout &&
+ (card->state == CARD_STATE_STARTING));
+
+ if (card->state == CARD_STATE_STARTING) {
+ dev_warn(CARD_TO_DEV(card),
+ "Card startup timed out\n");
+ card->size8 = 0;
+ } else {
+ dev_info(CARD_TO_DEV(card),
+ "card state: %s\n",
+ rsxx_card_state_to_str(card->state));
+ st = rsxx_get_card_size8(card, &card->size8);
+ if (st)
+ card->size8 = 0;
+ }
+ }
} else if (card->state == CARD_STATE_GOOD ||
card->state == CARD_STATE_RD_ONLY_FAULT) {
st = rsxx_get_card_size8(card, &card->size8);
@@ -685,12 +1011,21 @@ static int rsxx_pci_probe(struct pci_dev *dev,
rsxx_attach_dev(card);
+ /************* Setup Debugfs *************/
+ rsxx_debugfs_dev_new(card);
+
return 0;
failed_create_dev:
+ destroy_workqueue(card->event_wq);
+ card->event_wq = NULL;
+failed_event_handler:
rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatiblity_check:
+ destroy_workqueue(card->creg_ctrl.creg_wq);
+ card->creg_ctrl.creg_wq = NULL;
+failed_creg_setup:
spin_lock_irq(&card->irq_lock);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
spin_unlock_irq(&card->irq_lock);
@@ -756,6 +1091,8 @@ static void rsxx_pci_remove(struct pci_dev *dev)
/* Prevent work_structs from re-queuing themselves. */
card->halt = 1;
+ debugfs_remove_recursive(card->debugfs_dir);
+
free_irq(dev->irq, card);
if (!force_legacy)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 4b5c020a0a65..926dce9c452f 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -431,6 +431,15 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
*hw_stat = completion.creg_status;
if (completion.st) {
+ /*
+ * This dummy read is needed to check whether an extreme error,
+ * e.g. an EEH event, has occurred. iowrite32 will not detect
+ * EEH errors, so we must recover here if such an error caused
+ * the timeout.
+ */
+ ioread32(card->regmap + SCRATCH);
+
dev_warn(CARD_TO_DEV(card),
"creg command failed(%d x%08x)\n",
completion.st, addr);
@@ -727,6 +736,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
card->creg_ctrl.active_cmd = NULL;
+ card->creg_ctrl.creg_wq =
+ create_singlethread_workqueue(DRIVER_NAME"_creg");
+ if (!card->creg_ctrl.creg_wq)
+ return -ENOMEM;
+
INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 4346d17d2949..d7af441880be 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
atomic_set(&meta->error, 1);
if (atomic_dec_and_test(&meta->pending_dmas)) {
- disk_stats_complete(card, meta->bio, meta->start_time);
+ if (!card->eeh_state && card->gendisk)
+ disk_stats_complete(card, meta->bio, meta->start_time);
bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
kmem_cache_free(bio_meta_pool, meta);
@@ -170,6 +171,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
might_sleep();
+ if (!card)
+ goto req_err;
+
+ if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+ goto req_err;
+
if (unlikely(card->halt)) {
st = -EFAULT;
goto req_err;
@@ -196,7 +203,8 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
atomic_set(&bio_meta->pending_dmas, 0);
bio_meta->start_time = jiffies;
- disk_stats_start(card, bio);
+ if (!unlikely(card->halt))
+ disk_stats_start(card, bio);
dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
bio_data_dir(bio) ? 'W' : 'R', bio_meta,
@@ -225,24 +233,6 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
return (pci_rev >= RSXX_DISCARD_SUPPORT);
}
-static unsigned short rsxx_get_logical_block_size(
- struct rsxx_cardinfo *card)
-{
- u32 capabilities = 0;
- int st;
-
- st = rsxx_get_card_capabilities(card, &capabilities);
- if (st)
- dev_warn(CARD_TO_DEV(card),
- "Failed reading card capabilities register\n");
-
- /* Earlier firmware did not have support for 512 byte accesses */
- if (capabilities & CARD_CAP_SUBPAGE_WRITES)
- return 512;
- else
- return RSXX_HW_BLK_SIZE;
-}
-
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
mutex_lock(&card->dev_lock);
@@ -305,7 +295,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
return -ENOMEM;
}
- blk_size = rsxx_get_logical_block_size(card);
+ blk_size = card->config.data.block_size;
blk_queue_make_request(card->queue, rsxx_make_request);
blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
@@ -347,6 +337,7 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
card->gendisk = NULL;
blk_cleanup_queue(card->queue);
+ card->queue->queuedata = NULL;
unregister_blkdev(card->major, DRIVER_NAME);
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb41..bed32f16b084 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -245,6 +245,22 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
kmem_cache_free(rsxx_dma_pool, dma);
}
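+/*
+ * Drain the given DMA queue, completing every entry as DMA_CANCELLED.
+ * Callers hold ctrl->queue_lock while draining. Returns the number of
+ * DMAs freed.
+ */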
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+ struct list_head *q)
+{
+ struct rsxx_dma *dma;
+ struct rsxx_dma *tmp;
+ int cnt = 0;
+
+ list_for_each_entry_safe(dma, tmp, q, list) {
+ list_del(&dma->list);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ cnt++;
+ }
+
+ return cnt;
+}
+
static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma)
{
@@ -252,9 +268,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
* Requeued DMAs go to the front of the queue so they are issued
* first.
*/
- spin_lock(&ctrl->queue_lock);
+ spin_lock_bh(&ctrl->queue_lock);
+ ctrl->stats.sw_q_depth++;
list_add(&dma->list, &ctrl->queue);
- spin_unlock(&ctrl->queue_lock);
+ spin_unlock_bh(&ctrl->queue_lock);
}
static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +346,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
static void dma_engine_stalled(unsigned long data)
{
struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+ int cnt;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
unlikely(ctrl->card->eeh_state))
@@ -349,18 +367,28 @@ static void dma_engine_stalled(unsigned long data)
"DMA channel %d has stalled, faulting interface.\n",
ctrl->id);
ctrl->card->dma_fault = 1;
+
+ /* Clean up the DMA queue */
+ spin_lock(&ctrl->queue_lock);
+ cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ spin_unlock(&ctrl->queue_lock);
+
+ cnt += rsxx_dma_cancel(ctrl);
+
+ if (cnt)
+ dev_info(CARD_TO_DEV(ctrl->card),
+ "Freed %d queued DMAs on channel %d\n",
+ cnt, ctrl->id);
}
}
-static void rsxx_issue_dmas(struct work_struct *work)
+static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
- struct rsxx_dma_ctrl *ctrl;
struct rsxx_dma *dma;
int tag;
int cmds_pending = 0;
struct hw_cmd *hw_cmd_buf;
- ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
hw_cmd_buf = ctrl->cmd.buf;
if (unlikely(ctrl->card->halt) ||
@@ -368,22 +396,22 @@ static void rsxx_issue_dmas(struct work_struct *work)
return;
while (1) {
- spin_lock(&ctrl->queue_lock);
+ spin_lock_bh(&ctrl->queue_lock);
if (list_empty(&ctrl->queue)) {
- spin_unlock(&ctrl->queue_lock);
+ spin_unlock_bh(&ctrl->queue_lock);
break;
}
- spin_unlock(&ctrl->queue_lock);
+ spin_unlock_bh(&ctrl->queue_lock);
tag = pop_tracker(ctrl->trackers);
if (tag == -1)
break;
- spin_lock(&ctrl->queue_lock);
+ spin_lock_bh(&ctrl->queue_lock);
dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
list_del(&dma->list);
ctrl->stats.sw_q_depth--;
- spin_unlock(&ctrl->queue_lock);
+ spin_unlock_bh(&ctrl->queue_lock);
/*
* This will catch any DMAs that slipped in right before the
@@ -440,9 +468,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
}
}
-static void rsxx_dma_done(struct work_struct *work)
+static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
- struct rsxx_dma_ctrl *ctrl;
struct rsxx_dma *dma;
unsigned long flags;
u16 count;
@@ -450,7 +477,6 @@ static void rsxx_dma_done(struct work_struct *work)
u8 tag;
struct hw_status *hw_st_buf;
- ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
hw_st_buf = ctrl->status.buf;
if (unlikely(ctrl->card->halt) ||
@@ -520,33 +546,32 @@ static void rsxx_dma_done(struct work_struct *work)
rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
- spin_lock(&ctrl->queue_lock);
+ spin_lock_bh(&ctrl->queue_lock);
if (ctrl->stats.sw_q_depth)
queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
- spin_unlock(&ctrl->queue_lock);
+ spin_unlock_bh(&ctrl->queue_lock);
}
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
- struct list_head *q)
+static void rsxx_schedule_issue(struct work_struct *work)
{
- struct rsxx_dma *dma;
- struct rsxx_dma *tmp;
- int cnt = 0;
+ struct rsxx_dma_ctrl *ctrl;
- list_for_each_entry_safe(dma, tmp, q, list) {
- list_del(&dma->list);
+ ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
- if (dma->dma_addr)
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- (dma->cmd == HW_CMD_BLK_WRITE) ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- kmem_cache_free(rsxx_dma_pool, dma);
- cnt++;
- }
+ mutex_lock(&ctrl->work_lock);
+ rsxx_issue_dmas(ctrl);
+ mutex_unlock(&ctrl->work_lock);
+}
- return cnt;
+static void rsxx_schedule_done(struct work_struct *work)
+{
+ struct rsxx_dma_ctrl *ctrl;
+
+ ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+
+ mutex_lock(&ctrl->work_lock);
+ rsxx_dma_done(ctrl);
+ mutex_unlock(&ctrl->work_lock);
}
static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -698,10 +723,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
for (i = 0; i < card->n_targets; i++) {
if (!list_empty(&dma_list[i])) {
- spin_lock(&card->ctrl[i].queue_lock);
+ spin_lock_bh(&card->ctrl[i].queue_lock);
card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
- spin_unlock(&card->ctrl[i].queue_lock);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
queue_work(card->ctrl[i].issue_wq,
&card->ctrl[i].issue_dma_work);
@@ -711,8 +736,11 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
return 0;
bvec_err:
- for (i = 0; i < card->n_targets; i++)
- rsxx_cleanup_dma_queue(card, &dma_list[i]);
+ for (i = 0; i < card->n_targets; i++) {
+ spin_lock_bh(&card->ctrl[i].queue_lock);
+ rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
+ }
return st;
}
@@ -780,6 +808,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
spin_lock_init(&ctrl->trackers->lock);
spin_lock_init(&ctrl->queue_lock);
+ mutex_init(&ctrl->work_lock);
INIT_LIST_HEAD(&ctrl->queue);
setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -793,8 +822,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
if (!ctrl->done_wq)
return -ENOMEM;
- INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
- INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+ INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
+ INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
st = rsxx_hw_buffers_init(dev, ctrl);
if (st)
@@ -918,13 +947,30 @@ failed_dma_setup:
return st;
}
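+/*
+ * Cancel every DMA still held in the channel's trackers (i.e. already
+ * issued to hardware), completing each one as DMA_CANCELLED and
+ * returning its tracker. Returns the number of DMAs cancelled.
+ */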
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+ struct rsxx_dma *dma;
+ int i;
+ int cnt = 0;
+
+ /* Clean up issued DMAs */
+ for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+ dma = get_tracker_dma(ctrl->trackers, i);
+ if (dma) {
+ atomic_dec(&ctrl->stats.hw_q_depth);
+ rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+ push_tracker(ctrl->trackers, i);
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
struct rsxx_dma_ctrl *ctrl;
- struct rsxx_dma *dma;
- int i, j;
- int cnt = 0;
+ int i;
for (i = 0; i < card->n_targets; i++) {
ctrl = &card->ctrl[i];
@@ -943,33 +989,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
del_timer_sync(&ctrl->activity_timer);
/* Clean up the DMA queue */
- spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
- spin_unlock(&ctrl->queue_lock);
-
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Freed %d queued DMAs on channel %d\n",
- cnt, i);
-
- /* Clean up issued DMAs */
- for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
- dma = get_tracker_dma(ctrl->trackers, j);
- if (dma) {
- pci_unmap_page(card->dev, dma->dma_addr,
- get_dma_size(dma),
- (dma->cmd == HW_CMD_BLK_WRITE) ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
- kmem_cache_free(rsxx_dma_pool, dma);
- cnt++;
- }
- }
+ spin_lock_bh(&ctrl->queue_lock);
+ rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+ spin_unlock_bh(&ctrl->queue_lock);
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Freed %d pending DMAs on channel %d\n",
- cnt, i);
+ rsxx_dma_cancel(ctrl);
vfree(ctrl->trackers);
@@ -1013,7 +1037,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
cnt++;
}
- spin_lock(&card->ctrl[i].queue_lock);
+ spin_lock_bh(&card->ctrl[i].queue_lock);
list_splice(&issued_dmas[i], &card->ctrl[i].queue);
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
@@ -1028,7 +1052,7 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
}
- spin_unlock(&card->ctrl[i].queue_lock);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
}
kfree(issued_dmas);
@@ -1036,30 +1060,13 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
return 0;
}
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma *dma;
- struct rsxx_dma *tmp;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock(&card->ctrl[i].queue_lock);
- list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
- list_del(&dma->list);
-
- rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
- }
- spin_unlock(&card->ctrl[i].queue_lock);
- }
-}
-
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
struct rsxx_dma *dma;
int i;
for (i = 0; i < card->n_targets; i++) {
- spin_lock(&card->ctrl[i].queue_lock);
+ spin_lock_bh(&card->ctrl[i].queue_lock);
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
dma->dma_addr = pci_map_page(card->dev, dma->page,
dma->pg_off, get_dma_size(dma),
@@ -1067,12 +1074,12 @@ int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
if (!dma->dma_addr) {
- spin_unlock(&card->ctrl[i].queue_lock);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
kmem_cache_free(rsxx_dma_pool, dma);
return -ENOMEM;
}
}
- spin_unlock(&card->ctrl[i].queue_lock);
+ spin_unlock_bh(&card->ctrl[i].queue_lock);
}
return 0;
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 382e8bf5c03b..5ad5055a4104 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -39,6 +39,7 @@
#include <linux/vmalloc.h>
#include <linux/timer.h>
#include <linux/ioctl.h>
+#include <linux/delay.h>
#include "rsxx.h"
#include "rsxx_cfg.h"
@@ -114,6 +115,7 @@ struct rsxx_dma_ctrl {
struct timer_list activity_timer;
struct dma_tracker_list *trackers;
struct rsxx_dma_stats stats;
+ struct mutex work_lock;
};
struct rsxx_cardinfo {
@@ -134,6 +136,7 @@ struct rsxx_cardinfo {
spinlock_t lock;
bool active;
struct creg_cmd *active_cmd;
+ struct workqueue_struct *creg_wq;
struct work_struct done_work;
struct list_head queue;
unsigned int q_depth;
@@ -154,6 +157,7 @@ struct rsxx_cardinfo {
int buf_len;
} log;
+ struct workqueue_struct *event_wq;
struct work_struct event_work;
unsigned int state;
u64 size8;
@@ -181,6 +185,8 @@ struct rsxx_cardinfo {
int n_targets;
struct rsxx_dma_ctrl *ctrl;
+
+ struct dentry *debugfs_dir;
};
enum rsxx_pci_regmap {
@@ -283,6 +289,7 @@ enum rsxx_creg_addr {
CREG_ADD_CAPABILITIES = 0x80001050,
CREG_ADD_LOG = 0x80002000,
CREG_ADD_NUM_TARGETS = 0x80003000,
+ CREG_ADD_CRAM = 0xA0000000,
CREG_ADD_CONFIG = 0xB0000000,
};
@@ -372,6 +379,8 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
int rsxx_dma_setup(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
int rsxx_dma_configure(struct rsxx_cardinfo *card);
@@ -382,7 +391,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
void *cb_data);
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
/***** cregs.c *****/
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index dd5b2fed97e9..bf4b9d282c04 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -50,110 +50,118 @@
#include "common.h"
/*
- * These are rather arbitrary. They are fairly large because adjacent requests
- * pulled from a communication ring are quite likely to end up being part of
- * the same scatter/gather request at the disc.
+ * Maximum number of unused free pages to keep in the internal buffer.
+ * Setting this too low reduces the memory used by each backend, but can
+ * incur a performance penalty.
*
- * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
- *
- * This will increase the chances of being able to write whole tracks.
- * 64 should be enough to keep us competitive with Linux.
+ * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
+ * can be set to a lower value at the risk of degraded performance on some
+ * I/O-intensive workloads.
*/
-static int xen_blkif_reqs = 64;
-module_param_named(reqs, xen_blkif_reqs, int, 0);
-MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
-/* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats;
-module_param(log_stats, int, 0644);
+static int xen_blkif_max_buffer_pages = 1024;
+module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
+MODULE_PARM_DESC(max_buffer_pages,
+"Maximum number of free pages to keep in each block backend buffer");
/*
- * Each outstanding request that we've passed to the lower device layers has a
- * 'pending_req' allocated to it. Each buffer_head that completes decrements
- * the pendcnt towards zero. When it hits zero, the specified domain has a
- * response queued for it, with the saved 'id' passed back.
+ * Maximum number of grants to map persistently in blkback. For maximum
+ * performance this should be the total number of grants that can be used
+ * to fill the ring, but since this might become too high, especially with
+ * the use of indirect descriptors, we set it to a value that provides good
+ * performance without using too much memory.
+ *
+ * When the list of persistent grants is full we clean it up using an LRU
+ * algorithm.
*/
-struct pending_req {
- struct xen_blkif *blkif;
- u64 id;
- int nr_pages;
- atomic_t pendcnt;
- unsigned short operation;
- int status;
- struct list_head free_list;
- DECLARE_BITMAP(unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-};
-#define BLKBACK_INVALID_HANDLE (~0)
+static int xen_blkif_max_pgrants = 1056;
+module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
+MODULE_PARM_DESC(max_persistent_grants,
+ "Maximum number of grants to map persistently");
-struct xen_blkbk {
- struct pending_req *pending_reqs;
- /* List of all 'pending_req' available */
- struct list_head pending_free;
- /* And its spinlock. */
- spinlock_t pending_free_lock;
- wait_queue_head_t pending_free_wq;
- /* The list of all pages that are available. */
- struct page **pending_pages;
- /* And the grant handles that are available. */
- grant_handle_t *pending_grant_handles;
-};
-
-static struct xen_blkbk *blkbk;
+/*
+ * The LRU mechanism to clean the lists of persistent grants needs to
+ * be executed periodically. The time interval between consecutive executions
+ * of the purge mechanism is set in ms.
+ */
+#define LRU_INTERVAL 100
/*
- * Maximum number of grant pages that can be mapped in blkback.
- * BLKIF_MAX_SEGMENTS_PER_REQUEST * RING_SIZE is the maximum number of
- * pages that blkback will persistently map.
- * Currently, this is:
- * RING_SIZE = 32 (for all known ring types)
- * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11
- * sizeof(struct persistent_gnt) = 48
- * So the maximum memory used to store the grants is:
- * 32 * 11 * 48 = 16896 bytes
+ * When the list of persistent grants is full we remove unused grants
+ * from it. This is the percentage of grants to remove on each LRU
+ * execution.
*/
-static inline unsigned int max_mapped_grant_pages(enum blkif_protocol protocol)
+#define LRU_PERCENT_CLEAN 5
+
+/* Run-time switchable: /sys/module/blkback/parameters/ */
+static unsigned int log_stats;
+module_param(log_stats, int, 0644);
+
+#define BLKBACK_INVALID_HANDLE (~0)
+
+/* Number of free pages to remove on each call to free_xenballooned_pages */
+#define NUM_BATCH_FREE_PAGES 10
+
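+/*
+ * Pop a page from the per-blkif pool of free pages; when the pool is
+ * empty, fall back to allocating a ballooned page.
+ */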
+static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
- switch (protocol) {
- case BLKIF_PROTOCOL_NATIVE:
- return __CONST_RING_SIZE(blkif, PAGE_SIZE) *
- BLKIF_MAX_SEGMENTS_PER_REQUEST;
- case BLKIF_PROTOCOL_X86_32:
- return __CONST_RING_SIZE(blkif_x86_32, PAGE_SIZE) *
- BLKIF_MAX_SEGMENTS_PER_REQUEST;
- case BLKIF_PROTOCOL_X86_64:
- return __CONST_RING_SIZE(blkif_x86_64, PAGE_SIZE) *
- BLKIF_MAX_SEGMENTS_PER_REQUEST;
- default:
- BUG();
+ unsigned long flags;
+
+ spin_lock_irqsave(&blkif->free_pages_lock, flags);
+ if (list_empty(&blkif->free_pages)) {
+ BUG_ON(blkif->free_pages_num != 0);
+ spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+ return alloc_xenballooned_pages(1, page, false);
}
+ BUG_ON(blkif->free_pages_num == 0);
+ page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
+ list_del(&page[0]->lru);
+ blkif->free_pages_num--;
+ spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+
return 0;
}
-
-/*
- * Little helpful macro to figure out the index and virtual address of the
- * pending_pages[..]. For each 'pending_req' we have have up to
- * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
- * 10 and would index in the pending_pages[..].
- */
-static inline int vaddr_pagenr(struct pending_req *req, int seg)
+static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
+ int num)
{
- return (req - blkbk->pending_reqs) *
- BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
-}
+ unsigned long flags;
+ int i;
-#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+ spin_lock_irqsave(&blkif->free_pages_lock, flags);
+ for (i = 0; i < num; i++)
+ list_add(&page[i]->lru, &blkif->free_pages);
+ blkif->free_pages_num += num;
+ spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+}
-static inline unsigned long vaddr(struct pending_req *req, int seg)
+static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
- unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
- return (unsigned long)pfn_to_kaddr(pfn);
-}
+ /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
+ struct page *page[NUM_BATCH_FREE_PAGES];
+ unsigned int num_pages = 0;
+ unsigned long flags;
-#define pending_handle(_req, _seg) \
- (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
+ spin_lock_irqsave(&blkif->free_pages_lock, flags);
+ while (blkif->free_pages_num > num) {
+ BUG_ON(list_empty(&blkif->free_pages));
+ page[num_pages] = list_first_entry(&blkif->free_pages,
+ struct page, lru);
+ list_del(&page[num_pages]->lru);
+ blkif->free_pages_num--;
+ if (++num_pages == NUM_BATCH_FREE_PAGES) {
+ spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+ free_xenballooned_pages(num_pages, page);
+ spin_lock_irqsave(&blkif->free_pages_lock, flags);
+ num_pages = 0;
+ }
+ }
+ spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
+ if (num_pages != 0)
+ free_xenballooned_pages(num_pages, page);
+}
+#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
@@ -170,13 +178,29 @@ static void make_response(struct xen_blkif *blkif, u64 id,
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
-static void add_persistent_gnt(struct rb_root *root,
+/*
+ * We don't need locking around the persistent grant helpers
+ * because blkback uses a single thread for each backend, so we
+ * can be sure that these functions will never be called recursively.
+ *
+ * The only exception is put_persistent_gnt, which can be called
+ * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
+ * bit operations to modify the flags of a persistent grant and to count
+ * the number of used grants.
+ */
+static int add_persistent_gnt(struct xen_blkif *blkif,
struct persistent_gnt *persistent_gnt)
{
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = NULL, *parent = NULL;
struct persistent_gnt *this;
+ if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
+ if (!blkif->vbd.overflow_max_grants)
+ blkif->vbd.overflow_max_grants = 1;
+ return -EBUSY;
+ }
/* Figure out where to put new node */
+ new = &blkif->persistent_gnts.rb_node;
while (*new) {
this = container_of(*new, struct persistent_gnt, node);
@@ -186,22 +210,28 @@ static void add_persistent_gnt(struct rb_root *root,
else if (persistent_gnt->gnt > this->gnt)
new = &((*new)->rb_right);
else {
- pr_alert(DRV_PFX " trying to add a gref that's already in the tree\n");
- BUG();
+ pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
+ return -EINVAL;
}
}
+ bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
+ set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
/* Add new node and rebalance tree. */
rb_link_node(&(persistent_gnt->node), parent, new);
- rb_insert_color(&(persistent_gnt->node), root);
+ rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
+ blkif->persistent_gnt_c++;
+ atomic_inc(&blkif->persistent_gnt_in_use);
+ return 0;
}
-static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
+static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
grant_ref_t gref)
{
struct persistent_gnt *data;
- struct rb_node *node = root->rb_node;
+ struct rb_node *node = NULL;
+ node = blkif->persistent_gnts.rb_node;
while (node) {
data = container_of(node, struct persistent_gnt, node);
@@ -209,13 +239,31 @@ static struct persistent_gnt *get_persistent_gnt(struct rb_root *root,
node = node->rb_left;
else if (gref > data->gnt)
node = node->rb_right;
- else
+ else {
+ if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+ pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
+ return NULL;
+ }
+ set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+ atomic_inc(&blkif->persistent_gnt_in_use);
return data;
+ }
}
return NULL;
}
-static void free_persistent_gnts(struct rb_root *root, unsigned int num)
+static void put_persistent_gnt(struct xen_blkif *blkif,
+ struct persistent_gnt *persistent_gnt)
+{
+ if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
+ set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+ clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+ atomic_dec(&blkif->persistent_gnt_in_use);
+}
+
+static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
+ unsigned int num)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -240,7 +288,7 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
ret = gnttab_unmap_refs(unmap, NULL, pages,
segs_to_unmap);
BUG_ON(ret);
- free_xenballooned_pages(segs_to_unmap, pages);
+ put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
}
@@ -251,21 +299,148 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
BUG_ON(num != 0);
}
+static void unmap_purged_grants(struct work_struct *work)
+{
+ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct persistent_gnt *persistent_gnt;
+ int ret, segs_to_unmap = 0;
+ struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+
+ while (!list_empty(&blkif->persistent_purge_list)) {
+ persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
+ struct persistent_gnt,
+ remove_node);
+ list_del(&persistent_gnt->remove_node);
+
+ gnttab_set_unmap_op(&unmap[segs_to_unmap],
+ vaddr(persistent_gnt->page),
+ GNTMAP_host_map,
+ persistent_gnt->handle);
+
+ pages[segs_to_unmap] = persistent_gnt->page;
+
+ if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
+ BUG_ON(ret);
+ put_free_pages(blkif, pages, segs_to_unmap);
+ segs_to_unmap = 0;
+ }
+ kfree(persistent_gnt);
+ }
+ if (segs_to_unmap > 0) {
+ ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
+ BUG_ON(ret);
+ put_free_pages(blkif, pages, segs_to_unmap);
+ }
+}
+
+static void purge_persistent_gnt(struct xen_blkif *blkif)
+{
+ struct persistent_gnt *persistent_gnt;
+ struct rb_node *n;
+ unsigned int num_clean, total;
+ bool scan_used = false, clean_used = false;
+ struct rb_root *root;
+
+ if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
+ (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
+ !blkif->vbd.overflow_max_grants)) {
+ return;
+ }
+
+ if (work_pending(&blkif->persistent_purge_work)) {
+ pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
+ return;
+ }
+
+ num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+ num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
+ num_clean = min(blkif->persistent_gnt_c, num_clean);
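+ /*
+ * With the default module parameters (max_persistent_grants = 1056,
+ * LRU_PERCENT_CLEAN = 5) this works out to (1056 / 100) * 5 = 50,
+ * so each purge tries to remove everything above the limit plus
+ * roughly 50 extra grants, capped at the number currently in the
+ * tree.
+ */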
+ if ((num_clean == 0) ||
+ (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
+ return;
+
+ /*
+ * At this point, we can be sure that there will be no calls
+ * to get_persistent_gnt (because we are executing this code from
+ * xen_blkif_schedule); there can only be calls to put_persistent_gnt,
+ * which means that the number of currently used grants will go down,
+ * but never up, so we will always be able to remove the requested
+ * number of grants.
+ */
+
+ total = num_clean;
+
+ pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
+
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ root = &blkif->persistent_gnts;
+purge_list:
+ foreach_grant_safe(persistent_gnt, n, root, node) {
+ BUG_ON(persistent_gnt->handle ==
+ BLKBACK_INVALID_HANDLE);
+
+ if (clean_used) {
+ clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+ continue;
+ }
+
+ if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+ continue;
+ if (!scan_used &&
+ (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+ continue;
+
+ rb_erase(&persistent_gnt->node, root);
+ list_add(&persistent_gnt->remove_node,
+ &blkif->persistent_purge_list);
+ if (--num_clean == 0)
+ goto finished;
+ }
+ /*
+ * If we get here it means we also need to start cleaning
+ * grants that were used since the last purge in order to reach
+ * the requested number.
+ */
+ if (!scan_used && !clean_used) {
+ pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
+ scan_used = true;
+ goto purge_list;
+ }
+finished:
+ if (!clean_used) {
+ pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
+ clean_used = true;
+ goto purge_list;
+ }
+
+ blkif->persistent_gnt_c -= (total - num_clean);
+ blkif->vbd.overflow_max_grants = 0;
+
+ /* We can defer this work */
+ INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
+ schedule_work(&blkif->persistent_purge_work);
+ pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
+ return;
+}
+
/*
* Retrieve from the 'pending_reqs' a free pending_req structure to be used.
*/
-static struct pending_req *alloc_req(void)
+static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
struct pending_req *req = NULL;
unsigned long flags;
- spin_lock_irqsave(&blkbk->pending_free_lock, flags);
- if (!list_empty(&blkbk->pending_free)) {
- req = list_entry(blkbk->pending_free.next, struct pending_req,
+ spin_lock_irqsave(&blkif->pending_free_lock, flags);
+ if (!list_empty(&blkif->pending_free)) {
+ req = list_entry(blkif->pending_free.next, struct pending_req,
free_list);
list_del(&req->free_list);
}
- spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
+ spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
return req;
}
@@ -273,17 +448,17 @@ static struct pending_req *alloc_req(void)
* Return the 'pending_req' structure back to the freepool. We also
* wake up the thread if it was waiting for a free page.
*/
-static void free_req(struct pending_req *req)
+static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
unsigned long flags;
int was_empty;
- spin_lock_irqsave(&blkbk->pending_free_lock, flags);
- was_empty = list_empty(&blkbk->pending_free);
- list_add(&req->free_list, &blkbk->pending_free);
- spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
+ spin_lock_irqsave(&blkif->pending_free_lock, flags);
+ was_empty = list_empty(&blkif->pending_free);
+ list_add(&req->free_list, &blkif->pending_free);
+ spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
if (was_empty)
- wake_up(&blkbk->pending_free_wq);
+ wake_up(&blkif->pending_free_wq);
}
/*
@@ -382,10 +557,12 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
- " | ds %4llu\n",
+ " | ds %4llu | pg: %4u/%4d\n",
current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req,
- blkif->st_f_req, blkif->st_ds_req);
+ blkif->st_f_req, blkif->st_ds_req,
+ blkif->persistent_gnt_c,
+ xen_blkif_max_pgrants);
blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
blkif->st_rd_req = 0;
blkif->st_wr_req = 0;
@@ -397,6 +574,8 @@ int xen_blkif_schedule(void *arg)
{
struct xen_blkif *blkif = arg;
struct xen_vbd *vbd = &blkif->vbd;
+ unsigned long timeout;
+ int ret;
xen_blkif_get(blkif);
@@ -406,27 +585,52 @@ int xen_blkif_schedule(void *arg)
if (unlikely(vbd->size != vbd_sz(vbd)))
xen_vbd_resize(blkif);
- wait_event_interruptible(
+ timeout = msecs_to_jiffies(LRU_INTERVAL);
+
+ timeout = wait_event_interruptible_timeout(
blkif->wq,
- blkif->waiting_reqs || kthread_should_stop());
- wait_event_interruptible(
- blkbk->pending_free_wq,
- !list_empty(&blkbk->pending_free) ||
- kthread_should_stop());
+ blkif->waiting_reqs || kthread_should_stop(),
+ timeout);
+ if (timeout == 0)
+ goto purge_gnt_list;
+ timeout = wait_event_interruptible_timeout(
+ blkif->pending_free_wq,
+ !list_empty(&blkif->pending_free) ||
+ kthread_should_stop(),
+ timeout);
+ if (timeout == 0)
+ goto purge_gnt_list;
blkif->waiting_reqs = 0;
smp_mb(); /* clear flag *before* checking for work */
- if (do_block_io_op(blkif))
+ ret = do_block_io_op(blkif);
+ if (ret > 0)
blkif->waiting_reqs = 1;
+ if (ret == -EACCES)
+ wait_event_interruptible(blkif->shutdown_wq,
+ kthread_should_stop());
+
+purge_gnt_list:
+ if (blkif->vbd.feature_gnt_persistent &&
+ time_after(jiffies, blkif->next_lru)) {
+ purge_persistent_gnt(blkif);
+ blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
+ }
+
+ /* Shrink if we have more than xen_blkif_max_buffer_pages */
+ shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
if (log_stats && time_after(jiffies, blkif->st_print))
print_stats(blkif);
}
+ /* Since we are shutting down, remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
+
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
- free_persistent_gnts(&blkif->persistent_gnts,
+ free_persistent_gnts(blkif, &blkif->persistent_gnts,
blkif->persistent_gnt_c);
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
@@ -441,148 +645,98 @@ int xen_blkif_schedule(void *arg)
return 0;
}
-struct seg_buf {
- unsigned int offset;
- unsigned int nsec;
-};
/*
* Unmap the grant references, and also remove the M2P over-rides
* used in the 'pending_req'.
*/
-static void xen_blkbk_unmap(struct pending_req *req)
+static void xen_blkbk_unmap(struct xen_blkif *blkif,
+ struct grant_page *pages[],
+ int num)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
- grant_handle_t handle;
int ret;
- for (i = 0; i < req->nr_pages; i++) {
- if (!test_bit(i, req->unmap_seg))
+ for (i = 0; i < num; i++) {
+ if (pages[i]->persistent_gnt != NULL) {
+ put_persistent_gnt(blkif, pages[i]->persistent_gnt);
continue;
- handle = pending_handle(req, i);
- if (handle == BLKBACK_INVALID_HANDLE)
+ }
+ if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
continue;
- gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
- GNTMAP_host_map, handle);
- pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
- pages[invcount] = virt_to_page(vaddr(req, i));
- invcount++;
+ unmap_pages[invcount] = pages[i]->page;
+ gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
+ GNTMAP_host_map, pages[i]->handle);
+ pages[i]->handle = BLKBACK_INVALID_HANDLE;
+ if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+ invcount);
+ BUG_ON(ret);
+ put_free_pages(blkif, unmap_pages, invcount);
+ invcount = 0;
+ }
+ }
+ if (invcount) {
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+ BUG_ON(ret);
+ put_free_pages(blkif, unmap_pages, invcount);
}
-
- ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
- BUG_ON(ret);
}
-static int xen_blkbk_map(struct blkif_request *req,
- struct pending_req *pending_req,
- struct seg_buf seg[],
- struct page *pages[])
+static int xen_blkbk_map(struct xen_blkif *blkif,
+ struct grant_page *pages[],
+ int num, bool ro)
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- struct persistent_gnt *persistent_gnts[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt = NULL;
- struct xen_blkif *blkif = pending_req->blkif;
phys_addr_t addr = 0;
- int i, j;
- bool new_map;
- int nseg = req->u.rw.nr_segments;
+ int i, seg_idx, new_map_idx;
int segs_to_map = 0;
int ret = 0;
+ int last_map = 0, map_until = 0;
int use_persistent_gnts;
use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
- BUG_ON(blkif->persistent_gnt_c >
- max_mapped_grant_pages(pending_req->blkif->blk_protocol));
-
/*
* Fill out preq.nr_sects with proper amount of sectors, and setup
* assign map[..] with the PFN of the page in our domain with the
* corresponding grant reference for each page.
*/
- for (i = 0; i < nseg; i++) {
+again:
+ for (i = map_until; i < num; i++) {
uint32_t flags;
if (use_persistent_gnts)
persistent_gnt = get_persistent_gnt(
- &blkif->persistent_gnts,
- req->u.rw.seg[i].gref);
+ blkif,
+ pages[i]->gref);
if (persistent_gnt) {
/*
* We are using persistent grants and
* the grant is already mapped
*/
- new_map = false;
- } else if (use_persistent_gnts &&
- blkif->persistent_gnt_c <
- max_mapped_grant_pages(blkif->blk_protocol)) {
- /*
- * We are using persistent grants, the grant is
- * not mapped but we have room for it
- */
- new_map = true;
- persistent_gnt = kmalloc(
- sizeof(struct persistent_gnt),
- GFP_KERNEL);
- if (!persistent_gnt)
- return -ENOMEM;
- if (alloc_xenballooned_pages(1, &persistent_gnt->page,
- false)) {
- kfree(persistent_gnt);
- return -ENOMEM;
- }
- persistent_gnt->gnt = req->u.rw.seg[i].gref;
- persistent_gnt->handle = BLKBACK_INVALID_HANDLE;
-
- pages_to_gnt[segs_to_map] =
- persistent_gnt->page;
- addr = (unsigned long) pfn_to_kaddr(
- page_to_pfn(persistent_gnt->page));
-
- add_persistent_gnt(&blkif->persistent_gnts,
- persistent_gnt);
- blkif->persistent_gnt_c++;
- pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
- persistent_gnt->gnt, blkif->persistent_gnt_c,
- max_mapped_grant_pages(blkif->blk_protocol));
+ pages[i]->page = persistent_gnt->page;
+ pages[i]->persistent_gnt = persistent_gnt;
} else {
- /*
- * We are either using persistent grants and
- * hit the maximum limit of grants mapped,
- * or we are not using persistent grants.
- */
- if (use_persistent_gnts &&
- !blkif->vbd.overflow_max_grants) {
- blkif->vbd.overflow_max_grants = 1;
- pr_alert(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
- blkif->domid, blkif->vbd.handle);
- }
- new_map = true;
- pages[i] = blkbk->pending_page(pending_req, i);
- addr = vaddr(pending_req, i);
- pages_to_gnt[segs_to_map] =
- blkbk->pending_page(pending_req, i);
- }
-
- if (persistent_gnt) {
- pages[i] = persistent_gnt->page;
- persistent_gnts[i] = persistent_gnt;
- } else {
- persistent_gnts[i] = NULL;
- }
-
- if (new_map) {
+ if (get_free_page(blkif, &pages[i]->page))
+ goto out_of_memory;
+ addr = vaddr(pages[i]->page);
+ pages_to_gnt[segs_to_map] = pages[i]->page;
+ pages[i]->persistent_gnt = NULL;
flags = GNTMAP_host_map;
- if (!persistent_gnt &&
- (pending_req->operation != BLKIF_OP_READ))
+ if (!use_persistent_gnts && ro)
flags |= GNTMAP_readonly;
gnttab_set_map_op(&map[segs_to_map++], addr,
- flags, req->u.rw.seg[i].gref,
+ flags, pages[i]->gref,
blkif->domid);
}
+ map_until = i + 1;
+ if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ break;
}
if (segs_to_map) {
@@ -595,49 +749,133 @@ static int xen_blkbk_map(struct blkif_request *req,
* so that when we access vaddr(pending_req,i) it has the contents of
* the page from the other domain.
*/
- bitmap_zero(pending_req->unmap_seg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
- for (i = 0, j = 0; i < nseg; i++) {
- if (!persistent_gnts[i] ||
- persistent_gnts[i]->handle == BLKBACK_INVALID_HANDLE) {
+ for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
+ if (!pages[seg_idx]->persistent_gnt) {
/* This is a newly mapped grant */
- BUG_ON(j >= segs_to_map);
- if (unlikely(map[j].status != 0)) {
+ BUG_ON(new_map_idx >= segs_to_map);
+ if (unlikely(map[new_map_idx].status != 0)) {
pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
- map[j].handle = BLKBACK_INVALID_HANDLE;
+ pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
ret |= 1;
- if (persistent_gnts[i]) {
- rb_erase(&persistent_gnts[i]->node,
- &blkif->persistent_gnts);
- blkif->persistent_gnt_c--;
- kfree(persistent_gnts[i]);
- persistent_gnts[i] = NULL;
- }
+ goto next;
}
+ pages[seg_idx]->handle = map[new_map_idx].handle;
+ } else {
+ continue;
}
- if (persistent_gnts[i]) {
- if (persistent_gnts[i]->handle ==
- BLKBACK_INVALID_HANDLE) {
+ if (use_persistent_gnts &&
+ blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
+ /*
+ * We are using persistent grants; the grant is
+ * not yet mapped, but we might have room for it.
+ */
+ persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
+ GFP_KERNEL);
+ if (!persistent_gnt) {
/*
- * If this is a new persistent grant
- * save the handler
+ * If we don't have enough memory to
+ * allocate the persistent_gnt struct,
+ * map this grant non-persistently.
*/
- persistent_gnts[i]->handle = map[j++].handle;
+ goto next;
}
- pending_handle(pending_req, i) =
- persistent_gnts[i]->handle;
+ persistent_gnt->gnt = map[new_map_idx].ref;
+ persistent_gnt->handle = map[new_map_idx].handle;
+ persistent_gnt->page = pages[seg_idx]->page;
+ if (add_persistent_gnt(blkif,
+ persistent_gnt)) {
+ kfree(persistent_gnt);
+ persistent_gnt = NULL;
+ goto next;
+ }
+ pages[seg_idx]->persistent_gnt = persistent_gnt;
+ pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
+ persistent_gnt->gnt, blkif->persistent_gnt_c,
+ xen_blkif_max_pgrants);
+ goto next;
+ }
+ if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
+ blkif->vbd.overflow_max_grants = 1;
+ pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
+ blkif->domid, blkif->vbd.handle);
+ }
+ /*
+ * We could not map this grant persistently, so use it as
+ * a non-persistent grant.
+ */
+next:
+ new_map_idx++;
+ }
+ segs_to_map = 0;
+ last_map = map_until;
+ if (map_until != num)
+ goto again;
- if (ret)
- continue;
- } else {
- pending_handle(pending_req, i) = map[j++].handle;
- bitmap_set(pending_req->unmap_seg, i, 1);
+ return ret;
+
+out_of_memory:
+ pr_alert(DRV_PFX "%s: out of memory\n", __func__);
+ put_free_pages(blkif, pages_to_gnt, segs_to_map);
+ return -ENOMEM;
+}
+
+static int xen_blkbk_map_seg(struct pending_req *pending_req)
+{
+ int rc;
+
+ rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
+ pending_req->nr_pages,
+ (pending_req->operation != BLKIF_OP_READ));
+
+ return rc;
+}
- if (ret)
- continue;
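+/*
+ * For a BLKIF_OP_INDIRECT request: map the indirect descriptor pages,
+ * copy each segment's gref, offset and length into the pending_req and
+ * seg[] arrays, accumulate the sector count in preq, and unmap the
+ * descriptor pages again before returning.
+ */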
+static int xen_blkbk_parse_indirect(struct blkif_request *req,
+ struct pending_req *pending_req,
+ struct seg_buf seg[],
+ struct phys_req *preq)
+{
+ struct grant_page **pages = pending_req->indirect_pages;
+ struct xen_blkif *blkif = pending_req->blkif;
+ int indirect_grefs, rc, n, nseg, i;
+ struct blkif_request_segment_aligned *segments = NULL;
+
+ nseg = pending_req->nr_pages;
+ indirect_grefs = INDIRECT_PAGES(nseg);
+ BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
+
+ for (i = 0; i < indirect_grefs; i++)
+ pages[i]->gref = req->u.indirect.indirect_grefs[i];
+
+ rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
+ if (rc)
+ goto unmap;
+
+ for (n = 0, i = 0; n < nseg; n++) {
+ if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
+ /* Map indirect segments */
+ if (segments)
+ kunmap_atomic(segments);
+ segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
+ }
+ i = n % SEGS_PER_INDIRECT_FRAME;
+ pending_req->segments[n]->gref = segments[i].gref;
+ seg[n].nsec = segments[i].last_sect -
+ segments[i].first_sect + 1;
+ seg[n].offset = (segments[i].first_sect << 9);
+ if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
+ (segments[i].last_sect < segments[i].first_sect)) {
+ rc = -EINVAL;
+ goto unmap;
}
- seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
+ preq->nr_sects += seg[n].nsec;
}
- return ret;
+
+unmap:
+ if (segments)
+ kunmap_atomic(segments);
+ xen_blkbk_unmap(blkif, pages, indirect_grefs);
+ return rc;
}
static int dispatch_discard_io(struct xen_blkif *blkif,
@@ -647,7 +885,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
+ struct phys_req preq;
+
+ preq.sector_number = req->u.discard.sector_number;
+ preq.nr_sects = req->u.discard.nr_sectors;
+ err = xen_vbd_translate(&preq, blkif, WRITE);
+ if (err) {
+ pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
+ preq.sector_number,
+ preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
+ goto fail_response;
+ }
blkif->st_ds_req++;
xen_blkif_get(blkif);
@@ -658,7 +907,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
GFP_KERNEL, secure);
-
+fail_response:
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
status = BLKIF_RSP_EOPNOTSUPP;
@@ -674,7 +923,7 @@ static int dispatch_other_io(struct xen_blkif *blkif,
struct blkif_request *req,
struct pending_req *pending_req)
{
- free_req(pending_req);
+ free_req(blkif, pending_req);
make_response(blkif, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
@@ -726,7 +975,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req);
+ xen_blkbk_unmap(pending_req->blkif,
+ pending_req->segments,
+ pending_req->nr_pages);
make_response(pending_req->blkif, pending_req->id,
pending_req->operation, pending_req->status);
xen_blkif_put(pending_req->blkif);
@@ -734,7 +985,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
if (atomic_read(&pending_req->blkif->drain))
complete(&pending_req->blkif->drain_complete);
}
- free_req(pending_req);
+ free_req(pending_req->blkif, pending_req);
}
}
@@ -767,6 +1018,12 @@ __do_block_io_op(struct xen_blkif *blkif)
rp = blk_rings->common.sring->req_prod;
rmb(); /* Ensure we see queued requests up to 'rp'. */
+ if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
+ rc = blk_rings->common.rsp_prod_pvt;
+ pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
+ rp, rc, rp - rc, blkif->vbd.pdevice);
+ return -EACCES;
+ }
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
@@ -777,7 +1034,7 @@ __do_block_io_op(struct xen_blkif *blkif)
break;
}
- pending_req = alloc_req();
+ pending_req = alloc_req(blkif);
if (NULL == pending_req) {
blkif->st_oo_req++;
more_to_do = 1;
@@ -807,11 +1064,12 @@ __do_block_io_op(struct xen_blkif *blkif)
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
+ case BLKIF_OP_INDIRECT:
if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
- free_req(pending_req);
+ free_req(blkif, pending_req);
if (dispatch_discard_io(blkif, &req))
goto done;
break;
@@ -853,17 +1111,28 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
struct pending_req *pending_req)
{
struct phys_req preq;
- struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct seg_buf *seg = pending_req->seg;
unsigned int nseg;
struct bio *bio = NULL;
- struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
int operation;
struct blk_plug plug;
bool drain = false;
- struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct grant_page **pages = pending_req->segments;
+ unsigned short req_operation;
+
+ req_operation = req->operation == BLKIF_OP_INDIRECT ?
+ req->u.indirect.indirect_op : req->operation;
+ if ((req->operation == BLKIF_OP_INDIRECT) &&
+ (req_operation != BLKIF_OP_READ) &&
+ (req_operation != BLKIF_OP_WRITE)) {
+ pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
+ req_operation);
+ goto fail_response;
+ }
- switch (req->operation) {
+ switch (req_operation) {
case BLKIF_OP_READ:
blkif->st_rd_req++;
operation = READ;
@@ -885,33 +1154,47 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
/* Check that the number of segments is sane. */
- nseg = req->u.rw.nr_segments;
+ nseg = req->operation == BLKIF_OP_INDIRECT ?
+ req->u.indirect.nr_segments : req->u.rw.nr_segments;
if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
- unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
+ unlikely((req->operation != BLKIF_OP_INDIRECT) &&
+ (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
+ unlikely((req->operation == BLKIF_OP_INDIRECT) &&
+ (nseg > MAX_INDIRECT_SEGMENTS))) {
pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
nseg);
/* Haven't submitted any bio's yet. */
goto fail_response;
}
- preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
pending_req->blkif = blkif;
pending_req->id = req->u.rw.id;
- pending_req->operation = req->operation;
+ pending_req->operation = req_operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg;
- for (i = 0; i < nseg; i++) {
- seg[i].nsec = req->u.rw.seg[i].last_sect -
- req->u.rw.seg[i].first_sect + 1;
- if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
- (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
+ if (req->operation != BLKIF_OP_INDIRECT) {
+ preq.dev = req->u.rw.handle;
+ preq.sector_number = req->u.rw.sector_number;
+ for (i = 0; i < nseg; i++) {
+ pages[i]->gref = req->u.rw.seg[i].gref;
+ seg[i].nsec = req->u.rw.seg[i].last_sect -
+ req->u.rw.seg[i].first_sect + 1;
+ seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
+ if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
+ (req->u.rw.seg[i].last_sect <
+ req->u.rw.seg[i].first_sect))
+ goto fail_response;
+ preq.nr_sects += seg[i].nsec;
+ }
+ } else {
+ preq.dev = req->u.indirect.handle;
+ preq.sector_number = req->u.indirect.sector_number;
+ if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
goto fail_response;
- preq.nr_sects += seg[i].nsec;
-
}
if (xen_vbd_translate(&preq, blkif, operation) != 0) {
@@ -948,7 +1231,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (xen_blkbk_map(req, pending_req, seg, pages))
+ if (xen_blkbk_map_seg(pending_req))
goto fail_flush;
/*
@@ -960,11 +1243,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
(bio_add_page(bio,
- pages[i],
+ pages[i]->page,
seg[i].nsec << 9,
seg[i].offset) == 0)) {
- bio = bio_alloc(GFP_KERNEL, nseg-i);
+ int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_KERNEL, nr_iovecs);
if (unlikely(bio == NULL))
goto fail_put_bio;
@@ -1009,11 +1293,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
return 0;
fail_flush:
- xen_blkbk_unmap(pending_req);
+ xen_blkbk_unmap(blkif, pending_req->segments,
+ pending_req->nr_pages);
fail_response:
/* Haven't submitted any bio's yet. */
- make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
- free_req(pending_req);
+ make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
+ free_req(blkif, pending_req);
msleep(1); /* back off a bit */
return -EIO;
@@ -1070,73 +1355,20 @@ static void make_response(struct xen_blkif *blkif, u64 id,
static int __init xen_blkif_init(void)
{
- int i, mmap_pages;
int rc = 0;
if (!xen_domain())
return -ENODEV;
- blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
- if (!blkbk) {
- pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
- return -ENOMEM;
- }
-
- mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
-
- blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
- xen_blkif_reqs, GFP_KERNEL);
- blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
- mmap_pages, GFP_KERNEL);
- blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
- mmap_pages, GFP_KERNEL);
-
- if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
- !blkbk->pending_pages) {
- rc = -ENOMEM;
- goto out_of_memory;
- }
-
- for (i = 0; i < mmap_pages; i++) {
- blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
- blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
- if (blkbk->pending_pages[i] == NULL) {
- rc = -ENOMEM;
- goto out_of_memory;
- }
- }
rc = xen_blkif_interface_init();
if (rc)
goto failed_init;
- INIT_LIST_HEAD(&blkbk->pending_free);
- spin_lock_init(&blkbk->pending_free_lock);
- init_waitqueue_head(&blkbk->pending_free_wq);
-
- for (i = 0; i < xen_blkif_reqs; i++)
- list_add_tail(&blkbk->pending_reqs[i].free_list,
- &blkbk->pending_free);
-
rc = xen_blkif_xenbus_init();
if (rc)
goto failed_init;
- return 0;
-
- out_of_memory:
- pr_alert(DRV_PFX "%s: out of memory\n", __func__);
failed_init:
- kfree(blkbk->pending_reqs);
- kfree(blkbk->pending_grant_handles);
- if (blkbk->pending_pages) {
- for (i = 0; i < mmap_pages; i++) {
- if (blkbk->pending_pages[i])
- __free_page(blkbk->pending_pages[i]);
- }
- kfree(blkbk->pending_pages);
- }
- kfree(blkbk);
- blkbk = NULL;
return rc;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 60103e2517ba..8d8807563d99 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -50,6 +50,19 @@
__func__, __LINE__, ##args)
+/*
+ * This is the maximum number of segments that would be allowed in indirect
+ * requests. This value will also be passed to the frontend.
+ */
+#define MAX_INDIRECT_SEGMENTS 256
+
+#define SEGS_PER_INDIRECT_FRAME \
+ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+#define MAX_INDIRECT_PAGES \
+ ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
+#define INDIRECT_PAGES(_segs) \
+ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
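+/*
+ * Assuming 4 KiB pages and 8-byte blkif_request_segment_aligned entries,
+ * SEGS_PER_INDIRECT_FRAME works out to 512, so the 256 segments allowed
+ * by MAX_INDIRECT_SEGMENTS fit in a single indirect page
+ * (MAX_INDIRECT_PAGES = 1).
+ */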
+
/* Not a real protocol. Used to generate ring structs which contain
* the elements common to all protocols only. This way we get a
* compiler-checkable way to use common struct elements, so we can
@@ -83,12 +96,31 @@ struct blkif_x86_32_request_other {
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
+struct blkif_x86_32_request_indirect {
+ uint8_t indirect_op;
+ uint16_t nr_segments;
+ uint64_t id;
+ blkif_sector_t sector_number;
+ blkif_vdev_t handle;
+ uint16_t _pad1;
+ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+ /*
+ * The maximum number of indirect segments (and pages) that will
+ * be used is determined by MAX_INDIRECT_SEGMENTS; this value
+ * is also exported to the guest (via the xenstore
+ * feature-max-indirect-segments entry), so the frontend knows how
+ * many indirect segments the backend supports.
+ */
+ uint64_t _pad2; /* make it 64 byte aligned */
+} __attribute__((__packed__));
+
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard;
struct blkif_x86_32_request_other other;
+ struct blkif_x86_32_request_indirect indirect;
} u;
} __attribute__((__packed__));
@@ -127,12 +159,32 @@ struct blkif_x86_64_request_other {
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
+struct blkif_x86_64_request_indirect {
+ uint8_t indirect_op;
+ uint16_t nr_segments;
+ uint32_t _pad1; /* offsetof(blkif_..,u.indirect.id)==8 */
+ uint64_t id;
+ blkif_sector_t sector_number;
+ blkif_vdev_t handle;
+ uint16_t _pad2;
+ grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+ /*
+ * The maximum number of indirect segments (and pages) that will
+ * be used is determined by MAX_INDIRECT_SEGMENTS, this value
+ * is also exported to the guest (via xenstore
+ * feature-max-indirect-segments entry), so the frontend knows how
+ * many indirect segments the backend supports.
+ */
+ uint32_t _pad3; /* make it 64 byte aligned */
+} __attribute__((__packed__));
+
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard;
struct blkif_x86_64_request_other other;
+ struct blkif_x86_64_request_indirect indirect;
} u;
} __attribute__((__packed__));
@@ -182,12 +234,26 @@ struct xen_vbd {
struct backend_info;
+/* Number of available flags */
+#define PERSISTENT_GNT_FLAGS_SIZE 2
+/* This persistent grant is currently in use */
+#define PERSISTENT_GNT_ACTIVE 0
+/*
+ * This persistent grant has been used, this flag is set when we remove the
+ * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
+ */
+#define PERSISTENT_GNT_WAS_ACTIVE 1
+
+/* Number of requests that we can fit in a ring */
+#define XEN_BLKIF_REQS 32
struct persistent_gnt {
struct page *page;
grant_ref_t gnt;
grant_handle_t handle;
+ DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
struct rb_node node;
+ struct list_head remove_node;
};
struct xen_blkif {
@@ -219,6 +285,23 @@ struct xen_blkif {
/* tree to store persistent grants */
struct rb_root persistent_gnts;
unsigned int persistent_gnt_c;
+ atomic_t persistent_gnt_in_use;
+ unsigned long next_lru;
+
+	/* used by the kworker that offloads work from the persistent purge */
+ struct list_head persistent_purge_list;
+ struct work_struct persistent_purge_work;
+
+ /* buffer of free pages to map grant refs */
+ spinlock_t free_pages_lock;
+ int free_pages_num;
+ struct list_head free_pages;
+
+ /* List of all 'pending_req' available */
+ struct list_head pending_free;
+ /* And its spinlock. */
+ spinlock_t pending_free_lock;
+ wait_queue_head_t pending_free_wq;
/* statistics */
unsigned long st_print;
@@ -231,6 +314,41 @@ struct xen_blkif {
unsigned long long st_wr_sect;
wait_queue_head_t waiting_to_free;
+ /* Thread shutdown wait queue. */
+ wait_queue_head_t shutdown_wq;
+};
+
+struct seg_buf {
+ unsigned long offset;
+ unsigned int nsec;
+};
+
+struct grant_page {
+ struct page *page;
+ struct persistent_gnt *persistent_gnt;
+ grant_handle_t handle;
+ grant_ref_t gref;
+};
+
+/*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
+ * the pendcnt towards zero. When it hits zero, the specified domain has a
+ * response queued for it, with the saved 'id' passed back.
+ */
+struct pending_req {
+ struct xen_blkif *blkif;
+ u64 id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
+ struct list_head free_list;
+ struct grant_page *segments[MAX_INDIRECT_SEGMENTS];
+ /* Indirect descriptors */
+ struct grant_page *indirect_pages[MAX_INDIRECT_PAGES];
+ struct seg_buf seg[MAX_INDIRECT_SEGMENTS];
+ struct bio *biolist[MAX_INDIRECT_SEGMENTS];
};
@@ -257,6 +375,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
+int xen_blkif_purge_persistent(void *arg);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -268,7 +387,7 @@ struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
case BLKIF_OP_READ:
@@ -291,6 +410,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ barrier();
+ j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < j; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
default:
/*
* Don't know how to translate this op. Only get the
@@ -304,7 +435,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
- int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
dst->operation = src->operation;
switch (src->operation) {
case BLKIF_OP_READ:
@@ -327,6 +458,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
+ case BLKIF_OP_INDIRECT:
+ dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
+ dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
+ dst->u.indirect.handle = src->u.indirect.handle;
+ dst->u.indirect.id = src->u.indirect.id;
+ dst->u.indirect.sector_number = src->u.indirect.sector_number;
+ barrier();
+ j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
+ for (i = 0; i < j; i++)
+ dst->u.indirect.indirect_grefs[i] =
+ src->u.indirect.indirect_grefs[i];
+ break;
default:
/*
* Don't know how to translate this op. Only get the
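
A standalone sketch of the indirect-descriptor sizing added to common.h above, assuming 4 KiB pages and an 8-byte struct blkif_request_segment_aligned entry (both are assumptions stated here, not values taken from this patch):

#include <stdio.h>

#define PAGE_SIZE              4096u   /* assumption: 4 KiB pages */
#define ALIGNED_SEGMENT_SIZE   8u      /* assumption: sizeof(struct blkif_request_segment_aligned) */
#define MAX_INDIRECT_SEGMENTS  256u

#define SEGS_PER_INDIRECT_FRAME (PAGE_SIZE / ALIGNED_SEGMENT_SIZE)
#define INDIRECT_PAGES(segs) (((segs) + SEGS_PER_INDIRECT_FRAME - 1) / SEGS_PER_INDIRECT_FRAME)

int main(void)
{
	/* 4096 / 8 = 512 segment entries fit in one indirect frame... */
	printf("segments per indirect frame: %u\n", SEGS_PER_INDIRECT_FRAME);
	/* ...so the backend's 256-segment maximum needs a single indirect page */
	printf("indirect pages for %u segments: %u\n",
	       MAX_INDIRECT_SEGMENTS, INDIRECT_PAGES(MAX_INDIRECT_SEGMENTS));
	return 0;
}
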
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 04608a6502d7..fe5c3cd10c34 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,12 +98,17 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
err = PTR_ERR(blkif->xenblkd);
blkif->xenblkd = NULL;
xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
+ return;
}
}
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
+ struct pending_req *req, *n;
+ int i, j;
+
+ BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
@@ -118,8 +123,57 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->st_print = jiffies;
init_waitqueue_head(&blkif->waiting_to_free);
blkif->persistent_gnts.rb_node = NULL;
+ spin_lock_init(&blkif->free_pages_lock);
+ INIT_LIST_HEAD(&blkif->free_pages);
+ blkif->free_pages_num = 0;
+ atomic_set(&blkif->persistent_gnt_in_use, 0);
+
+ INIT_LIST_HEAD(&blkif->pending_free);
+
+ for (i = 0; i < XEN_BLKIF_REQS; i++) {
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ goto fail;
+ list_add_tail(&req->free_list,
+ &blkif->pending_free);
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+ req->segments[j] = kzalloc(sizeof(*req->segments[0]),
+ GFP_KERNEL);
+ if (!req->segments[j])
+ goto fail;
+ }
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+ req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
+ GFP_KERNEL);
+ if (!req->indirect_pages[j])
+ goto fail;
+ }
+ }
+ spin_lock_init(&blkif->pending_free_lock);
+ init_waitqueue_head(&blkif->pending_free_wq);
+ init_waitqueue_head(&blkif->shutdown_wq);
return blkif;
+
+fail:
+ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_del(&req->free_list);
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+ if (!req->segments[j])
+ break;
+ kfree(req->segments[j]);
+ }
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+ if (!req->indirect_pages[j])
+ break;
+ kfree(req->indirect_pages[j]);
+ }
+ kfree(req);
+ }
+
+ kmem_cache_free(xen_blkif_cachep, blkif);
+
+ return ERR_PTR(-ENOMEM);
}
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
@@ -178,6 +232,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
+ wake_up(&blkif->shutdown_wq);
blkif->xenblkd = NULL;
}
@@ -198,8 +253,28 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
static void xen_blkif_free(struct xen_blkif *blkif)
{
+ struct pending_req *req, *n;
+ int i = 0, j;
+
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+
+ /* Check that there is no request in use */
+ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_del(&req->free_list);
+
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+ kfree(req->segments[j]);
+
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+ kfree(req->indirect_pages[j]);
+
+ kfree(req);
+ i++;
+ }
+
+ WARN_ON(i != XEN_BLKIF_REQS);
+
kmem_cache_free(xen_blkif_cachep, blkif);
}
@@ -678,6 +753,11 @@ again:
dev->nodename);
goto abort;
}
+ err = xenbus_printf(xbt, dev->nodename, "feature-max-indirect-segments", "%u",
+ MAX_INDIRECT_SEGMENTS);
+ if (err)
+ dev_warn(&dev->dev, "writing %s/feature-max-indirect-segments (%d)",
+ dev->nodename, err);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
@@ -704,6 +784,11 @@ again:
dev->nodename);
goto abort;
}
+ err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
+ bdev_physical_block_size(be->blkif->vbd.bdev));
+ if (err)
+ xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
+ dev->nodename);
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN)
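
The xenbus_printf() call above exports the backend limit as feature-max-indirect-segments; in the frontend patch that follows, blkfront_setup_indirect() clamps whatever it reads against the xen_blkif_max_segments module parameter and falls back to the 11 ring-embedded segments when the key is absent. A minimal standalone sketch of that clamping logic; frontend_segment_limit() is an illustrative helper name, not a driver function:

#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11	/* segments that fit directly in a ring slot */

/* backend_max < 0 models an absent feature-max-indirect-segments key */
static unsigned int frontend_segment_limit(int backend_max, unsigned int frontend_cap)
{
	if (backend_max < 0)
		return BLKIF_MAX_SEGMENTS_PER_REQUEST;	/* no indirect support: use direct segments */
	return (unsigned int)backend_max < frontend_cap ?
	       (unsigned int)backend_max : frontend_cap;
}

int main(void)
{
	/* backend offers 256, frontend default cap is 32 -> negotiate 32 */
	printf("%u\n", frontend_segment_limit(256, 32));
	/* key absent -> fall back to 11 direct segments */
	printf("%u\n", frontend_segment_limit(-1, 32));
	return 0;
}
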
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d89ef86220f4..a4660bbee8a6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -74,12 +74,30 @@ struct grant {
struct blk_shadow {
struct blkif_request req;
struct request *request;
- struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct grant **grants_used;
+ struct grant **indirect_grants;
+ struct scatterlist *sg;
+};
+
+struct split_bio {
+ struct bio *bio;
+ atomic_t pending;
+ int err;
};
static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;
+/*
+ * Maximum number of segments in indirect requests, the actual value used by
+ * the frontend driver is the minimum of this value and the value provided
+ * by the backend driver.
+ */
+
+static unsigned int xen_blkif_max_segments = 32;
+module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
+MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
+
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
/*
@@ -98,7 +116,6 @@ struct blkfront_info
enum blkif_state connected;
int ring_ref;
struct blkif_front_ring ring;
- struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int evtchn, irq;
struct request_queue *rq;
struct work_struct work;
@@ -114,6 +131,7 @@ struct blkfront_info
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int feature_persistent:1;
+ unsigned int max_indirect_segments;
int is_ready;
};
@@ -142,6 +160,13 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
+#define SEGS_PER_INDIRECT_FRAME \
+ (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+#define INDIRECT_GREFS(_segs) \
+ ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
+
+static int blkfront_setup_indirect(struct blkfront_info *info);
+
static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
@@ -358,7 +383,8 @@ static int blkif_queue_request(struct request *req)
struct blkif_request *ring_req;
unsigned long id;
unsigned int fsect, lsect;
- int i, ref;
+ int i, ref, n;
+ struct blkif_request_segment_aligned *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -369,21 +395,27 @@ static int blkif_queue_request(struct request *req)
grant_ref_t gref_head;
struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
+ int nseg, max_grefs;
if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
return 1;
- /* Check if we have enought grants to allocate a requests */
- if (info->persistent_gnts_c < BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ max_grefs = info->max_indirect_segments ?
+ info->max_indirect_segments +
+ INDIRECT_GREFS(info->max_indirect_segments) :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
+	/* Check if we have enough grants to allocate a request */
+ if (info->persistent_gnts_c < max_grefs) {
new_persistent_gnts = 1;
if (gnttab_alloc_grant_references(
- BLKIF_MAX_SEGMENTS_PER_REQUEST - info->persistent_gnts_c,
+ max_grefs - info->persistent_gnts_c,
&gref_head) < 0) {
gnttab_request_free_callback(
&info->callback,
blkif_restart_queue_callback,
info,
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ max_grefs);
return 1;
}
} else
@@ -394,42 +426,67 @@ static int blkif_queue_request(struct request *req)
id = get_id_from_freelist(info);
info->shadow[id].request = req;
- ring_req->u.rw.id = id;
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.rw.handle = info->handle;
-
- ring_req->operation = rq_data_dir(req) ?
- BLKIF_OP_WRITE : BLKIF_OP_READ;
-
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
- /*
- * Ideally we can do an unordered flush-to-disk. In case the
- * backend onlysupports barriers, use that. A barrier request
- * a superset of FUA, so we can implement it the same
- * way. (It's also a FLUSH+FUA, since it is
- * guaranteed ordered WRT previous writes.)
- */
- ring_req->operation = info->flush_op;
- }
-
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
- /* id, sector_number and handle are set above. */
ring_req->operation = BLKIF_OP_DISCARD;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+ ring_req->u.discard.id = id;
+ ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
else
ring_req->u.discard.flag = 0;
} else {
- ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
- info->sg);
- BUG_ON(ring_req->u.rw.nr_segments >
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
+ BUG_ON(info->max_indirect_segments == 0 &&
+ req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ BUG_ON(info->max_indirect_segments &&
+ req->nr_phys_segments > info->max_indirect_segments);
+ nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+ ring_req->u.rw.id = id;
+ if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+ /*
+ * The indirect operation can only be a BLKIF_OP_READ or
+ * BLKIF_OP_WRITE
+ */
+ BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+ ring_req->operation = BLKIF_OP_INDIRECT;
+ ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
+ ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->u.indirect.handle = info->handle;
+ ring_req->u.indirect.nr_segments = nseg;
+ } else {
+ ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->u.rw.handle = info->handle;
+ ring_req->operation = rq_data_dir(req) ?
+ BLKIF_OP_WRITE : BLKIF_OP_READ;
+ if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ /*
+ * Ideally we can do an unordered flush-to-disk. In case the
+				 * backend only supports barriers, use that. A barrier request is
+				 * a superset of FUA, so we can implement it the same
+ * way. (It's also a FLUSH+FUA, since it is
+ * guaranteed ordered WRT previous writes.)
+ */
+ ring_req->operation = info->flush_op;
+ }
+ ring_req->u.rw.nr_segments = nseg;
+ }
+ for_each_sg(info->shadow[id].sg, sg, nseg, i) {
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
+ if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+ (i % SEGS_PER_INDIRECT_FRAME == 0)) {
+ if (segments)
+ kunmap_atomic(segments);
+
+ n = i / SEGS_PER_INDIRECT_FRAME;
+ gnt_list_entry = get_grant(&gref_head, info);
+ info->shadow[id].indirect_grants[n] = gnt_list_entry;
+ segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
+ ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+ }
+
gnt_list_entry = get_grant(&gref_head, info);
ref = gnt_list_entry->gref;
@@ -441,8 +498,7 @@ static int blkif_queue_request(struct request *req)
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
- shared_data = kmap_atomic(
- pfn_to_page(gnt_list_entry->pfn));
+ shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
bvec_data = kmap_atomic(sg_page(sg));
/*
@@ -461,13 +517,23 @@ static int blkif_queue_request(struct request *req)
kunmap_atomic(bvec_data);
kunmap_atomic(shared_data);
}
-
- ring_req->u.rw.seg[i] =
- (struct blkif_request_segment) {
- .gref = ref,
- .first_sect = fsect,
- .last_sect = lsect };
+ if (ring_req->operation != BLKIF_OP_INDIRECT) {
+ ring_req->u.rw.seg[i] =
+ (struct blkif_request_segment) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+ } else {
+ n = i % SEGS_PER_INDIRECT_FRAME;
+ segments[n] =
+ (struct blkif_request_segment_aligned) {
+ .gref = ref,
+ .first_sect = fsect,
+ .last_sect = lsect };
+ }
}
+ if (segments)
+ kunmap_atomic(segments);
}
info->ring.req_prod_pvt++;
@@ -542,7 +608,9 @@ wait:
flush_requests(info);
}
-static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
+ unsigned int physical_sector_size,
+ unsigned int segments)
{
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
@@ -564,14 +632,15 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_logical_block_size(rq, sector_size);
- blk_queue_max_hw_sectors(rq, 512);
+ blk_queue_physical_block_size(rq, physical_sector_size);
+ blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);
/* Each segment in a request is up to an aligned page in size. */
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
blk_queue_max_segment_size(rq, PAGE_SIZE);
/* Ensure a merged request will fit in a single I/O ring slot. */
- blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ blk_queue_max_segments(rq, segments);
/* Make sure buffer addresses are sector-aligned. */
blk_queue_dma_alignment(rq, 511);
@@ -588,13 +657,16 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
static void xlvbd_flush(struct blkfront_info *info)
{
blk_queue_flush(info->rq, info->feature_flush);
- printk(KERN_INFO "blkfront: %s: %s: %s %s\n",
+ printk(KERN_INFO "blkfront: %s: %s: %s %s %s %s %s\n",
info->gd->disk_name,
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ?
"flush diskcache" : "barrier or flush"),
- info->feature_flush ? "enabled" : "disabled",
- info->feature_persistent ? "using persistent grants" : "");
+ info->feature_flush ? "enabled;" : "disabled;",
+ "persistent grants:",
+ info->feature_persistent ? "enabled;" : "disabled;",
+ "indirect descriptors:",
+ info->max_indirect_segments ? "enabled;" : "disabled;");
}
static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
@@ -667,7 +739,8 @@ static char *encode_disk_name(char *ptr, unsigned int n)
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
- u16 vdisk_info, u16 sector_size)
+ u16 vdisk_info, u16 sector_size,
+ unsigned int physical_sector_size)
{
struct gendisk *gd;
int nr_minors = 1;
@@ -734,7 +807,9 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
gd->driverfs_dev = &(info->xbdev->dev);
set_capacity(gd, capacity);
- if (xlvbd_init_blk_queue(gd, sector_size)) {
+ if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
+ info->max_indirect_segments ? :
+ BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
del_gendisk(gd);
goto release;
}
@@ -818,6 +893,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
{
struct grant *persistent_gnt;
struct grant *n;
+ int i, j, segs;
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
@@ -843,6 +919,47 @@ static void blkif_free(struct blkfront_info *info, int suspend)
}
BUG_ON(info->persistent_gnts_c != 0);
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ /*
+ * Clear persistent grants present in requests already
+ * on the shared ring
+ */
+ if (!info->shadow[i].request)
+ goto free_shadow;
+
+ segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
+ info->shadow[i].req.u.indirect.nr_segments :
+ info->shadow[i].req.u.rw.nr_segments;
+ for (j = 0; j < segs; j++) {
+ persistent_gnt = info->shadow[i].grants_used[j];
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ __free_page(pfn_to_page(persistent_gnt->pfn));
+ kfree(persistent_gnt);
+ }
+
+ if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
+ /*
+ * If this is not an indirect operation don't try to
+ * free indirect segments
+ */
+ goto free_shadow;
+
+ for (j = 0; j < INDIRECT_GREFS(segs); j++) {
+ persistent_gnt = info->shadow[i].indirect_grants[j];
+ gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+ __free_page(pfn_to_page(persistent_gnt->pfn));
+ kfree(persistent_gnt);
+ }
+
+free_shadow:
+ kfree(info->shadow[i].grants_used);
+ info->shadow[i].grants_used = NULL;
+ kfree(info->shadow[i].indirect_grants);
+ info->shadow[i].indirect_grants = NULL;
+ kfree(info->shadow[i].sg);
+ info->shadow[i].sg = NULL;
+ }
+
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&info->io_lock);
@@ -867,12 +984,13 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
struct blkif_response *bret)
{
int i = 0;
- struct bio_vec *bvec;
- struct req_iterator iter;
- unsigned long flags;
+ struct scatterlist *sg;
char *bvec_data;
void *shared_data;
- unsigned int offset = 0;
+ int nseg;
+
+ nseg = s->req.operation == BLKIF_OP_INDIRECT ?
+ s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
if (bret->operation == BLKIF_OP_READ) {
/*
@@ -881,26 +999,29 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
* than PAGE_SIZE, we have to keep track of the current offset,
* to be sure we are copying the data from the right shared page.
*/
- rq_for_each_segment(bvec, s->request, iter) {
- BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
- if (bvec->bv_offset < offset)
- i++;
- BUG_ON(i >= s->req.u.rw.nr_segments);
+ for_each_sg(s->sg, sg, nseg, i) {
+ BUG_ON(sg->offset + sg->length > PAGE_SIZE);
shared_data = kmap_atomic(
pfn_to_page(s->grants_used[i]->pfn));
- bvec_data = bvec_kmap_irq(bvec, &flags);
- memcpy(bvec_data, shared_data + bvec->bv_offset,
- bvec->bv_len);
- bvec_kunmap_irq(bvec_data, &flags);
+ bvec_data = kmap_atomic(sg_page(sg));
+ memcpy(bvec_data + sg->offset,
+ shared_data + sg->offset,
+ sg->length);
+ kunmap_atomic(bvec_data);
kunmap_atomic(shared_data);
- offset = bvec->bv_offset + bvec->bv_len;
}
}
/* Add the persistent grant into the list of free grants */
- for (i = 0; i < s->req.u.rw.nr_segments; i++) {
+ for (i = 0; i < nseg; i++) {
list_add(&s->grants_used[i]->node, &info->persistent_gnts);
info->persistent_gnts_c++;
}
+ if (s->req.operation == BLKIF_OP_INDIRECT) {
+ for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
+ list_add(&s->indirect_grants[i]->node, &info->persistent_gnts);
+ info->persistent_gnts_c++;
+ }
+ }
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -1034,14 +1155,6 @@ static int setup_blkring(struct xenbus_device *dev,
SHARED_RING_INIT(sring);
FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
- sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
- /* Allocate memory for grants */
- err = fill_grant_buffer(info, BLK_RING_SIZE *
- BLKIF_MAX_SEGMENTS_PER_REQUEST);
- if (err)
- goto fail;
-
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
@@ -1223,13 +1336,84 @@ static int blkfront_probe(struct xenbus_device *dev,
return 0;
}
+/*
+ * This is a clone of md_trim_bio, used to split a bio into smaller ones
+ */
+static void trim_bio(struct bio *bio, int offset, int size)
+{
+ /* 'bio' is a cloned bio which we need to trim to match
+ * the given offset and size.
+ * This requires adjusting bi_sector, bi_size, and bi_io_vec
+ */
+ int i;
+ struct bio_vec *bvec;
+ int sofar = 0;
+
+ size <<= 9;
+ if (offset == 0 && size == bio->bi_size)
+ return;
+
+ bio->bi_sector += offset;
+ bio->bi_size = size;
+ offset <<= 9;
+ clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+
+ while (bio->bi_idx < bio->bi_vcnt &&
+ bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
+ /* remove this whole bio_vec */
+ offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
+ bio->bi_idx++;
+ }
+ if (bio->bi_idx < bio->bi_vcnt) {
+ bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
+ bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
+ }
+ /* avoid any complications with bi_idx being non-zero*/
+ if (bio->bi_idx) {
+ memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
+ (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
+ bio->bi_vcnt -= bio->bi_idx;
+ bio->bi_idx = 0;
+ }
+ /* Make sure vcnt and last bv are not too big */
+ bio_for_each_segment(bvec, bio, i) {
+ if (sofar + bvec->bv_len > size)
+ bvec->bv_len = size - sofar;
+ if (bvec->bv_len == 0) {
+ bio->bi_vcnt = i;
+ break;
+ }
+ sofar += bvec->bv_len;
+ }
+}
+
+static void split_bio_end(struct bio *bio, int error)
+{
+ struct split_bio *split_bio = bio->bi_private;
+
+ if (error)
+ split_bio->err = error;
+
+ if (atomic_dec_and_test(&split_bio->pending)) {
+ split_bio->bio->bi_phys_segments = 0;
+ bio_endio(split_bio->bio, split_bio->err);
+ kfree(split_bio);
+ }
+ bio_put(bio);
+}
static int blkif_recover(struct blkfront_info *info)
{
int i;
- struct blkif_request *req;
+ struct request *req, *n;
struct blk_shadow *copy;
- int j;
+ int rc;
+ struct bio *bio, *cloned_bio;
+ struct bio_list bio_list, merge_bio;
+ unsigned int segs, offset;
+ int pending, size;
+ struct split_bio *split_bio;
+ struct list_head requests;
/* Stage 1: Make a safe copy of the shadow state. */
copy = kmemdup(info->shadow, sizeof(info->shadow),
@@ -1244,36 +1428,64 @@ static int blkif_recover(struct blkfront_info *info)
info->shadow_free = info->ring.req_prod_pvt;
info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
- /* Stage 3: Find pending requests and requeue them. */
+ rc = blkfront_setup_indirect(info);
+ if (rc) {
+ kfree(copy);
+ return rc;
+ }
+
+ segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ blk_queue_max_segments(info->rq, segs);
+ bio_list_init(&bio_list);
+ INIT_LIST_HEAD(&requests);
for (i = 0; i < BLK_RING_SIZE; i++) {
/* Not in use? */
if (!copy[i].request)
continue;
- /* Grab a request slot and copy shadow state into it. */
- req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
- *req = copy[i].req;
-
- /* We get a new request id, and must reset the shadow state. */
- req->u.rw.id = get_id_from_freelist(info);
- memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
-
- if (req->operation != BLKIF_OP_DISCARD) {
- /* Rewrite any grant references invalidated by susp/resume. */
- for (j = 0; j < req->u.rw.nr_segments; j++)
- gnttab_grant_foreign_access_ref(
- req->u.rw.seg[j].gref,
- info->xbdev->otherend_id,
- pfn_to_mfn(copy[i].grants_used[j]->pfn),
- 0);
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+ if (copy[i].request->cmd_flags &
+ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+ list_add(&copy[i].request->queuelist, &requests);
+ continue;
}
- info->shadow[req->u.rw.id].req = *req;
-
- info->ring.req_prod_pvt++;
+ merge_bio.head = copy[i].request->bio;
+ merge_bio.tail = copy[i].request->biotail;
+ bio_list_merge(&bio_list, &merge_bio);
+ copy[i].request->bio = NULL;
+ blk_put_request(copy[i].request);
}
kfree(copy);
+ /*
+ * Empty the queue, this is important because we might have
+ * requests in the queue with more segments than what we
+ * can handle now.
+ */
+ spin_lock_irq(&info->io_lock);
+ while ((req = blk_fetch_request(info->rq)) != NULL) {
+ if (req->cmd_flags &
+ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ list_add(&req->queuelist, &requests);
+ continue;
+ }
+ merge_bio.head = req->bio;
+ merge_bio.tail = req->biotail;
+ bio_list_merge(&bio_list, &merge_bio);
+ req->bio = NULL;
+ if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
+ pr_alert("diskcache flush request found!\n");
+ __blk_put_request(info->rq, req);
+ }
+ spin_unlock_irq(&info->io_lock);
+
xenbus_switch_state(info->xbdev, XenbusStateConnected);
spin_lock_irq(&info->io_lock);
@@ -1281,14 +1493,50 @@ static int blkif_recover(struct blkfront_info *info)
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
- /* Send off requeued requests */
- flush_requests(info);
-
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info);
+ list_for_each_entry_safe(req, n, &requests, queuelist) {
+ /* Requeue pending requests (flush or discard) */
+ list_del_init(&req->queuelist);
+ BUG_ON(req->nr_phys_segments > segs);
+ blk_requeue_request(info->rq, req);
+ }
spin_unlock_irq(&info->io_lock);
+ while ((bio = bio_list_pop(&bio_list)) != NULL) {
+ /* Traverse the list of pending bios and re-queue them */
+ if (bio_segments(bio) > segs) {
+ /*
+ * This bio has more segments than what we can
+ * handle, we have to split it.
+ */
+ pending = (bio_segments(bio) + segs - 1) / segs;
+ split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
+ BUG_ON(split_bio == NULL);
+ atomic_set(&split_bio->pending, pending);
+ split_bio->bio = bio;
+ for (i = 0; i < pending; i++) {
+ offset = (i * segs * PAGE_SIZE) >> 9;
+ size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
+ (unsigned int)(bio->bi_size >> 9) - offset);
+ cloned_bio = bio_clone(bio, GFP_NOIO);
+ BUG_ON(cloned_bio == NULL);
+ trim_bio(cloned_bio, offset, size);
+ cloned_bio->bi_private = split_bio;
+ cloned_bio->bi_end_io = split_bio_end;
+ submit_bio(cloned_bio->bi_rw, cloned_bio);
+ }
+ /*
+ * Now we have to wait for all those smaller bios to
+ * end, so we can also end the "parent" bio.
+ */
+ continue;
+ }
+ /* We don't need to split this bio */
+ submit_bio(bio->bi_rw, bio);
+ }
+
return 0;
}
@@ -1308,8 +1556,12 @@ static int blkfront_resume(struct xenbus_device *dev)
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = talk_to_blkback(dev, info);
- if (info->connected == BLKIF_STATE_SUSPENDED && !err)
- err = blkif_recover(info);
+
+ /*
+ * We have to wait for the backend to switch to
+ * connected state, since we want to read which
+ * features it supports.
+ */
return err;
}
@@ -1387,6 +1639,60 @@ static void blkfront_setup_discard(struct blkfront_info *info)
kfree(type);
}
+static int blkfront_setup_indirect(struct blkfront_info *info)
+{
+ unsigned int indirect_segments, segs;
+ int err, i;
+
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "feature-max-indirect-segments", "%u", &indirect_segments,
+ NULL);
+ if (err) {
+ info->max_indirect_segments = 0;
+ segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+ } else {
+ info->max_indirect_segments = min(indirect_segments,
+ xen_blkif_max_segments);
+ segs = info->max_indirect_segments;
+ }
+
+ err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
+ if (err)
+ goto out_of_memory;
+
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ info->shadow[i].grants_used = kzalloc(
+ sizeof(info->shadow[i].grants_used[0]) * segs,
+ GFP_NOIO);
+ info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
+ if (info->max_indirect_segments)
+ info->shadow[i].indirect_grants = kzalloc(
+ sizeof(info->shadow[i].indirect_grants[0]) *
+ INDIRECT_GREFS(segs),
+ GFP_NOIO);
+ if ((info->shadow[i].grants_used == NULL) ||
+ (info->shadow[i].sg == NULL) ||
+ (info->max_indirect_segments &&
+ (info->shadow[i].indirect_grants == NULL)))
+ goto out_of_memory;
+ sg_init_table(info->shadow[i].sg, segs);
+ }
+
+
+ return 0;
+
+out_of_memory:
+ for (i = 0; i < BLK_RING_SIZE; i++) {
+ kfree(info->shadow[i].grants_used);
+ info->shadow[i].grants_used = NULL;
+ kfree(info->shadow[i].sg);
+ info->shadow[i].sg = NULL;
+ kfree(info->shadow[i].indirect_grants);
+ info->shadow[i].indirect_grants = NULL;
+ }
+ return -ENOMEM;
+}
+
/*
* Invoked when the backend is finally 'ready' (and has told produced
* the details about the physical device - #sectors, size, etc).
@@ -1395,6 +1701,7 @@ static void blkfront_connect(struct blkfront_info *info)
{
unsigned long long sectors;
unsigned long sector_size;
+ unsigned int physical_sector_size;
unsigned int binfo;
int err;
int barrier, flush, discard, persistent;
@@ -1414,8 +1721,15 @@ static void blkfront_connect(struct blkfront_info *info)
set_capacity(info->gd, sectors);
revalidate_disk(info->gd);
- /* fall through */
+ return;
case BLKIF_STATE_SUSPENDED:
+ /*
+ * If we are recovering from suspension, we need to wait
+	 * for the backend to announce its features before
+ * reconnecting, at least we need to know if the backend
+ * supports indirect descriptors, and how many.
+ */
+ blkif_recover(info);
return;
default:
@@ -1437,6 +1751,16 @@ static void blkfront_connect(struct blkfront_info *info)
return;
}
+ /*
+	 * physical-sector-size is a newer field, so old backends may not
+ * provide this. Assume physical sector size to be the same as
+ * sector_size in that case.
+ */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "physical-sector-size", "%u", &physical_sector_size);
+ if (err != 1)
+ physical_sector_size = sector_size;
+
info->feature_flush = 0;
info->flush_op = 0;
@@ -1483,7 +1807,15 @@ static void blkfront_connect(struct blkfront_info *info)
else
info->feature_persistent = persistent;
- err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
+ err = blkfront_setup_indirect(info);
+ if (err) {
+ xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
+ info->xbdev->otherend);
+ return;
+ }
+
+ err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
+ physical_sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
info->xbdev->otherend);
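
After a resume, blkif_recover() in the blkfront patch above may find pending bios with more segments than the new backend accepts, so each one is split into ceil(bio_segments / segs) clones trimmed by trim_bio(). A standalone sketch of just the per-clone offset/size arithmetic (4 KiB pages assumed; each segment is treated as a full page, as in the recover path):

#include <stdio.h>

#define PAGE_SIZE 4096u		/* assumption: 4 KiB pages */

int main(void)
{
	unsigned int segs = 32;			/* segment limit of the new backend */
	unsigned int bio_segments = 100;	/* segments carried by the pending bio */
	unsigned int bio_sectors = bio_segments * (PAGE_SIZE >> 9);
	unsigned int pending = (bio_segments + segs - 1) / segs;	/* number of clones */
	unsigned int i;

	for (i = 0; i < pending; i++) {
		unsigned int offset = (i * segs * PAGE_SIZE) >> 9;	/* clone start, in sectors */
		unsigned int size = (segs * PAGE_SIZE) >> 9;		/* clone length, in sectors */

		if (size > bio_sectors - offset)
			size = bio_sectors - offset;	/* final clone covers the remainder */
		printf("clone %u: offset %u, size %u sectors\n", i, offset, size);
	}
	return 0;
}
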
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b012d7600e1a..7cde885011ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
static struct pstate_adjust_policy default_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 109,
- .p_gain_pct = 17,
+ .setpoint = 97,
+ .p_gain_pct = 20,
.d_gain_pct = 0,
- .i_gain_pct = 4,
+ .i_gain_pct = 0,
};
struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
- int32_t core_busy, turbo_pstate, current_pstate;
+ int32_t core_busy, max_pstate, current_pstate;
core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
- turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+ busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
return fp_toint(busy_scaled);
}
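
The intel_pstate hunk above rescales the PID input against the guaranteed maximum P-state rather than the turbo P-state. A standalone sketch of the fixed-point busy scaling, assuming an 8-bit fractional format (FRAC_BITS = 8 is an assumption here, as are the example P-state numbers):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8	/* assumption: 8-bit fractional fixed point */
#define int_tofp(x) ((int32_t)(x) << FRAC_BITS)
#define fp_toint(x) ((x) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * y) >> FRAC_BITS; }
static int32_t div_fp(int32_t x, int32_t y) { return ((int64_t)x << FRAC_BITS) / y; }

int main(void)
{
	/* core 60% busy while at P-state 16 with a non-turbo max of 24:
	 * busyness is scaled up by max/current, i.e. 60 * 24 / 16 = 90 */
	int32_t core_busy = int_tofp(60);
	int32_t max_pstate = int_tofp(24);
	int32_t current_pstate = int_tofp(16);
	int32_t busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	printf("scaled busy: %d\n", fp_toint(busy_scaled));	/* 90 */
	return 0;
}
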
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 5996521a1caf..84573b4d6f92 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -429,7 +429,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
dma_addr_t src_dma, dst_dma;
int ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 27e86d938262..89e109022d78 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -48,6 +48,8 @@ static LIST_HEAD(mc_devices);
*/
static void const *edac_mc_owner;
+static struct bus_type mc_bus[EDAC_MAX_MCS];
+
unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
unsigned len)
{
@@ -723,6 +725,11 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
int ret = -EINVAL;
edac_dbg(0, "\n");
+ if (mci->mc_idx >= EDAC_MAX_MCS) {
+ pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
+ return -ENODEV;
+ }
+
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
@@ -762,6 +769,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;
+ mci->bus = &mc_bus[mci->mc_idx];
+
if (edac_create_sysfs_mci_device(mci)) {
edac_mc_printk(mci, KERN_WARNING,
"failed to create sysfs device\n");
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index ef15a7e613bc..e7c32c4f7837 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -370,7 +370,7 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
return -ENODEV;
csrow->dev.type = &csrow_attr_type;
- csrow->dev.bus = &mci->bus;
+ csrow->dev.bus = mci->bus;
device_initialize(&csrow->dev);
csrow->dev.parent = &mci->dev;
csrow->mci = mci;
@@ -605,7 +605,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->mci = mci;
dimm->dev.type = &dimm_attr_type;
- dimm->dev.bus = &mci->bus;
+ dimm->dev.bus = mci->bus;
device_initialize(&dimm->dev);
dimm->dev.parent = &mci->dev;
@@ -975,11 +975,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
* The memory controller needs its own bus, in order to avoid
* namespace conflicts at /sys/bus/edac.
*/
- mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
- if (!mci->bus.name)
+ mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+ if (!mci->bus->name)
return -ENOMEM;
- edac_dbg(0, "creating bus %s\n", mci->bus.name);
- err = bus_register(&mci->bus);
+
+ edac_dbg(0, "creating bus %s\n", mci->bus->name);
+
+ err = bus_register(mci->bus);
if (err < 0)
return err;
@@ -988,7 +990,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
device_initialize(&mci->dev);
mci->dev.parent = mci_pdev;
- mci->dev.bus = &mci->bus;
+ mci->dev.bus = mci->bus;
dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
dev_set_drvdata(&mci->dev, mci);
pm_runtime_forbid(&mci->dev);
@@ -997,8 +999,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
return err;
}
@@ -1064,8 +1066,8 @@ fail:
}
fail2:
device_unregister(&mci->dev);
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
return err;
}
@@ -1098,8 +1100,8 @@ void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
- bus_unregister(&mci->bus);
- kfree(mci->bus.name);
+ bus_unregister(mci->bus);
+ kfree(mci->bus->name);
}
static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index 1b635178cc44..157b934e8ce3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -974,7 +974,7 @@ static int i5100_setup_debugfs(struct mem_ctl_info *mci)
if (!i5100_debugfs)
return -ENODEV;
- priv->debugfs = debugfs_create_dir(mci->bus.name, i5100_debugfs);
+ priv->debugfs = debugfs_create_dir(mci->bus->name, i5100_debugfs);
if (!priv->debugfs)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index f4491a497cc8..c2fa77086eb5 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -378,7 +378,7 @@ static int msm_gpio_probe(struct platform_device *pdev)
int ret, ngpio;
struct resource *res;
- if (!of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
+ if (of_property_read_u32(pdev->dev.of_node, "ngpio", &ngpio)) {
dev_err(&pdev->dev, "%s: ngpio property missing\n", __func__);
return -EINVAL;
}
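
The one-line gpio-msm-v2 fix above works because of_property_read_u32() returns 0 on success and a negative errno when the property is missing, so the old negated test took the error path exactly when the read succeeded. A standalone model of the corrected check, with fake_read_u32() standing in for the kernel helper:

#include <stdio.h>

/* stand-in for of_property_read_u32(): 0 on success, negative errno on failure */
static int fake_read_u32(int property_present, unsigned int *out)
{
	if (!property_present)
		return -22;	/* -EINVAL */
	*out = 32;
	return 0;
}

int main(void)
{
	unsigned int ngpio = 0;

	/* fixed form: error path only when the helper returns non-zero */
	if (fake_read_u32(1, &ngpio))
		printf("ngpio property missing\n");
	else
		printf("ngpio = %u\n", ngpio);	/* prints ngpio = 32 */
	return 0;
}
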
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index dfeb3a3a8f20..c57244ef428b 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1037,6 +1037,18 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
+#if defined(CONFIG_OF_GPIO)
+static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
+{
+ return chip->of_node != NULL;
+}
+#else
+static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
+{
+ return false;
+}
+#endif
+
static void omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
@@ -1068,24 +1080,68 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
gpiochip_add(&bank->chip);
- for (j = 0; j < bank->width; j++) {
- int irq = irq_create_mapping(bank->domain, j);
- irq_set_lockdep_class(irq, &gpio_lock_class);
- irq_set_chip_data(irq, bank);
- if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, irq, bank->width);
- } else {
- irq_set_chip_and_handler(irq, &gpio_irq_chip,
- handle_simple_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
- }
+ /*
+ * REVISIT these explicit calls to irq_create_mapping()
+ * to do the GPIO to IRQ domain mapping for each GPIO in
+ * the bank can be removed once all OMAP platforms have
+ * been migrated to Device Tree boot only.
+ * Since in DT boot irq_create_mapping() is called from
+ * irq_create_of_mapping() only for the GPIO lines that
+ * are used as interrupts.
+ */
+ if (!omap_gpio_chip_boot_dt(&bank->chip))
+ for (j = 0; j < bank->width; j++)
+ irq_create_mapping(bank->domain, j);
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
}
static const struct of_device_id omap_gpio_match[];
+static int omap_gpio_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct gpio_bank *bank = d->host_data;
+ int gpio;
+ int ret;
+
+ if (!bank)
+ return -EINVAL;
+
+ irq_set_lockdep_class(virq, &gpio_lock_class);
+ irq_set_chip_data(virq, bank);
+ if (bank->is_mpuio) {
+ omap_mpuio_alloc_gc(bank, virq, bank->width);
+ } else {
+ irq_set_chip_and_handler(virq, &gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ }
+
+ /*
+ * REVISIT most GPIO IRQ chip drivers need to call
+ * gpio_request() before a GPIO line can be used as an
+ * IRQ. Ideally this should be handled by the IRQ core
+ * but until then this has to be done on a per driver
+ * basis. Remove this once this is managed by the core.
+ */
+ if (omap_gpio_chip_boot_dt(&bank->chip)) {
+ gpio = irq_to_gpio(bank, hwirq);
+ ret = gpio_request_one(gpio, GPIOF_IN, NULL);
+ if (ret) {
+ dev_err(bank->dev, "Could not request GPIO%d\n", gpio);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct irq_domain_ops omap_gpio_irq_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = omap_gpio_irq_map,
+};
+
static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1151,10 +1207,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
- 0, &irq_domain_simple_ops, NULL);
+ 0, &omap_gpio_irq_ops, bank);
#else
bank->domain = irq_domain_add_linear(node, bank->width,
- &irq_domain_simple_ops, NULL);
+ &omap_gpio_irq_ops, bank);
#endif
if (!bank->domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d820..6a647493ca7f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
/* don't break so fail path works correct */
fail = 1;
break;
+
+ if (connector->dpms != DRM_MODE_DPMS_ON) {
+ DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+ mode_changed = true;
+ }
}
}
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
ret = -EINVAL;
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+ }
}
drm_helper_disable_unused_functions(dev);
} else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
}
- /*
- * crtc set_config helpers implicit set the crtc and all connected
- * encoders to DPMS on for a full mode set. But for just an fb update it
- * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
- * unconditionally. This will also ensure driver internal dpms state is
- * consistent again.
- */
- if (set->crtc->enabled) {
- DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
- for (i = 0; i < set->num_connectors; i++) {
- DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
- drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
- }
- }
-
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cf188ab7051a..66c63808fa35 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = info;
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ spin_lock_init(&dev_priv->rps.lock);
+ spin_lock_init(&dev_priv->gt_lock);
+ spin_lock_init(&dev_priv->backlight.lock);
+ mutex_init(&dev_priv->dpio_lock);
+ mutex_init(&dev_priv->rps.hw_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+
i915_dump_device_info(dev_priv);
if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
+ intel_gt_sanitize(dev);
intel_gt_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1620,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
- spin_lock_init(&dev_priv->backlight.lock);
- mutex_init(&dev_priv->dpio_lock);
-
- mutex_init(&dev_priv->rps.hw_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
-
dev_priv->num_plane = 1;
if (IS_VALLEYVIEW(dev))
dev_priv->num_plane = 2;
@@ -1648,7 +1649,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (INTEL_INFO(dev)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
- acpi_video_register_with_quirks();
+ acpi_video_register();
}
if (IS_GEN5(dev))
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb62..45b3c030f483 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_reset(dev);
+ intel_gt_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ unsigned long irqflags; \
u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- unsigned long irqflags; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
@@ -1280,8 +1280,10 @@ __i915_read(64, q)
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ unsigned long irqflags; \
u32 __fifo_ret = 0; \
trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
}
__i915_write(8, b)
__i915_write(16, w)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd23..d2ee3343c943 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
@@ -1583,7 +1584,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb6..d9e2208cfe98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- i915_gem_write_fence(dev, i, reg->obj);
+
+ /*
+ * Commit delayed tiling changes if we have an object still
+ * attached to the fence, otherwise just clear the fence.
+ */
+ if (reg->obj) {
+ i915_gem_object_update_fence(reg->obj, reg,
+ reg->obj->tiling_mode);
+ } else {
+ i915_gem_write_fence(dev, i, NULL);
+ }
}
}
@@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
+ WARN(obj && (!obj->stride || !obj->tiling_mode),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ obj->stride, obj->tiling_mode);
+
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
@@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
+ obj->fence_dirty = false;
}
static int
@@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
return 0;
i915_gem_object_update_fence(obj, reg, enable);
- obj->fence_dirty = false;
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c55..b042ee5c4070 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->port_reversal |
+ intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port),
- intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+ intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->get_config = intel_ddi_get_config;
intel_dig_port->port = port;
- intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
- DDI_BUF_PORT_REVERSAL;
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL |
+ DDI_A_4_LANES);
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b7..5fb305840db8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
uint32_t tmp;
tmp = I915_READ(PFIT_CONTROL);
+ if (!(tmp & PFIT_ENABLE))
+ return;
+ /* Check whether the pfit is attached to our pipe. */
if (INTEL_INFO(dev)->gen < 4) {
if (crtc->pipe != PIPE_B)
return;
-
- /* gen2/3 store dither state in pfit control, needs to match */
- pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
} else {
if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
return;
}
- if (!(tmp & PFIT_ENABLE))
- return;
-
- pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+ pipe_config->gmch_pfit.control = tmp;
pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
if (INTEL_INFO(dev)->gen < 5)
pipe_config->gmch_pfit.lvds_border_bits =
@@ -8317,6 +8314,8 @@ check_shared_dpll_state(struct drm_device *dev)
pll->active, pll->refcount);
WARN(pll->active && !pll->on,
"pll in active use but not on in sw tracking\n");
+ WARN(pll->on && !pll->active,
+ "pll in on but not on in use in sw tracking\n");
WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
@@ -8541,15 +8540,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
}
static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
- int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
{
int i;
- for (i = 0; i < num_connectors; i++)
- if (connectors[i].encoder &&
- connectors[i].encoder->crtc == crtc &&
- connectors[i].dpms != DRM_MODE_DPMS_ON)
+ if (set->num_connectors == 0)
+ return false;
+
+ if (WARN_ON(set->connectors == NULL))
+ return false;
+
+ for (i = 0; i < set->num_connectors; i++)
+ if (set->connectors[i]->encoder &&
+ set->connectors[i]->encoder->crtc == set->crtc &&
+ set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
return true;
return false;
@@ -8562,10 +8566,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
- if (set->connectors != NULL &&
- is_crtc_connector_off(set->crtc, *set->connectors,
- set->num_connectors)) {
- config->mode_changed = true;
+ if (is_crtc_connector_off(set)) {
+ config->mode_changed = true;
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
@@ -9398,6 +9400,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
DRM_INFO("applying inverted panel brightness quirk\n");
}
+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+ DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -9467,6 +9480,11 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Dell XPS13 HD Sandy Bridge */
+ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+ /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+ { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -9817,8 +9835,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
pll->refcount = pll->active;
- DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
- pll->name, pll->refcount);
+ DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+ pll->name, pll->refcount, pll->on);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9887,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_plane *plane;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
+ int i;
intel_modeset_readout_hw_state(dev);
@@ -9884,6 +9903,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
}
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ if (!pll->on || pll->active)
+ continue;
+
+ DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+ pll->disable(dev_priv, pll);
+ pll->on = false;
+ }
+
if (force_restore) {
/*
* We need to use raw interfaces for restoring state to avoid
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f48230..b7d6e09456ce 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
struct intel_digital_port {
struct intel_encoder base;
enum port port;
- u32 port_reversal;
+ u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85bd..2fd3fd5b943e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+ if (IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+ else
+ return 225000;
+}
+
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > 165000)
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+ if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->clock > 225000) {
+ if (adjusted_mode->clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022d..61348eae2f04 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
flags |= DRM_MODE_FLAG_PVSYNC;
pipe_config->adjusted_mode.flags |= flags;
+
+ /* gen2/3 store dither state in pfit control, needs to match */
+ if (INTEL_INFO(dev)->gen < 4) {
+ tmp = I915_READ(PFIT_CONTROL);
+
+ pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+ }
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_pch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
- return true;
} else {
intel_gmch_panel_fitting(intel_crtc, pipe_config,
intel_connector->panel.fitting_mode);
- }
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- pipe_config->timings_set = true;
+ }
/*
* XXX: It would be nice to support lower refresh rates on the
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d3209f..67e2c1f1c9a8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ pipe_config->timings_set = true;
+
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
@@ -580,7 +583,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_PCH_SPLIT(dev) &&
+ !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771f..51a2a60f5bfc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5476,7 +5476,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
gen6_gt_check_fifodbg(dev_priv);
}
-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5487,16 +5487,16 @@ void intel_gt_reset(struct drm_device *dev)
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ if (INTEL_INFO(dev)->gen >= 6)
+ intel_disable_gt_powersave(dev);
}
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- spin_lock_init(&dev_priv->gt_lock);
-
- intel_gt_reset(dev);
-
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5f5f60..ce860de43e61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_bsp_cclass;
nv_engine(priv)->sclass = nvc0_bsp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c83982..ba6aeca0285e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00008000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_bsp_cclass;
nv_engine(priv)->sclass = nve0_bsp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f7590e..e03fc8e4dc1d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
#include <engine/falcon.h>
#include <subdev/timer.h>
+void
+nouveau_falcon_intr(struct nouveau_subdev *subdev)
+{
+ struct nouveau_falcon *falcon = (void *)subdev;
+ u32 dispatch = nv_ro32(falcon, 0x01c);
+ u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+
+ if (intr & 0x00000010) {
+ nv_debug(falcon, "ucode halted\n");
+ nv_wo32(falcon, 0x004, 0x00000010);
+ intr &= ~0x00000010;
+ }
+
+ if (intr) {
+ nv_error(falcon, "unhandled intr 0x%08x\n", intr);
+ nv_wo32(falcon, 0x004, intr);
+ }
+}
+
u32
_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
{
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1ff360..73719aaa62d6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_ppp_cclass;
nv_engine(priv)->sclass = nvc0_ppp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229b60eb..ac1f62aace72 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00020000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nvc0_vp_cclass;
nv_engine(priv)->sclass = nvc0_vp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf7bc49..d4c3108479c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
nv_subdev(priv)->unit = 0x00020000;
+ nv_subdev(priv)->intr = nouveau_falcon_intr;
nv_engine(priv)->cclass = &nve0_vp_cclass;
nv_engine(priv)->sclass = nve0_vp_sclass;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec386ab36..181aa7da524d 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u32, bool, const char *,
const char *, int, void **);
+void nouveau_falcon_intr(struct nouveau_subdev *subdev);
+
#define _nouveau_falcon_dtor _nouveau_engine_dtor
int _nouveau_falcon_init(struct nouveau_object *);
int _nouveau_falcon_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb131380..4e7ee5f4155c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
+ WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}
@@ -197,6 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
+ int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);
+
+ if (size <= 0 || size > max_size) {
+ nv_warn(drm, "skipped size %x\n", (u32)size);
+ return -EINVAL;
+ }
if (sg)
type = ttm_bo_type_sg;
@@ -340,13 +347,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
+ int ret, ref;
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
- if (--nvbo->pin_refcnt)
+ ref = --nvbo->pin_refcnt;
+ WARN_ON_ONCE(ref < 0);
+ if (ref)
goto out;
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
@@ -578,7 +587,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
int ret = RING_SPACE(chan, 2);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
+ OUT_RING (chan, handle & 0x0000ffff);
FIRE_RING (chan);
}
return ret;
@@ -973,7 +982,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- mutex_lock(&chan->cli->mutex);
+ mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
@@ -1014,7 +1023,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
- { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1034,7 +1043,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct nouveau_channel *chan;
u32 handle = (mthd->engine << 16) | mthd->oclass;
- if (mthd->init == nve0_bo_move_init)
+ if (mthd->engine)
chan = drm->cechan;
else
chan = drm->channel;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 708b2d1c0037..907d20ef6d4d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
- int ret;
+ int ret = -ENOMEM;
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
@@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
- return ERR_PTR(-ENOMEM);
+ goto err_unref;
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret) {
- drm_gem_object_unreference(gem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
return &nouveau_fb->base;
+
+err:
+ kfree(nouveau_fb);
+err_unref:
+ drm_gem_object_unreference(gem);
+ return ERR_PTR(ret);
}
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
@@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_page_flip_state *s;
struct nouveau_channel *chan = NULL;
struct nouveau_fence *fence;
- struct list_head res;
- struct ttm_validate_buffer res_val[2];
+ struct ttm_validate_buffer resv[2] = {
+ { .bo = &old_bo->bo },
+ { .bo = &new_bo->bo },
+ };
struct ww_acquire_ctx ticket;
+ LIST_HEAD(res);
int ret;
if (!drm->channel)
@@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
chan = drm->channel;
spin_unlock(&old_bo->bo.bdev->fence_lock);
- mutex_lock(&chan->cli->mutex);
-
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
- if (likely(!ret)) {
- res_val[0].bo = &old_bo->bo;
- res_val[1].bo = &new_bo->bo;
- INIT_LIST_HEAD(&res);
- list_add_tail(&res_val[0].head, &res);
- list_add_tail(&res_val[1].head, &res);
- ret = ttm_eu_reserve_buffers(&ticket, &res);
- if (ret)
- nouveau_bo_unpin(new_bo);
- }
- } else
- ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+ if (ret)
+ goto fail_free;
- if (ret) {
- mutex_unlock(&chan->cli->mutex);
- goto fail_free;
+ list_add(&resv[1].head, &res);
}
+ list_add(&resv[0].head, &res);
+
+ mutex_lock(&chan->cli->mutex);
+ ret = ttm_eu_reserve_buffers(&ticket, &res);
+ if (ret)
+ goto fail_unpin;
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
@@ -576,10 +575,8 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nv50_display_flip_next(crtc, fb, chan, 0);
- if (ret) {
- mutex_unlock(&chan->cli->mutex);
+ if (ret)
goto fail_unreserve;
- }
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -590,22 +587,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Update the crtc struct and cleanup */
crtc->fb = fb;
- if (old_bo != new_bo) {
- ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ ttm_eu_fence_buffer_objects(&ticket, &res, fence);
+ if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
- } else {
- nouveau_bo_fence(new_bo, fence);
- ttm_bo_unreserve(&new_bo->bo);
- }
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
- if (old_bo != new_bo) {
- ttm_eu_backoff_reservation(&ticket, &res);
+ ttm_eu_backoff_reservation(&ticket, &res);
+fail_unpin:
+ mutex_unlock(&chan->cli->mutex);
+ if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
- } else
- ttm_bo_unreserve(&new_bo->bo);
fail_free:
kfree(s);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 218a4b522fe5..61972668fd05 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm)
arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
arg1 = 1;
+ } else
+ if (device->chipset >= 0xa3 &&
+ device->chipset != 0xaa &&
+ device->chipset != 0xac) {
+ ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
+ NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
+ &drm->cechan);
+ if (ret)
+ NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
+
+ arg0 = NvDmaFB;
+ arg1 = NvDmaTT;
} else {
arg0 = NvDmaFB;
arg1 = NvDmaTT;
@@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
-static struct lock_class_key drm_client_lock_class_key;
-
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
if (ret)
return ret;
- lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);
dev->dev_private = drm;
drm->dev = dev;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 9352010030e9..4c1bc061fae2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -385,6 +385,7 @@ out_unlock:
mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
+ nouveau_bo_unmap(nvbo);
out_unpin:
nouveau_bo_unpin(nvbo);
out_unref:
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1680d9187bab..be3149932c2d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
int ret;
fence->channel = chan;
- fence->timeout = jiffies + (3 * DRM_HZ);
+ fence->timeout = jiffies + (15 * DRM_HZ);
fence->sequence = ++fctx->sequence;
ret = fctx->emit(fence);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index e72d09c068a8..830cb7bad922 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
nvbo->gem = NULL;
- /* Lockdep hates you for doing reserve with gem object lock held */
- if (WARN_ON_ONCE(nvbo->pin_refcnt)) {
- nvbo->pin_refcnt = 1;
- nouveau_bo_unpin(nvbo);
- }
-
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 54dc6355b0c2..8b40a36c1b57 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -355,6 +355,7 @@ struct nv50_oimm {
struct nv50_head {
struct nouveau_crtc base;
+ struct nouveau_bo *image;
struct nv50_curs curs;
struct nv50_sync sync;
struct nv50_ovly ovly;
@@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
{
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_head *head = nv50_head(crtc);
struct nv50_sync *sync = nv50_sync(crtc);
- int head = nv_crtc->index, ret;
u32 *push;
+ int ret;
swap_interval <<= 4;
if (swap_interval == 0)
@@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
- OUT_RING (chan, NvEvoSema0 + head);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
OUT_RING (chan, sync->addr ^ 0x10);
BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, sync->data + 1);
@@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->data);
} else
if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
- u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;
@@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL);
} else
if (chan) {
- u64 addr = nv84_fence_crtc(chan, head) + sync->addr;
+ u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 10);
if (ret)
return ret;
@@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
evo_kick(push, sync);
+
+ nouveau_bo_ref(nv_fb->nvbo, &head->image);
return 0;
}
@@ -1038,18 +1042,17 @@ static int
nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nv50_head *head = nv50_head(crtc);
int ret;
ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (old_fb) {
- nvfb = nouveau_framebuffer(old_fb);
- nouveau_bo_unpin(nvfb->nvbo);
+ if (ret == 0) {
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(nvfb->nvbo, &head->image);
}
- return 0;
+ return ret;
}
static int
@@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
}
}
+static void
+nv50_crtc_disable(struct drm_crtc *crtc)
+{
+ struct nv50_head *head = nv50_head(crtc);
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(NULL, &head->image);
+}
+
static int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
@@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+
nv50_dmac_destroy(disp->core, &head->ovly.base);
nv50_pioc_destroy(disp->core, &head->oimm.base);
nv50_dmac_destroy(disp->core, &head->sync.base);
nv50_pioc_destroy(disp->core, &head->curs.base);
+
+ /*XXX: this shouldn't be necessary, but the core doesn't call
+ * disconnect() during the cleanup paths
+ */
+ if (head->image)
+ nouveau_bo_unpin(head->image);
+ nouveau_bo_ref(NULL, &head->image);
+
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
if (nv_crtc->cursor.nvbo)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+
nouveau_bo_unmap(nv_crtc->lut.nvbo);
if (nv_crtc->lut.nvbo)
nouveau_bo_unpin(nv_crtc->lut.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+
drm_crtc_cleanup(crtc);
kfree(crtc);
}
@@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
.mode_set_base = nv50_crtc_mode_set_base,
.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
.load_lut = nv50_crtc_lut_load,
+ .disable = nv50_crtc_disable,
};
static const struct drm_crtc_funcs nv50_crtc_func = {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
- int ret;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
if (release == NULL)
break;
- ret = qxl_release_reserve(qdev, release, false);
- if (ret) {
- qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
- DRM_ERROR("failed to reserve release %lld\n", id);
- }
-
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- qxl_release_unreserve(qdev, release);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
return i;
}
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
- ret = qxl_bo_reserve(bo, false);
- if (unlikely(ret != 0))
+ ret = qxl_release_list_add(release, bo);
+ if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
- return 0;
+ return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
if (ret)
return ret;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ return ret;
+
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
surf->surf_create = release;
- /* no need to add a release to the fence for this bo,
+ /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
- qxl_release_unreserve(qdev, release);
-
+ qxl_release_fence_buffer_objects(release);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
-static void
+static int
qxl_hide_cursor(struct qxl_device *qdev)
{
struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+ return 0;
}
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
int size = 64*64*4;
int ret = 0;
- if (!handle) {
- qxl_hide_cursor(qdev);
- return 0;
- }
+ if (!handle)
+ return qxl_hide_cursor(qdev);
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
goto out_unref;
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ qxl_bo_unreserve(user_bo);
if (ret)
- goto out_unreserve;
+ goto out_unref;
ret = qxl_bo_kmap(user_bo, &user_ptr);
if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
&release, NULL);
if (ret)
goto out_kunmap;
- ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
- &cursor_bo);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
+ &cursor_bo);
if (ret)
goto out_free_release;
- ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+
+ ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bo;
+ ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+ if (ret)
+ goto out_backoff;
+
cursor->header.unique = 0;
cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(cursor_bo);
- /* finish with the userspace bo */
qxl_bo_kunmap(user_bo);
- qxl_bo_unpin(user_bo);
- qxl_bo_unreserve(user_bo);
- drm_gem_object_unreference_unlocked(obj);
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
- qxl_release_add_res(qdev, release, cursor_bo);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+ /* finish with the userspace bo */
+ ret = qxl_bo_reserve(user_bo, false);
+ if (!ret) {
+ qxl_bo_unpin(user_bo);
+ qxl_bo_unreserve(user_bo);
+ }
+ drm_gem_object_unreference_unlocked(obj);
- qxl_bo_unreserve(cursor_bo);
qxl_bo_unref(&cursor_bo);
return ret;
+
+out_backoff:
+ qxl_release_backoff_reserve_list(release);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_free_release:
- qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
out_kunmap:
qxl_bo_kunmap(user_bo);
out_unpin:
qxl_bo_unpin(user_bo);
-out_unreserve:
- qxl_bo_unreserve(user_bo);
out_unref:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
qcrtc->cur_x = x;
qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int alloc_clips(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned num_clips,
+ struct qxl_bo **clips_bo)
+{
+ int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+ return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_drawable *drawable,
unsigned num_clips,
- struct qxl_bo **clips_bo,
- struct qxl_release *release)
+ struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
int ret;
- int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
- ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
- if (ret)
- return NULL;
- ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+ ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
if (ret) {
- qxl_bo_unref(clips_bo);
return NULL;
}
dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
}
static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+ int ret;
+ ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+ QXL_RELEASE_DRAWABLE, release,
+ NULL);
+ return ret;
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+ qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
- struct qxl_release **release)
+ struct qxl_release *release)
{
struct qxl_drawable *drawable;
- int i, ret;
+ int i;
- ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
- QXL_RELEASE_DRAWABLE, release,
- NULL);
- if (ret)
- return ret;
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ if (!drawable)
+ return -ENOMEM;
- drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
- qxl_release_unmap(qdev, *release, &drawable->release_info);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
return 0;
}
-static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+static int alloc_palette_object(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **palette_bo)
+{
+ return qxl_alloc_bo_reserved(qdev, release,
+ sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+ palette_bo);
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ struct qxl_release *release,
const struct qxl_fb_image *qxl_fb_image)
{
- struct qxl_device *qdev = qxl_fb_image->qdev;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
uint32_t visual = qxl_fb_image->visual;
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
static uint64_t unique; /* we make no attempt to actually set this
* correctly globaly, since that would require
* tracking all of our palettes. */
-
- ret = qxl_alloc_bo_reserved(qdev,
- sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
- palette_bo);
-
- ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+ ret = qxl_bo_kmap(palette_bo, (void **)&pal);
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
}
pal->ents[0] = bgcolor;
pal->ents[1] = fgcolor;
- qxl_bo_kunmap(*palette_bo);
+ qxl_bo_kunmap(palette_bo);
return 0;
}
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
const char *src = fb_image->data;
int depth = fb_image->depth;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_image *image;
int ret;
-
+ struct qxl_drm_image *dimage;
+ struct qxl_bo *palette_bo = NULL;
if (stride == 0)
stride = depth * width / 8;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_drawable;
+
+ if (depth == 1) {
+ ret = alloc_palette_object(qdev, release, &palette_bo);
+ if (ret)
+ goto out_free_image;
+ }
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_palette;
+
rect.left = x;
rect.right = x + width;
rect.top = y;
rect.bottom = y + height;
- ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_palette;
+ }
- ret = qxl_image_create(qdev, release, &image_bo,
- (const uint8_t *)src, 0, 0,
- width, height, depth, stride);
+ ret = qxl_image_init(qdev, release, dimage,
+ (const uint8_t *)src, 0, 0,
+ width, height, depth, stride);
if (ret) {
- qxl_release_unreserve(qdev, release);
+ qxl_release_backoff_reserve_list(release);
qxl_release_free(qdev, release);
return;
}
if (depth == 1) {
- struct qxl_bo *palette_bo;
void *ptr;
- ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
- qxl_release_add_res(qdev, release, palette_bo);
+ ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
- ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+ ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
image = ptr;
image->u.bitmap.palette =
qxl_bo_physical_address(qdev, palette_bo, 0);
- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
- qxl_bo_unreserve(palette_bo);
- qxl_bo_unref(&palette_bo);
+ qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap =
- qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
-
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_palette:
+ if (palette_bo)
+ qxl_bo_unref(&palette_bo);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_drawable:
+ if (ret)
+ free_drawable(qdev, release);
}
/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
int depth = qxl_fb->base.bits_per_pixel;
uint8_t *surface_base;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_bo *clips_bo;
+ struct qxl_drm_image *dimage;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
width = right - left;
height = bottom - top;
+
+ ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+ if (ret)
+ goto out_free_drawable;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_clips;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_image;
+
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
+
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
- &release);
+ release);
if (ret)
- return;
+ goto out_release_backoff;
ret = qxl_bo_kmap(bo, (void **)&surface_base);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
- ret = qxl_image_create(qdev, release, &image_bo, surface_base,
- left, top, width, height, depth, stride);
+
+ ret = qxl_image_init(qdev, release, dimage, surface_base,
+ left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
+
+ rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ if (!rects)
+ goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
- if (!rects) {
- qxl_bo_unref(&image_bo);
- goto out_unref;
- }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
- qxl_release_add_res(qdev, release, clips_bo);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
- drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+ drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
+
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_kunmap(clips_bo);
- qxl_bo_unreserve(clips_bo);
- qxl_bo_unref(&clips_bo);
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
- return;
+ qxl_release_fence_buffer_objects(release);
+
+out_release_backoff:
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+ qxl_bo_unref(&clips_bo);
+out_free_drawable:
+ /* only free drawable on error */
+ if (ret)
+ free_drawable(qdev, release);
-out_unref:
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
}
void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
struct qxl_release *release;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
rect.left = dx;
rect.top = dy;
rect.right = dx + width;
rect.bottom = dy + height;
- ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy_bits.src_pos.x = sx;
drawable->u.copy_bits.src_pos.y = sy;
-
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
struct qxl_release *release;
int ret;
- ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+ ret = alloc_drawable(qdev, &release);
if (ret)
return;
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
+
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
drawable->u.fill.mask.bitmap = 0;
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..7e96f4f11738 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+/* just for ttm_validate_buffer */
+#include <ttm/ttm_execbuf_util.h>
+
#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -118,9 +121,9 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
- atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct list_head lhead;
- struct qxl_bo *bo;
-};
-
-struct qxl_reloc_list {
- struct list_head bos;
+ struct ttm_validate_buffer tv;
};
struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
struct qxl_release {
int id;
int type;
- int bo_count;
uint32_t release_offset;
uint32_t surface_release_id;
- struct qxl_bo *bos[QXL_MAX_RES];
+ struct ww_acquire_ctx ticket;
+ struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+ struct list_head head;
+ struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+ struct qxl_bo *bo;
+ struct list_head chunk_list;
};
struct qxl_fb_image {
@@ -314,6 +322,7 @@ struct qxl_device {
struct workqueue_struct *gc_queue;
struct work_struct gc_work;
+ struct work_struct fb_work;
};
/* forward declaration for QXL_INFO_IO */
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
/* qxl image */
-int qxl_image_create(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int x, int y, int width, int height,
- int depth, int stride);
+int qxl_image_init(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image **image_ptr,
+ int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
void qxl_io_flush_release(struct qxl_device *qdev);
void qxl_io_flush_surfaces(struct qxl_device *qdev);
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait);
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release);
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info);
-/*
- * qxl_bo_add_resource.
- *
- */
-void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo);
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release);
+
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo);
/* qxl drawing commands */
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
u32 sx, u32 sy,
u32 dx, u32 dy);
-uint64_t
-qxl_release_alloc(struct qxl_device *qdev, int type,
- struct qxl_release **ret);
-
void qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release);
-void qxl_release_add_res(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo *bo);
+
/* used by qxl_debugfs_release */
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id);
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
/* qxl_fence.c */
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
void qxl_fence_fini(struct qxl_fence *qfence);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
#define QXL_DIRTY_DELAY (HZ / 30)
+#define QXL_FB_OP_FILLRECT 1
+#define QXL_FB_OP_COPYAREA 2
+#define QXL_FB_OP_IMAGEBLIT 3
+
+struct qxl_fb_op {
+ struct list_head head;
+ int op_type;
+ union {
+ struct fb_fillrect fr;
+ struct fb_copyarea ca;
+ struct fb_image ib;
+ } op;
+ void *img_data;
+};
+
struct qxl_fbdev {
struct drm_fb_helper helper;
struct qxl_framebuffer qfb;
struct list_head fbdev_list;
struct qxl_device *qdev;
+ spinlock_t delayed_ops_lock;
+ struct list_head delayed_ops;
void *shadow;
int size;
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
.deferred_io = qxl_deferred_io,
};
-static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
+static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.fr = *fb_rect;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_FILLRECT;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
+ const struct fb_copyarea *fb_copy)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ca = *fb_copy;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_COPYAREA;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
+ const struct fb_image *fb_image)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+ uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
+
+ op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ib = *fb_image;
+ op->img_data = (void *)(op + 1);
+ op->op_type = QXL_FB_OP_IMAGEBLIT;
+
+ memcpy(op->img_data, fb_image->data, size);
+
+ op->op.ib.data = op->img_data;
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_fillrect_internal(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
qxl_draw_fill_rec.rect = rect;
qxl_draw_fill_rec.color = color;
qxl_draw_fill_rec.rop = rop;
+
+ qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
if (!drm_can_sleep()) {
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_fillrect(qfbdev, fb_rect);
+ schedule_work(&qdev->fb_work);
return;
}
- qxl_draw_fill(&qxl_draw_fill_rec);
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_fillrect_internal(info, fb_rect);
}
-static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
+static void qxl_fb_copyarea_internal(struct fb_info *info,
+ const struct fb_copyarea *region)
{
struct qxl_fbdev *qfbdev = info->par;
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
region->dx, region->dy);
}
+static void qxl_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
+ if (!drm_can_sleep()) {
+ qxl_fb_delayed_copyarea(qfbdev, region);
+ schedule_work(&qdev->fb_work);
+ return;
+ }
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_copyarea_internal(info, region);
+}
+
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
qxl_draw_opaque_fb(qxl_fb_image, 0);
}
+static void qxl_fb_imageblit_internal(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_fb_image qxl_fb_image;
+
+ /* ensure proper order of rendering operations - TODO: must do this
+ * for everything. */
+ qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+ qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
static void qxl_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_fb_image qxl_fb_image;
if (!drm_can_sleep()) {
- /* we cannot do any ttm_bo allocation since that will fail on
- * ioremap_wc..__get_vm_area_node, so queue the work item
- * instead This can happen from printk inside an interrupt
- * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_imageblit(qfbdev, image);
+ schedule_work(&qdev->fb_work);
return;
}
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_imageblit_internal(info, image);
+}
- /* ensure proper order of rendering operations - TODO: must do this
- * for everything. */
- qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
- qxl_fb_imageblit_safe(&qxl_fb_image);
+static void qxl_fb_work(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
+ unsigned long flags;
+ struct qxl_fb_op *entry, *tmp;
+ struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
+
+ /* since the irq context just adds entries to the end of the
+ list, dropping the lock should be fine, as entry isn't modified
+ in the operation code */
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+ switch (entry->op_type) {
+ case QXL_FB_OP_FILLRECT:
+ qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
+ break;
+ case QXL_FB_OP_COPYAREA:
+ qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
+ break;
+ case QXL_FB_OP_IMAGEBLIT:
+ qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
+ break;
+ }
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}
int qxl_fb_init(struct qxl_device *qdev)
{
+ INIT_WORK(&qdev->fb_work, qxl_fb_work);
return 0;
}
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
qfbdev->helper.funcs = &qxl_fb_helper_funcs;
-
+ spin_lock_init(&qfbdev->delayed_ops_lock);
+ INIT_LIST_HEAD(&qfbdev->delayed_ops);
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
For some reason every so often qxl hw fails to release, things go wrong.
*/
-
-
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+/* must be called with the fence lock held */
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
{
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
- spin_unlock(&bo->tbo.bdev->fence_lock);
- return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..25e1777fb0a2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
#include "qxl_object.h"
static int
-qxl_image_create_helper(struct qxl_device *qdev,
+qxl_allocate_chunk(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *image,
+ unsigned int chunk_size)
+{
+ struct qxl_drm_chunk *chunk;
+ int ret;
+
+ chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+ if (ret) {
+ kfree(chunk);
+ return ret;
+ }
+
+ list_add_tail(&chunk->head, &image->chunk_list);
+ return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int width, int height,
- int depth, unsigned int hash,
- int stride)
+ struct qxl_drm_image **image_ptr,
+ int height, int stride)
+{
+ struct qxl_drm_image *image;
+ int ret;
+
+ image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&image->chunk_list);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+ if (ret) {
+ kfree(image);
+ return ret;
+ }
+
+ ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+ if (ret) {
+ qxl_bo_unref(&image->bo);
+ kfree(image);
+ return ret;
+ }
+ *image_ptr = image;
+ return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
+ struct qxl_drm_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+ qxl_bo_unref(&chunk->bo);
+ kfree(chunk);
+ }
+
+ qxl_bo_unref(&dimage->bo);
+ kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int width, int height,
+ int depth, unsigned int hash,
+ int stride)
+{
+ struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
- struct qxl_bo *chunk_bo;
- int ret;
+ struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
+
+ drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+ chunk_bo = drv_chunk->bo;
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
- &chunk_bo);
-
+
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
-
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
}
}
}
-
-
qxl_bo_kunmap(chunk_bo);
- /* Image */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
-
- ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+ image_bo = dimage->bo;
+ ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
- qxl_release_add_res(qdev, release, chunk_bo);
- qxl_bo_unreserve(chunk_bo);
- qxl_bo_unref(&chunk_bo);
- qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
-int qxl_image_create(struct qxl_device *qdev,
+int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
+ struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
- return qxl_image_create_helper(qdev, release, image_bo, data,
+ return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..6de33563d6f1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
&qxl_map->offset);
}
+struct qxl_reloc_info {
+ int type;
+ struct qxl_bo *dst_bo;
+ uint32_t dst_offset;
+ struct qxl_bo *src_bo;
+ int src_offset;
+};
+
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
-apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src, uint64_t src_off)
+apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
-
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
- src, src_off);
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ info->src_bo,
+ info->src_offset);
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
-apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src)
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
uint32_t id = 0;
void *reloc_page;
- if (src && !src->is_primary)
- id = src->surface_id;
+ if (info->src_bo && !info->src_bo->is_primary)
+ id = info->src_bo->surface_id;
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
- struct qxl_reloc_list *reloc_list)
+ struct qxl_release *release)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
- if (!gobj) {
- DRM_ERROR("bad bo handle %lld\n", handle);
+ if (!gobj)
return NULL;
- }
+
qobj = gem_to_qxl_bo(gobj);
- ret = qxl_bo_list_add(reloc_list, qobj);
+ ret = qxl_release_list_add(release, qobj);
if (ret)
return NULL;
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
-static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int qxl_process_single_command(struct qxl_device *qdev,
+ struct drm_qxl_command *cmd,
+ struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
- struct drm_qxl_execbuffer *execbuffer = data;
- struct drm_qxl_command user_cmd;
- int cmd_num;
- struct qxl_bo *reloc_src_bo;
- struct qxl_bo *reloc_dst_bo;
- struct drm_qxl_reloc reloc;
+ struct qxl_reloc_info *reloc_info;
+ int release_type;
+ struct qxl_release *release;
+ struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret;
- struct qxl_reloc_list reloc_list;
+ int i, j, ret, num_relocs;
int unwritten;
- uint32_t reloc_dst_offset;
- INIT_LIST_HEAD(&reloc_list.bos);
- for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
- struct qxl_release *release;
- struct qxl_bo *cmd_bo;
- int release_type;
- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ switch (cmd->type) {
+ case QXL_CMD_DRAW:
+ release_type = QXL_RELEASE_DRAWABLE;
+ break;
+ case QXL_CMD_SURFACE:
+ case QXL_CMD_CURSOR:
+ default:
+ DRM_DEBUG("Only draw commands in execbuffers\n");
+ return -EINVAL;
+ break;
+ }
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
- sizeof(user_cmd)))
- return -EFAULT;
- switch (user_cmd.type) {
- case QXL_CMD_DRAW:
- release_type = QXL_RELEASE_DRAWABLE;
- break;
- case QXL_CMD_SURFACE:
- case QXL_CMD_CURSOR:
- default:
- DRM_DEBUG("Only draw commands in execbuffers\n");
- return -EINVAL;
- break;
- }
+ if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+ return -EINVAL;
- if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
- return -EINVAL;
+ if (!access_ok(VERIFY_READ,
+ (void *)(unsigned long)cmd->command,
+ cmd->command_size))
+ return -EFAULT;
- if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)user_cmd.command,
- user_cmd.command_size))
- return -EFAULT;
+ reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
- ret = qxl_alloc_release_reserved(qdev,
- sizeof(union qxl_release_info) +
- user_cmd.command_size,
- release_type,
- &release,
- &cmd_bo);
- if (ret)
- return ret;
+ ret = qxl_alloc_release_reserved(qdev,
+ sizeof(union qxl_release_info) +
+ cmd->command_size,
+ release_type,
+ &release,
+ &cmd_bo);
+ if (ret)
+ goto out_free_reloc;
- /* TODO copy slow path code from i915 */
- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
- {
- struct qxl_drawable *draw = fb_cmd;
+ {
+ struct qxl_drawable *draw = fb_cmd;
+ draw->mm_time = qdev->rom->mm_clock;
+ }
- draw->mm_time = qdev->rom->mm_clock;
- }
- qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
- if (unwritten) {
- DRM_ERROR("got unwritten %d\n", unwritten);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
+ qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+ if (unwritten) {
+ DRM_ERROR("got unwritten %d\n", unwritten);
+ ret = -EFAULT;
+ goto out_free_release;
+ }
+
+ /* fill out reloc info structs */
+ num_relocs = 0;
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ struct drm_qxl_reloc reloc;
+
+ if (DRM_COPY_FROM_USER(&reloc,
+ &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+ sizeof(reloc))) {
+ ret = -EFAULT;
+ goto out_free_bos;
}
- for (i = 0 ; i < user_cmd.relocs_num; ++i) {
- if (DRM_COPY_FROM_USER(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
- sizeof(reloc))) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
- }
+ /* add the bos to the list of bos to validate -
+ need to validate first then process relocs? */
+ if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+ DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
- /* add the bos to the list of bos to validate -
- need to validate first then process relocs? */
- if (reloc.dst_handle) {
- reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
- reloc.dst_handle, &reloc_list);
- if (!reloc_dst_bo) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- reloc_dst_offset = 0;
- } else {
- reloc_dst_bo = cmd_bo;
- reloc_dst_offset = release->release_offset;
+ ret = -EINVAL;
+ goto out_free_bos;
+ }
+ reloc_info[i].type = reloc.reloc_type;
+
+ if (reloc.dst_handle) {
+ reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.dst_handle, release);
+ if (!reloc_info[i].dst_bo) {
+ ret = -EINVAL;
+ reloc_info[i].src_bo = NULL;
+ goto out_free_bos;
}
-
- /* reserve and validate the reloc dst bo */
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
- reloc_src_bo =
- qxlhw_handle_to_bo(qdev, file_priv,
- reloc.src_handle, &reloc_list);
- if (!reloc_src_bo) {
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- } else
- reloc_src_bo = NULL;
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
- apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
- reloc_src_bo, reloc.src_offset);
- } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
- apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
- } else {
- DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
- return -EINVAL;
+ reloc_info[i].dst_offset = reloc.dst_offset;
+ } else {
+ reloc_info[i].dst_bo = cmd_bo;
+ reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+ }
+ num_relocs++;
+
+ /* reserve and validate the reloc dst bo */
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+ reloc_info[i].src_bo =
+ qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.src_handle, release);
+ if (!reloc_info[i].src_bo) {
+ if (reloc_info[i].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
+ ret = -EINVAL;
+ goto out_free_bos;
}
+ reloc_info[i].src_offset = reloc.src_offset;
+ } else {
+ reloc_info[i].src_bo = NULL;
+ reloc_info[i].src_offset = 0;
+ }
+ }
- if (reloc_src_bo && reloc_src_bo != cmd_bo) {
- qxl_release_add_res(qdev, release, reloc_src_bo);
- drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
- }
+ /* validate all buffers */
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_bos;
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- }
- qxl_fence_releaseable(qdev, release);
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+ apply_reloc(qdev, &reloc_info[i]);
+ else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+ apply_surf_reloc(qdev, &reloc_info[i]);
+ }
- ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
- if (ret == -ERESTARTSYS) {
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- qxl_bo_list_unreserve(&reloc_list, true);
+ ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+ else
+ qxl_release_fence_buffer_objects(release);
+
+out_free_bos:
+ for (j = 0; j < num_relocs; j++) {
+ if (reloc_info[j].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
+ if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
+ }
+out_free_release:
+ if (ret)
+ qxl_release_free(qdev, release);
+out_free_reloc:
+ kfree(reloc_info);
+ return ret;
+}
+
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_execbuffer *execbuffer = data;
+ struct drm_qxl_command user_cmd;
+ int cmd_num;
+ int ret;
+
+ for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+
+ struct drm_qxl_command *commands =
+ (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+
+ if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ sizeof(user_cmd)))
+ return -EFAULT;
+
+ ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+ if (ret)
return ret;
- }
- qxl_release_unreserve(qdev, release);
}
- qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
goto out;
if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type);
+ qxl_ttm_placement_from_domain(qobj, qobj->type, false);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..aa161cddd87e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
+ u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
qbo->placement.fpfn = 0;
qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, u32 domain,
+ unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev,
}
bo->gem_base.driver_private = NULL;
bo->type = domain;
- bo->pin_count = 0;
+ bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
- atomic_set(&bo->reserve_count, 0);
+
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain);
+ qxl_ttm_placement_from_domain(bo, domain, pinned);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- int r, i;
+ int r;
if (bo->pin_count) {
bo->pin_count++;
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ qxl_ttm_placement_from_domain(bo, domain, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
return 0;
}
-void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
-{
- struct qxl_bo_list *entry, *sf;
-
- list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
- qxl_bo_unreserve(entry->bo);
- list_del(&entry->lhead);
- kfree(entry);
- }
-}
-
-int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
-{
- struct qxl_bo_list *entry;
- int ret;
-
- list_for_each_entry(entry, &reloc_list->bos, lhead) {
- if (entry->bo == bo)
- return 0;
- }
-
- entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->bo = bo;
- list_add(&entry->lhead, &reloc_list->bos);
-
- ret = qxl_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
- if (ret)
- return ret;
- }
-
- /* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
- if (ret)
- return ret;
- return 0;
-}
-
int qxl_surf_evict(struct qxl_device *qdev)
{
return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..8cb6167038e5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
- bool kernel, u32 domain,
+ bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
-extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
-extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..b61449e52cd5 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-uint64_t
+
+static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return 0;
}
release->type = type;
- release->bo_count = 0;
release->release_offset = 0;
release->surface_release_id = 0;
+ INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- int i;
-
- QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
- release->type, release->bo_count);
+ struct qxl_bo_list *entry, *tmp;
+ QXL_INFO(qdev, "release %d, type %d\n", release->id,
+ release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- for (i = 0 ; i < release->bo_count; ++i) {
+ list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- release->bos[i]->tbo.addr_space_offset
+ entry->tv.bo->addr_space_offset
- DRM_FILE_OFFSET);
- qxl_fence_remove_release(&release->bos[i]->fence, release->id);
- qxl_bo_unref(&release->bos[i]);
+ qxl_fence_remove_release(&bo->fence, release->id);
+ qxl_bo_unref(&bo);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
kfree(release);
}
-void
-qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
- struct qxl_bo *bo)
-{
- int i;
- for (i = 0; i < release->bo_count; i++)
- if (release->bos[i] == bo)
- return;
-
- if (release->bo_count >= QXL_MAX_RES) {
- DRM_ERROR("exceeded max resource on a qxl_release item\n");
- return;
- }
- release->bos[release->bo_count++] = qxl_bo_ref(bo);
-}
-
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
int ret;
- ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+ /* pin release bo's - they are too messy to evict */
+ ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
+ QXL_GEM_DOMAIN_VRAM, NULL,
bo);
return ret;
}
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait)
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+{
+ struct qxl_bo_list *entry;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ if (entry->tv.bo == &bo->tbo)
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ qxl_bo_ref(bo);
+ entry->tv.bo = &bo->tbo;
+ list_add_tail(&entry->tv.head, &release->bos);
+ return 0;
+}
+
+static int qxl_release_validate_bo(struct qxl_bo *bo)
{
int ret;
- if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
- ret = qxl_bo_reserve(release->bos[0], no_wait);
+
+ if (!bo->pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
if (ret)
return ret;
}
+
+ /* allocate a surface for reserved + validated buffers */
+ ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+{
+ int ret;
+ struct qxl_bo_list *entry;
+
+ /* if there is only one object on the release it is the release itself,
+ and since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return 0;
+
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+
+ ret = qxl_release_validate_bo(bo);
+ if (ret) {
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ return ret;
+ }
+ }
return 0;
}
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release)
+void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
- if (atomic_dec_and_test(&release->bos[0]->reserve_count))
- qxl_bo_unreserve(release->bos[0]);
+ /* if there is only one object on the release it is the release itself,
+ and since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
+
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
- int ret;
-
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
+ struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
- bo = qxl_bo_ref(create_rel->bos[0]);
+ bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
- qxl_release_add_res(qdev, *release, bo);
+ qxl_release_list_add(*release, bo);
- ret = qxl_release_reserve(qdev, *release, false);
- if (ret) {
- DRM_ERROR("release reserve failed\n");
- goto out_unref;
- }
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-
-out_unref:
qxl_bo_unref(&bo);
- return ret;
+ return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
{
struct qxl_bo *bo;
int idr_ret;
- int ret;
+ int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_unlock(&qdev->release_mutex);
return ret;
}
-
- /* pin releases bo's they are too messy to evict */
- ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
- qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
- qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
}
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
if (rbo)
*rbo = bo;
- qxl_release_add_res(qdev, *release, bo);
-
- ret = qxl_release_reserve(qdev, *release, false);
mutex_unlock(&qdev->release_mutex);
- if (ret)
- goto out_unref;
+
+ qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-out_unref:
qxl_bo_unref(&bo);
return ret;
}
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release)
-{
- int i, ret;
- for (i = 0; i < release->bo_count; i++) {
- if (!release->bos[i]->tbo.sync_obj)
- release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
- ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
- if (ret)
- return ret;
- }
- return 0;
-}
-
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
- if (release->bo_count < 1) {
- DRM_ERROR("read a released resource with 0 bos\n");
- return NULL;
- }
+
return release;
}
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ if (!ptr)
+ return NULL;
info = ptr + (release->release_offset & ~PAGE_SIZE);
return info;
}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
+
+void qxl_release_fence_buffer_objects(struct qxl_release *release)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+ struct qxl_bo *qbo;
+
+ /* if there is only one object on the release it is the release itself,
+ and since these objects are pinned there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, &release->bos, head) {
+ bo = entry->bo;
+ qbo = to_qxl_bo(bo);
+
+ if (!entry->bo->sync_obj)
+ entry->bo->sync_obj = &qbo->fence;
+
+ qxl_fence_add_release_locked(&qbo->fence, release->id);
+
+ ttm_bo_add_to_lru(bo);
+ ww_mutex_unlock(&bo->resv->lock);
+ entry->reserved = false;
+ }
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ ww_acquire_fini(&release->ticket);
+}
+
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..1dfd84cda2a1 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
return;
}
qbo = container_of(bo, struct qxl_bo, tbo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
*placement = qbo->placement;
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed480..32501f6ec991 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
};
/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- memcpy(base, send, send_bytes);
+ radeon_copy_swap(base, send, send_bytes, true);
- args.v1.lpAuxRequest = 0 + 4;
- args.v1.lpDataOut = 16 + 4;
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- memcpy(recv, base + 16, recv_bytes);
+ radeon_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 393880a09412..10f712e37003 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3166,7 +3166,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
+ r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
@@ -3181,6 +3181,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
radeon_semaphore_free(rdev, &sem, NULL);
}
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
if (cur_size_in_bytes > 0x1fffff)
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134ab..e5c860f4ccbe 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
else
- WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}
void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 78bec1a58ed1..f8f8b3113ddd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = {
.get_mclk = &rv6xx_dpm_get_mclk,
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca1895709908..902479fa737f 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86b..68ce36056019 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
- int rev;
+ int rev, size;
uint16_t offset = 0, check_offset;
if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0xc;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x14;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2a;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2c;
break;
case COMBIOS_CRTC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2e;
break;
case COMBIOS_PLL_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x30;
break;
case COMBIOS_TV_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x32;
break;
case COMBIOS_DFP_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x34;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x36;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x38;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x3e;
break;
case COMBIOS_LCD_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x40;
break;
case COMBIOS_MOBILE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x42;
break;
case COMBIOS_PLL_INIT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x46;
break;
case COMBIOS_MEM_CONFIG_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x48;
break;
case COMBIOS_SAVE_MASK_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4a;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4c;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4e;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x50;
break;
case COMBIOS_DYN_CLK_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x52;
break;
case COMBIOS_RESERVED_MEM_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x54;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x58;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5a;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5c;
break;
case COMBIOS_MISC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5e;
break;
case COMBIOS_CRT_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x60;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x62;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x64;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x66;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x68;
break;
case COMBIOS_OEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6a;
break;
case COMBIOS_DYN_CLK_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6c;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6e;
break;
case COMBIOS_I2C_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x70;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
+ check_offset = 0;
break;
}
- return offset;
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);
+ return offset;
}
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- /* if the values are all zeros, use the table */
- if (p_dac->ps2_pdac_adj)
+ /* if the values are zeros, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
found = 1;
}
/* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- if ((dev->pdev->device == 0x514D) &&
+ ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149)) {
+ (dev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d9d31a383276..6a51d943ccf4 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -466,7 +466,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- RADEON_VM_PTB_ALIGN(size),
+ RADEON_GPU_PAGE_ALIGN(size),
RADEON_VM_PTB_ALIGN_SIZE,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
@@ -621,7 +621,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
}
retry:
- pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev));
+ pd_size = radeon_vm_directory_size(rdev);
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -953,8 +953,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
- RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8),
- RADEON_VM_PTB_ALIGN_SIZE, false);
+ RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 65e33f387341..363018c60412 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
POWERMODE1(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
POWERMODE2(calculate_memory_refresh_rate(rdev,
- pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+ pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
POWERMODE3(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ if (rdev->pm.dpm.new_active_crtcs & 1) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
- } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ } else if (rdev->pm.dpm.new_active_crtcs & 2) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
} else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
int ret;
+ pi->restricted_levels = 0;
+
rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
return 0;
}
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
else
return requested_state->high.mclk;
}
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ pi->restricted_levels = 3;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ pi->restricted_levels = 2;
+ } else {
+ pi->restricted_levels = 0;
+ }
+
+ rv6xx_clear_vc(rdev);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+ r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+ r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+ rv6xx_enable_medium(rdev);
+ rv6xx_enable_high(rdev);
+ if (pi->restricted_levels == 3)
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+ rv6xx_program_vc(rdev);
+ rv6xx_program_at(rdev);
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 8dd08c029da2..2d094cfdb1e0 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
struct hv_hotadd_state *has)
{
int ret = 0;
- int i, nid, t;
+ int i, nid;
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
/*
* Wait for the memory block to be onlined.
+ * Since the hot add has succeeded, it is ok to
+ * proceed even if the pages in the hot added region
+ * have not been "onlined" within the allowed time.
*/
- t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
- if (t == 0) {
- pr_info("hot_add memory timedout\n");
- has->ha_end_pfn -= HA_CHUNK;
- has->covered_end_pfn -= processed_pfn;
- break;
- }
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
}
@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
dm->num_pages_ballooned +
compute_balloon_floor();
+ /*
+ * If our transaction ID is no longer current, just don't
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+ if (status.hdr.trans_id != atomic_read(&trans_id))
+ return;
+
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a2464bf07c49..e8e071fc1d6d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
if (ret)
pr_err("Unable to register child device\n");
else
- pr_info("child device %s registered\n",
+ pr_debug("child device %s registered\n",
dev_name(&child_device_obj->device));
return ret;
@@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
*/
void vmbus_device_unregister(struct hv_device *device_obj)
{
+ pr_debug("child device %s unregistered\n",
+ dev_name(&device_obj->device));
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
*/
device_unregister(&device_obj->device);
-
- pr_info("child device %s unregistered\n",
- dev_name(&device_obj->device));
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 048f2947e08b..e45f5575fd4d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -63,7 +63,10 @@
#include "bcache.h"
#include "btree.h"
+#include <linux/freezer.h>
+#include <linux/kthread.h>
#include <linux/random.h>
+#include <trace/events/bcache.h>
#define MAX_IN_FLIGHT_DISCARDS 8U
@@ -151,7 +154,7 @@ static void discard_finish(struct work_struct *w)
mutex_unlock(&ca->set->bucket_lock);
closure_wake_up(&ca->set->bucket_wait);
- wake_up(&ca->set->alloc_wait);
+ wake_up_process(ca->alloc_thread);
closure_put(&ca->set->cl);
}
@@ -350,38 +353,30 @@ static void invalidate_buckets(struct cache *ca)
break;
}
- pr_debug("free %zu/%zu free_inc %zu/%zu unused %zu/%zu",
- fifo_used(&ca->free), ca->free.size,
- fifo_used(&ca->free_inc), ca->free_inc.size,
- fifo_used(&ca->unused), ca->unused.size);
+ trace_bcache_alloc_invalidate(ca);
}
#define allocator_wait(ca, cond) \
do { \
- DEFINE_WAIT(__wait); \
- \
while (1) { \
- prepare_to_wait(&ca->set->alloc_wait, \
- &__wait, TASK_INTERRUPTIBLE); \
+ set_current_state(TASK_INTERRUPTIBLE); \
if (cond) \
break; \
\
mutex_unlock(&(ca)->set->bucket_lock); \
- if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
- finish_wait(&ca->set->alloc_wait, &__wait); \
- closure_return(cl); \
- } \
+ if (kthread_should_stop()) \
+ return 0; \
\
+ try_to_freeze(); \
schedule(); \
mutex_lock(&(ca)->set->bucket_lock); \
} \
- \
- finish_wait(&ca->set->alloc_wait, &__wait); \
+ __set_current_state(TASK_RUNNING); \
} while (0)
-void bch_allocator_thread(struct closure *cl)
+static int bch_allocator_thread(void *arg)
{
- struct cache *ca = container_of(cl, struct cache, alloc);
+ struct cache *ca = arg;
mutex_lock(&ca->set->bucket_lock);
@@ -442,7 +437,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
{
long r = -1;
again:
- wake_up(&ca->set->alloc_wait);
+ wake_up_process(ca->alloc_thread);
if (fifo_used(&ca->free) > ca->watermark[watermark] &&
fifo_pop(&ca->free, r)) {
@@ -476,9 +471,7 @@ again:
return r;
}
- pr_debug("alloc failure: blocked %i free %zu free_inc %zu unused %zu",
- atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free),
- fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+ trace_bcache_alloc_fail(ca);
if (cl) {
closure_wait(&ca->set->bucket_wait, cl);
@@ -552,6 +545,17 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
/* Init */
+int bch_cache_allocator_start(struct cache *ca)
+{
+ struct task_struct *k = kthread_run(bch_allocator_thread,
+ ca, "bcache_allocator");
+ if (IS_ERR(k))
+ return PTR_ERR(k);
+
+ ca->alloc_thread = k;
+ return 0;
+}
+
void bch_cache_allocator_exit(struct cache *ca)
{
struct discard *d;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index d3e15b42a4ab..b39f6f0b45f2 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
#include <linux/bio.h>
-#include <linux/blktrace_api.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -388,8 +387,6 @@ struct keybuf_key {
typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
struct keybuf {
- keybuf_pred_fn *key_predicate;
-
struct bkey last_scanned;
spinlock_t lock;
@@ -437,9 +434,12 @@ struct bcache_device {
/* If nonzero, we're detaching/unregistering from cache set */
atomic_t detaching;
+ int flush_done;
+
+ uint64_t nr_stripes;
+ unsigned stripe_size_bits;
+ atomic_t *stripe_sectors_dirty;
- atomic_long_t sectors_dirty;
- unsigned long sectors_dirty_gc;
unsigned long sectors_dirty_last;
long sectors_dirty_derivative;
@@ -531,6 +531,7 @@ struct cached_dev {
unsigned sequential_merge:1;
unsigned verify:1;
+ unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1;
unsigned writeback_running:1;
unsigned char writeback_percent;
@@ -565,8 +566,7 @@ struct cache {
unsigned watermark[WATERMARK_MAX];
- struct closure alloc;
- struct workqueue_struct *alloc_workqueue;
+ struct task_struct *alloc_thread;
struct closure prio;
struct prio_set *disk_buckets;
@@ -664,13 +664,9 @@ struct gc_stat {
* CACHE_SET_STOPPING always gets set first when we're closing down a cache set;
* we'll continue to run normally for awhile with CACHE_SET_STOPPING set (i.e.
* flushing dirty data).
- *
- * CACHE_SET_STOPPING_2 gets set at the last phase, when it's time to shut down
- * the allocation thread.
*/
#define CACHE_SET_UNREGISTERING 0
#define CACHE_SET_STOPPING 1
-#define CACHE_SET_STOPPING_2 2
struct cache_set {
struct closure cl;
@@ -703,9 +699,6 @@ struct cache_set {
/* For the btree cache */
struct shrinker shrink;
- /* For the allocator itself */
- wait_queue_head_t alloc_wait;
-
/* For the btree cache and anything allocation related */
struct mutex bucket_lock;
@@ -823,10 +816,9 @@ struct cache_set {
/*
* A btree node on disk could have too many bsets for an iterator to fit
- * on the stack - this is a single element mempool for btree_read_work()
+ * on the stack - have to dynamically allocate them
*/
- struct mutex fill_lock;
- struct btree_iter *fill_iter;
+ mempool_t *fill_iter;
/*
* btree_sort() is a merge sort and requires temporary space - single
@@ -834,6 +826,7 @@ struct cache_set {
*/
struct mutex sort_lock;
struct bset *sort;
+ unsigned sort_crit_factor;
/* List of buckets we're currently writing data to */
struct list_head data_buckets;
@@ -906,8 +899,6 @@ static inline unsigned local_clock_us(void)
return local_clock() >> 10;
}
-#define MAX_BSETS 4U
-
#define BTREE_PRIO USHRT_MAX
#define INITIAL_PRIO 32768
@@ -1112,23 +1103,6 @@ static inline void __bkey_put(struct cache_set *c, struct bkey *k)
atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
-/* Blktrace macros */
-
-#define blktrace_msg(c, fmt, ...) \
-do { \
- struct request_queue *q = bdev_get_queue(c->bdev); \
- if (q) \
- blk_add_trace_msg(q, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define blktrace_msg_all(s, fmt, ...) \
-do { \
- struct cache *_c; \
- unsigned i; \
- for_each_cache(_c, (s), i) \
- blktrace_msg(_c, fmt, ##__VA_ARGS__); \
-} while (0)
-
static inline void cached_dev_put(struct cached_dev *dc)
{
if (atomic_dec_and_test(&dc->count))
@@ -1173,10 +1147,16 @@ static inline uint8_t bucket_disk_gen(struct bucket *b)
static struct kobj_attribute ksysfs_##n = \
__ATTR(n, S_IWUSR|S_IRUSR, show, store)
-/* Forward declarations */
+static inline void wake_up_allocators(struct cache_set *c)
+{
+ struct cache *ca;
+ unsigned i;
+
+ for_each_cache(ca, c, i)
+ wake_up_process(ca->alloc_thread);
+}
-void bch_writeback_queue(struct cached_dev *);
-void bch_writeback_add(struct cached_dev *, unsigned);
+/* Forward declarations */
void bch_count_io_errors(struct cache *, int, const char *);
void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
@@ -1193,7 +1173,6 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
bool bch_bucket_add_unused(struct cache *, struct bucket *);
-void bch_allocator_thread(struct closure *);
long bch_bucket_alloc(struct cache *, unsigned, struct closure *);
void bch_bucket_free(struct cache_set *, struct bkey *);
@@ -1241,9 +1220,9 @@ void bch_cache_set_stop(struct cache_set *);
struct cache_set *bch_cache_set_alloc(struct cache_sb *);
void bch_btree_cache_free(struct cache_set *);
int bch_btree_cache_alloc(struct cache_set *);
-void bch_cached_dev_writeback_init(struct cached_dev *);
void bch_moving_init_cache_set(struct cache_set *);
+int bch_cache_allocator_start(struct cache *ca);
void bch_cache_allocator_exit(struct cache *ca);
int bch_cache_allocator_init(struct cache *ca);
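Among the header changes, the single fill_iter buffer guarded by fill_lock becomes a mempool, so concurrent btree-node reads no longer serialize on one iterator while still keeping a guaranteed element for forward progress. A rough sketch of that mempool usage, with illustrative names (the real pool is sized from the bucket and block size, and the read path allocates with GFP_NOWAIT):

#include <linux/mempool.h>
#include <linux/slab.h>

/* Keep one preallocated iterator so a reader can always make progress,
 * mirroring the single static iterator the old fill_lock protected. */
static mempool_t *example_iter_pool_create(size_t iter_bytes)
{
    return mempool_create_kmalloc_pool(1, iter_bytes);
}

static void example_iter_pool_use(mempool_t *pool)
{
    /* GFP_NOIO here simply blocks on the pool; the patch itself uses
     * GFP_NOWAIT in bch_btree_node_read_done(). */
    void *iter = mempool_alloc(pool, GFP_NOIO);

    /* ... initialize and walk the iterator ... */

    mempool_free(iter, pool);
}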
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1d27d3af3251..8010eed06a51 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -78,6 +78,7 @@ struct bkey *bch_keylist_pop(struct keylist *l)
bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
{
unsigned i;
+ char buf[80];
if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
goto bad;
@@ -102,7 +103,8 @@ bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
return false;
bad:
- cache_bug(c, "spotted bad key %s: %s", pkey(k), bch_ptr_status(c, k));
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
return true;
}
@@ -162,10 +164,16 @@ bool bch_ptr_bad(struct btree *b, const struct bkey *k)
#ifdef CONFIG_BCACHE_EDEBUG
bug:
mutex_unlock(&b->c->bucket_lock);
- btree_bug(b,
+
+ {
+ char buf[80];
+
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
- pkey(k), PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
- g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
+ g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
+ }
return true;
#endif
}
@@ -1084,33 +1092,39 @@ void bch_btree_sort_into(struct btree *b, struct btree *new)
new->sets->size = 0;
}
+#define SORT_CRIT (4096 / sizeof(uint64_t))
+
void bch_btree_sort_lazy(struct btree *b)
{
- if (b->nsets) {
- unsigned i, j, keys = 0, total;
+ unsigned crit = SORT_CRIT;
+ int i;
- for (i = 0; i <= b->nsets; i++)
- keys += b->sets[i].data->keys;
-
- total = keys;
+ /* Don't sort if nothing to do */
+ if (!b->nsets)
+ goto out;
- for (j = 0; j < b->nsets; j++) {
- if (keys * 2 < total ||
- keys < 1000) {
- bch_btree_sort_partial(b, j);
- return;
- }
+ /* If not a leaf node, always sort */
+ if (b->level) {
+ bch_btree_sort(b);
+ return;
+ }
- keys -= b->sets[j].data->keys;
- }
+ for (i = b->nsets - 1; i >= 0; --i) {
+ crit *= b->c->sort_crit_factor;
- /* Must sort if b->nsets == 3 or we'll overflow */
- if (b->nsets >= (MAX_BSETS - 1) - b->level) {
- bch_btree_sort(b);
+ if (b->sets[i].data->keys < crit) {
+ bch_btree_sort_partial(b, i);
return;
}
}
+ /* Sort if we'd overflow */
+ if (b->nsets + 1 == MAX_BSETS) {
+ bch_btree_sort(b);
+ return;
+ }
+
+out:
bset_build_written_tree(b);
}
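The rewritten bch_btree_sort_lazy() walks the in-memory bsets from newest to oldest, multiplying the key-count threshold by sort_crit_factor at each step, so older sets may grow proportionally larger before they force a merge; non-leaf nodes are always fully sorted, and a full sort also happens when the next bset would use the last of the MAX_BSETS slots. A standalone sketch of just that decision (constants taken from the patch, layout simplified; this is an illustration, not the in-tree code):

#include <stdio.h>

#define MAX_BSETS    4U
#define SORT_CRIT    (4096 / sizeof(unsigned long long))    /* 512 keys */

/*
 * set_keys[0..nsets-1] hold the key counts of the already-written sets,
 * oldest first; the set currently being appended to is not examined.
 * Returns the index to partially sort from, -1 for a full sort, or
 * nsets when no merging is needed yet.
 */
static int lazy_sort_decision(const unsigned *set_keys, unsigned nsets,
                              unsigned sort_crit_factor)
{
    unsigned crit = SORT_CRIT;
    int i;

    for (i = (int)nsets - 1; i >= 0; --i) {
        crit *= sort_crit_factor;
        if (set_keys[i] < crit)
            return i;        /* merge sets i..nsets into one */
    }

    if (nsets + 1 == MAX_BSETS)
        return -1;           /* about to overflow: sort everything */

    return nsets;
}

int main(void)
{
    unsigned keys[] = { 5000, 900, 300 };    /* three written sets */

    /* With factor 3 the threshold is 1536 on the first (newest written)
     * set checked, so 300 < 1536 already triggers a partial sort there. */
    printf("sort from set %d\n", lazy_sort_decision(keys, 3, 3));
    return 0;
}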
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 57a9cff41546..ae115a253d73 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -1,6 +1,8 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
+#include <linux/slab.h>
+
/*
* BKEYS:
*
@@ -142,6 +144,8 @@
/* Btree key comparison/iteration */
+#define MAX_BSETS 4U
+
struct btree_iter {
size_t size, used;
struct btree_iter_set {
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 7a5658f04e62..ee372884c405 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -24,6 +24,7 @@
#include "btree.h"
#include "debug.h"
#include "request.h"
+#include "writeback.h"
#include <linux/slab.h>
#include <linux/bitops.h>
@@ -134,44 +135,17 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
return crc ^ 0xffffffffffffffffULL;
}
-static void btree_bio_endio(struct bio *bio, int error)
+static void bch_btree_node_read_done(struct btree *b)
{
- struct closure *cl = bio->bi_private;
- struct btree *b = container_of(cl, struct btree, io.cl);
-
- if (error)
- set_btree_node_io_error(b);
-
- bch_bbio_count_io_errors(b->c, bio, error, (bio->bi_rw & WRITE)
- ? "writing btree" : "reading btree");
- closure_put(cl);
-}
-
-static void btree_bio_init(struct btree *b)
-{
- BUG_ON(b->bio);
- b->bio = bch_bbio_alloc(b->c);
-
- b->bio->bi_end_io = btree_bio_endio;
- b->bio->bi_private = &b->io.cl;
-}
-
-void bch_btree_read_done(struct closure *cl)
-{
- struct btree *b = container_of(cl, struct btree, io.cl);
- struct bset *i = b->sets[0].data;
- struct btree_iter *iter = b->c->fill_iter;
const char *err = "bad btree header";
- BUG_ON(b->nsets || b->written);
-
- bch_bbio_free(b->bio, b->c);
- b->bio = NULL;
+ struct bset *i = b->sets[0].data;
+ struct btree_iter *iter;
- mutex_lock(&b->c->fill_lock);
+ iter = mempool_alloc(b->c->fill_iter, GFP_NOWAIT);
+ iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
- if (btree_node_io_error(b) ||
- !i->seq)
+ if (!i->seq)
goto err;
for (;
@@ -228,17 +202,8 @@ void bch_btree_read_done(struct closure *cl)
if (b->written < btree_blocks(b))
bch_bset_init_next(b);
out:
-
- mutex_unlock(&b->c->fill_lock);
-
- spin_lock(&b->c->btree_read_time_lock);
- bch_time_stats_update(&b->c->btree_read_time, b->io_start_time);
- spin_unlock(&b->c->btree_read_time_lock);
-
- smp_wmb(); /* read_done is our write lock */
- set_btree_node_read_done(b);
-
- closure_return(cl);
+ mempool_free(iter, b->c->fill_iter);
+ return;
err:
set_btree_node_io_error(b);
bch_cache_set_error(b->c, "%s at bucket %zu, block %zu, %u keys",
@@ -247,48 +212,69 @@ err:
goto out;
}
-void bch_btree_read(struct btree *b)
+static void btree_node_read_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ closure_put(cl);
+}
+
+void bch_btree_node_read(struct btree *b)
{
- BUG_ON(b->nsets || b->written);
+ uint64_t start_time = local_clock();
+ struct closure cl;
+ struct bio *bio;
+
+ trace_bcache_btree_read(b);
+
+ closure_init_stack(&cl);
+
+ bio = bch_bbio_alloc(b->c);
+ bio->bi_rw = REQ_META|READ_SYNC;
+ bio->bi_size = KEY_SIZE(&b->key) << 9;
+ bio->bi_end_io = btree_node_read_endio;
+ bio->bi_private = &cl;
+
+ bch_bio_map(bio, b->sets[0].data);
+
+ bch_submit_bbio(bio, b->c, &b->key, 0);
+ closure_sync(&cl);
- if (!closure_trylock(&b->io.cl, &b->c->cl))
- BUG();
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+ set_btree_node_io_error(b);
- b->io_start_time = local_clock();
+ bch_bbio_free(bio, b->c);
- btree_bio_init(b);
- b->bio->bi_rw = REQ_META|READ_SYNC;
- b->bio->bi_size = KEY_SIZE(&b->key) << 9;
+ if (btree_node_io_error(b))
+ goto err;
- bch_bio_map(b->bio, b->sets[0].data);
+ bch_btree_node_read_done(b);
- pr_debug("%s", pbtree(b));
- trace_bcache_btree_read(b->bio);
- bch_submit_bbio(b->bio, b->c, &b->key, 0);
+ spin_lock(&b->c->btree_read_time_lock);
+ bch_time_stats_update(&b->c->btree_read_time, start_time);
+ spin_unlock(&b->c->btree_read_time_lock);
- continue_at(&b->io.cl, bch_btree_read_done, system_wq);
+ return;
+err:
+ bch_cache_set_error(b->c, "io error reading bucket %lu",
+ PTR_BUCKET_NR(b->c, &b->key, 0));
}
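bch_btree_node_read() is now fully synchronous: it allocates the bio, submits it, and waits on an on-stack closure before parsing the node, instead of chaining through bch_btree_read_done(). Closures are bcache-specific, so here is a rough equivalent of the same submit-and-wait shape using a plain struct completion and the 3.11-era bio fields seen in the patch; the helper and its arguments are hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

static void example_read_endio(struct bio *bio, int error)
{
    complete(bio->bi_private);    /* wake the on-stack waiter */
}

/* Sketch: issue one read and wait for it right here. */
static int example_sync_read(struct block_device *bdev, sector_t sector,
                             struct bio *bio)
{
    DECLARE_COMPLETION_ONSTACK(done);

    bio->bi_bdev = bdev;
    bio->bi_sector = sector;
    bio->bi_rw = REQ_META | READ_SYNC;
    bio->bi_end_io = example_read_endio;
    bio->bi_private = &done;

    submit_bio(READ, bio);
    wait_for_completion(&done);

    return test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
}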
static void btree_complete_write(struct btree *b, struct btree_write *w)
{
if (w->prio_blocked &&
!atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
- wake_up(&b->c->alloc_wait);
+ wake_up_allocators(b->c);
if (w->journal) {
atomic_dec_bug(w->journal);
__closure_wake_up(&b->c->journal.wait);
}
- if (w->owner)
- closure_put(w->owner);
-
w->prio_blocked = 0;
w->journal = NULL;
- w->owner = NULL;
}
-static void __btree_write_done(struct closure *cl)
+static void __btree_node_write_done(struct closure *cl)
{
struct btree *b = container_of(cl, struct btree, io.cl);
struct btree_write *w = btree_prev_write(b);
@@ -304,7 +290,7 @@ static void __btree_write_done(struct closure *cl)
closure_return(cl);
}
-static void btree_write_done(struct closure *cl)
+static void btree_node_write_done(struct closure *cl)
{
struct btree *b = container_of(cl, struct btree, io.cl);
struct bio_vec *bv;
@@ -313,10 +299,22 @@ static void btree_write_done(struct closure *cl)
__bio_for_each_segment(bv, b->bio, n, 0)
__free_page(bv->bv_page);
- __btree_write_done(cl);
+ __btree_node_write_done(cl);
}
-static void do_btree_write(struct btree *b)
+static void btree_node_write_endio(struct bio *bio, int error)
+{
+ struct closure *cl = bio->bi_private;
+ struct btree *b = container_of(cl, struct btree, io.cl);
+
+ if (error)
+ set_btree_node_io_error(b);
+
+ bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+ closure_put(cl);
+}
+
+static void do_btree_node_write(struct btree *b)
{
struct closure *cl = &b->io.cl;
struct bset *i = b->sets[b->nsets].data;
@@ -325,15 +323,34 @@ static void do_btree_write(struct btree *b)
i->version = BCACHE_BSET_VERSION;
i->csum = btree_csum_set(b, i);
- btree_bio_init(b);
- b->bio->bi_rw = REQ_META|WRITE_SYNC;
- b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+ BUG_ON(b->bio);
+ b->bio = bch_bbio_alloc(b->c);
+
+ b->bio->bi_end_io = btree_node_write_endio;
+ b->bio->bi_private = &b->io.cl;
+ b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
+ b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
bch_bio_map(b->bio, i);
+ /*
+ * If we're appending to a leaf node, we don't technically need FUA -
+ * this write just needs to be persisted before the next journal write,
+ * which will be marked FLUSH|FUA.
+ *
+ * Similarly if we're writing a new btree root - the pointer is going to
+ * be in the next journal entry.
+ *
+ * But if we're writing a new btree node (that isn't a root) or
+ * appending to a non-leaf btree node, we need either FUA or a flush
+ * when we write the parent with the new pointer. FUA is cheaper than a
+ * flush, and writes appending to leaf nodes aren't blocking anything so
+ * just make all btree node writes FUA to keep things sane.
+ */
+
bkey_copy(&k.key, &b->key);
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) + bset_offset(b, i));
- if (!bch_bio_alloc_pages(b->bio, GFP_NOIO)) {
+ if (!bio_alloc_pages(b->bio, GFP_NOIO)) {
int j;
struct bio_vec *bv;
void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
@@ -342,40 +359,41 @@ static void do_btree_write(struct btree *b)
memcpy(page_address(bv->bv_page),
base + j * PAGE_SIZE, PAGE_SIZE);
- trace_bcache_btree_write(b->bio);
bch_submit_bbio(b->bio, b->c, &k.key, 0);
- continue_at(cl, btree_write_done, NULL);
+ continue_at(cl, btree_node_write_done, NULL);
} else {
b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i);
- trace_bcache_btree_write(b->bio);
bch_submit_bbio(b->bio, b->c, &k.key, 0);
closure_sync(cl);
- __btree_write_done(cl);
+ __btree_node_write_done(cl);
}
}
-static void __btree_write(struct btree *b)
+void bch_btree_node_write(struct btree *b, struct closure *parent)
{
struct bset *i = b->sets[b->nsets].data;
+ trace_bcache_btree_write(b);
+
BUG_ON(current->bio_list);
+ BUG_ON(b->written >= btree_blocks(b));
+ BUG_ON(b->written && !i->keys);
+ BUG_ON(b->sets->data->seq != i->seq);
+ bch_check_key_order(b, i);
- closure_lock(&b->io, &b->c->cl);
cancel_delayed_work(&b->work);
+ /* If caller isn't waiting for write, parent refcount is cache set */
+ closure_lock(&b->io, parent ?: &b->c->cl);
+
clear_bit(BTREE_NODE_dirty, &b->flags);
change_bit(BTREE_NODE_write_idx, &b->flags);
- bch_check_key_order(b, i);
- BUG_ON(b->written && !i->keys);
-
- do_btree_write(b);
-
- pr_debug("%s block %i keys %i", pbtree(b), b->written, i->keys);
+ do_btree_node_write(b);
b->written += set_blocks(i, b->c);
atomic_long_add(set_blocks(i, b->c) * b->c->sb.block_size,
@@ -387,37 +405,31 @@ static void __btree_write(struct btree *b)
bch_bset_init_next(b);
}
-static void btree_write_work(struct work_struct *w)
+static void btree_node_write_work(struct work_struct *w)
{
struct btree *b = container_of(to_delayed_work(w), struct btree, work);
- down_write(&b->lock);
+ rw_lock(true, b, b->level);
if (btree_node_dirty(b))
- __btree_write(b);
- up_write(&b->lock);
+ bch_btree_node_write(b, NULL);
+ rw_unlock(true, b);
}
-void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
{
struct bset *i = b->sets[b->nsets].data;
struct btree_write *w = btree_current_write(b);
- BUG_ON(b->written &&
- (b->written >= btree_blocks(b) ||
- i->seq != b->sets[0].data->seq ||
- !i->keys));
+ BUG_ON(!b->written);
+ BUG_ON(!i->keys);
- if (!btree_node_dirty(b)) {
- set_btree_node_dirty(b);
- queue_delayed_work(btree_io_wq, &b->work,
- msecs_to_jiffies(30000));
- }
+ if (!btree_node_dirty(b))
+ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
- w->prio_blocked += b->prio_blocked;
- b->prio_blocked = 0;
+ set_btree_node_dirty(b);
- if (op && op->journal && !b->level) {
+ if (op && op->journal) {
if (w->journal &&
journal_pin_cmp(b->c, w, op)) {
atomic_dec_bug(w->journal);
@@ -430,23 +442,10 @@ void bch_btree_write(struct btree *b, bool now, struct btree_op *op)
}
}
- if (current->bio_list)
- return;
-
/* Force write if set is too big */
- if (now ||
- b->level ||
- set_bytes(i) > PAGE_SIZE - 48) {
- if (op && now) {
- /* Must wait on multiple writes */
- BUG_ON(w->owner);
- w->owner = &op->cl;
- closure_get(&op->cl);
- }
-
- __btree_write(b);
- }
- BUG_ON(!b->written);
+ if (set_bytes(i) > PAGE_SIZE - 48 &&
+ !current->bio_list)
+ bch_btree_node_write(b, NULL);
}
/*
@@ -559,7 +558,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
init_rwsem(&b->lock);
lockdep_set_novalidate_class(&b->lock);
INIT_LIST_HEAD(&b->list);
- INIT_DELAYED_WORK(&b->work, btree_write_work);
+ INIT_DELAYED_WORK(&b->work, btree_node_write_work);
b->c = c;
closure_init_unlocked(&b->io);
@@ -582,7 +581,7 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
BUG_ON(btree_node_dirty(b) && !b->sets[0].data);
if (cl && btree_node_dirty(b))
- bch_btree_write(b, true, NULL);
+ bch_btree_node_write(b, NULL);
if (cl)
closure_wait_event_async(&b->io.wait, cl,
@@ -623,6 +622,13 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
else if (!mutex_trylock(&c->bucket_lock))
return -1;
+ /*
+ * It's _really_ critical that we don't free too many btree nodes - we
+ * have to always leave ourselves a reserve. The reserve is how we
+ * guarantee that allocating memory for a new btree node can always
+ * succeed, so that inserting keys into the btree can always succeed and
+ * IO can always make forward progress:
+ */
nr /= c->btree_pages;
nr = min_t(unsigned long, nr, mca_can_free(c));
@@ -766,6 +772,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
int ret = -ENOMEM;
struct btree *i;
+ trace_bcache_btree_cache_cannibalize(c);
+
if (!cl)
return ERR_PTR(-ENOMEM);
@@ -784,7 +792,6 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
return ERR_PTR(-EAGAIN);
}
- /* XXX: tracepoint */
c->try_harder = cl;
c->try_harder_start = local_clock();
retry:
@@ -905,6 +912,9 @@ retry:
b = mca_find(c, k);
if (!b) {
+ if (current->bio_list)
+ return ERR_PTR(-EAGAIN);
+
mutex_lock(&c->bucket_lock);
b = mca_alloc(c, k, level, &op->cl);
mutex_unlock(&c->bucket_lock);
@@ -914,7 +924,7 @@ retry:
if (IS_ERR(b))
return b;
- bch_btree_read(b);
+ bch_btree_node_read(b);
if (!write)
downgrade_write(&b->lock);
@@ -937,15 +947,12 @@ retry:
for (; i <= b->nsets; i++)
prefetch(b->sets[i].data);
- if (!closure_wait_event(&b->io.wait, &op->cl,
- btree_node_read_done(b))) {
- rw_unlock(write, b);
- b = ERR_PTR(-EAGAIN);
- } else if (btree_node_io_error(b)) {
+ if (btree_node_io_error(b)) {
rw_unlock(write, b);
- b = ERR_PTR(-EIO);
- } else
- BUG_ON(!b->written);
+ return ERR_PTR(-EIO);
+ }
+
+ BUG_ON(!b->written);
return b;
}
@@ -959,7 +966,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level)
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(b)) {
- bch_btree_read(b);
+ bch_btree_node_read(b);
rw_unlock(true, b);
}
}
@@ -970,24 +977,19 @@ static void btree_node_free(struct btree *b, struct btree_op *op)
{
unsigned i;
+ trace_bcache_btree_node_free(b);
+
/*
* The BUG_ON() in btree_node_get() implies that we must have a write
* lock on parent to free or even invalidate a node
*/
BUG_ON(op->lock <= b->level);
BUG_ON(b == b->c->root);
- pr_debug("bucket %s", pbtree(b));
if (btree_node_dirty(b))
btree_complete_write(b, btree_current_write(b));
clear_bit(BTREE_NODE_dirty, &b->flags);
- if (b->prio_blocked &&
- !atomic_sub_return(b->prio_blocked, &b->c->prio_blocked))
- wake_up(&b->c->alloc_wait);
-
- b->prio_blocked = 0;
-
cancel_delayed_work(&b->work);
mutex_lock(&b->c->bucket_lock);
@@ -1028,17 +1030,20 @@ retry:
goto retry;
}
- set_btree_node_read_done(b);
b->accessed = 1;
bch_bset_init_next(b);
mutex_unlock(&c->bucket_lock);
+
+ trace_bcache_btree_node_alloc(b);
return b;
err_free:
bch_bucket_free(c, &k.key);
__bkey_put(c, &k.key);
err:
mutex_unlock(&c->bucket_lock);
+
+ trace_bcache_btree_node_alloc_fail(b);
return b;
}
@@ -1137,11 +1142,8 @@ static int btree_gc_mark_node(struct btree *b, unsigned *keys,
gc->nkeys++;
gc->data += KEY_SIZE(k);
- if (KEY_DIRTY(k)) {
+ if (KEY_DIRTY(k))
gc->dirty += KEY_SIZE(k);
- if (d)
- d->sectors_dirty_gc += KEY_SIZE(k);
- }
}
for (t = b->sets; t <= &b->sets[b->nsets]; t++)
@@ -1166,14 +1168,11 @@ static struct btree *btree_gc_alloc(struct btree *b, struct bkey *k,
if (!IS_ERR_OR_NULL(n)) {
swap(b, n);
+ __bkey_put(b->c, &b->key);
memcpy(k->ptr, b->key.ptr,
sizeof(uint64_t) * KEY_PTRS(&b->key));
- __bkey_put(b->c, &b->key);
- atomic_inc(&b->c->prio_blocked);
- b->prio_blocked++;
-
btree_node_free(n, op);
up_write(&n->lock);
}
@@ -1278,7 +1277,7 @@ static void btree_gc_coalesce(struct btree *b, struct btree_op *op,
btree_node_free(r->b, op);
up_write(&r->b->lock);
- pr_debug("coalesced %u nodes", nodes);
+ trace_bcache_btree_gc_coalesce(nodes);
gc->nodes--;
nodes--;
@@ -1293,14 +1292,9 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
void write(struct btree *r)
{
if (!r->written)
- bch_btree_write(r, true, op);
- else if (btree_node_dirty(r)) {
- BUG_ON(btree_current_write(r)->owner);
- btree_current_write(r)->owner = writes;
- closure_get(writes);
-
- bch_btree_write(r, true, NULL);
- }
+ bch_btree_node_write(r, &op->cl);
+ else if (btree_node_dirty(r))
+ bch_btree_node_write(r, writes);
up_write(&r->lock);
}
@@ -1386,9 +1380,7 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
ret = btree_gc_recurse(b, op, writes, gc);
if (!b->written || btree_node_dirty(b)) {
- atomic_inc(&b->c->prio_blocked);
- b->prio_blocked++;
- bch_btree_write(b, true, n ? op : NULL);
+ bch_btree_node_write(b, n ? &op->cl : NULL);
}
if (!IS_ERR_OR_NULL(n)) {
@@ -1405,7 +1397,6 @@ static void btree_gc_start(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
- struct bcache_device **d;
unsigned i;
if (!c->gc_mark_valid)
@@ -1419,16 +1410,12 @@ static void btree_gc_start(struct cache_set *c)
for_each_cache(ca, c, i)
for_each_bucket(b, ca) {
b->gc_gen = b->gen;
- if (!atomic_read(&b->pin))
+ if (!atomic_read(&b->pin)) {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_SECTORS_USED(b, 0);
+ }
}
- for (d = c->devices;
- d < c->devices + c->nr_uuids;
- d++)
- if (*d)
- (*d)->sectors_dirty_gc = 0;
-
mutex_unlock(&c->bucket_lock);
}
@@ -1437,7 +1424,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
size_t available = 0;
struct bucket *b;
struct cache *ca;
- struct bcache_device **d;
unsigned i;
mutex_lock(&c->bucket_lock);
@@ -1480,22 +1466,6 @@ size_t bch_btree_gc_finish(struct cache_set *c)
}
}
- for (d = c->devices;
- d < c->devices + c->nr_uuids;
- d++)
- if (*d) {
- unsigned long last =
- atomic_long_read(&((*d)->sectors_dirty));
- long difference = (*d)->sectors_dirty_gc - last;
-
- pr_debug("sectors dirty off by %li", difference);
-
- (*d)->sectors_dirty_last += difference;
-
- atomic_long_set(&((*d)->sectors_dirty),
- (*d)->sectors_dirty_gc);
- }
-
mutex_unlock(&c->bucket_lock);
return available;
}
@@ -1508,10 +1478,9 @@ static void bch_btree_gc(struct closure *cl)
struct gc_stat stats;
struct closure writes;
struct btree_op op;
-
uint64_t start_time = local_clock();
- trace_bcache_gc_start(c->sb.set_uuid);
- blktrace_msg_all(c, "Starting gc");
+
+ trace_bcache_gc_start(c);
memset(&stats, 0, sizeof(struct gc_stat));
closure_init_stack(&writes);
@@ -1520,14 +1489,14 @@ static void bch_btree_gc(struct closure *cl)
btree_gc_start(c);
+ atomic_inc(&c->prio_blocked);
+
ret = btree_root(gc_root, c, &op, &writes, &stats);
closure_sync(&op.cl);
closure_sync(&writes);
if (ret) {
- blktrace_msg_all(c, "Stopped gc");
pr_warn("gc failed!");
-
continue_at(cl, bch_btree_gc, bch_gc_wq);
}
@@ -1537,6 +1506,9 @@ static void bch_btree_gc(struct closure *cl)
available = bch_btree_gc_finish(c);
+ atomic_dec(&c->prio_blocked);
+ wake_up_allocators(c);
+
bch_time_stats_update(&c->btree_gc_time, start_time);
stats.key_bytes *= sizeof(uint64_t);
@@ -1544,10 +1516,8 @@ static void bch_btree_gc(struct closure *cl)
stats.data <<= 9;
stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
- blktrace_msg_all(c, "Finished gc");
- trace_bcache_gc_end(c->sb.set_uuid);
- wake_up(&c->alloc_wait);
+ trace_bcache_gc_end(c);
continue_at(cl, bch_moving_gc, bch_gc_wq);
}
@@ -1654,14 +1624,14 @@ static bool fix_overlapping_extents(struct btree *b,
struct btree_iter *iter,
struct btree_op *op)
{
- void subtract_dirty(struct bkey *k, int sectors)
+ void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
{
- struct bcache_device *d = b->c->devices[KEY_INODE(k)];
-
- if (KEY_DIRTY(k) && d)
- atomic_long_sub(sectors, &d->sectors_dirty);
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ offset, -sectors);
}
+ uint64_t old_offset;
unsigned old_size, sectors_found = 0;
while (1) {
@@ -1673,6 +1643,7 @@ static bool fix_overlapping_extents(struct btree *b,
if (bkey_cmp(k, &START_KEY(insert)) <= 0)
continue;
+ old_offset = KEY_START(k);
old_size = KEY_SIZE(k);
/*
@@ -1728,7 +1699,7 @@ static bool fix_overlapping_extents(struct btree *b,
struct bkey *top;
- subtract_dirty(k, KEY_SIZE(insert));
+ subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
if (bkey_written(b, k)) {
/*
@@ -1775,7 +1746,7 @@ static bool fix_overlapping_extents(struct btree *b,
}
}
- subtract_dirty(k, old_size - KEY_SIZE(k));
+ subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
}
check_failed:
@@ -1798,7 +1769,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
{
struct bset *i = b->sets[b->nsets].data;
struct bkey *m, *prev;
- const char *status = "insert";
+ unsigned status = BTREE_INSERT_STATUS_INSERT;
BUG_ON(bkey_cmp(k, &b->key) > 0);
BUG_ON(b->level && !KEY_PTRS(k));
@@ -1831,17 +1802,17 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
goto insert;
/* prev is in the tree, if we merge we're done */
- status = "back merging";
+ status = BTREE_INSERT_STATUS_BACK_MERGE;
if (prev &&
bch_bkey_try_merge(b, prev, k))
goto merged;
- status = "overwrote front";
+ status = BTREE_INSERT_STATUS_OVERWROTE;
if (m != end(i) &&
KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
goto copy;
- status = "front merge";
+ status = BTREE_INSERT_STATUS_FRONT_MERGE;
if (m != end(i) &&
bch_bkey_try_merge(b, k, m))
goto copy;
@@ -1851,21 +1822,21 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
insert: shift_keys(b, m, k);
copy: bkey_copy(m, k);
merged:
- bch_check_keys(b, "%s for %s at %s: %s", status,
- op_type(op), pbtree(b), pkey(k));
- bch_check_key_order_msg(b, i, "%s for %s at %s: %s", status,
- op_type(op), pbtree(b), pkey(k));
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+ KEY_START(k), KEY_SIZE(k));
+
+ bch_check_keys(b, "%u for %s", status, op_type(op));
if (b->level && !KEY_OFFSET(k))
- b->prio_blocked++;
+ btree_current_write(b)->prio_blocked++;
- pr_debug("%s for %s at %s: %s", status,
- op_type(op), pbtree(b), pkey(k));
+ trace_bcache_btree_insert_key(b, k, op->type, status);
return true;
}
-bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
+static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op)
{
bool ret = false;
struct bkey *k;
@@ -1896,7 +1867,7 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
should_split(b))
goto out;
- op->replace = KEY(op->inode, bio_end(bio), bio_sectors(bio));
+ op->replace = KEY(op->inode, bio_end_sector(bio), bio_sectors(bio));
SET_KEY_PTRS(&op->replace, 1);
get_random_bytes(&op->replace.ptr[0], sizeof(uint64_t));
@@ -1907,7 +1878,6 @@ bool bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
BUG_ON(op->type != BTREE_INSERT);
BUG_ON(!btree_insert_key(b, op, &tmp.k));
- bch_btree_write(b, false, NULL);
ret = true;
out:
downgrade_write(&b->lock);
@@ -1929,12 +1899,11 @@ static int btree_split(struct btree *b, struct btree_op *op)
split = set_blocks(n1->sets[0].data, n1->c) > (btree_blocks(b) * 4) / 5;
- pr_debug("%ssplitting at %s keys %i", split ? "" : "not ",
- pbtree(b), n1->sets[0].data->keys);
-
if (split) {
unsigned keys = 0;
+ trace_bcache_btree_node_split(b, n1->sets[0].data->keys);
+
n2 = bch_btree_node_alloc(b->c, b->level, &op->cl);
if (IS_ERR(n2))
goto err_free1;
@@ -1967,18 +1936,21 @@ static int btree_split(struct btree *b, struct btree_op *op)
bkey_copy_key(&n2->key, &b->key);
bch_keylist_add(&op->keys, &n2->key);
- bch_btree_write(n2, true, op);
+ bch_btree_node_write(n2, &op->cl);
rw_unlock(true, n2);
- } else
+ } else {
+ trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
+
bch_btree_insert_keys(n1, op);
+ }
bch_keylist_add(&op->keys, &n1->key);
- bch_btree_write(n1, true, op);
+ bch_btree_node_write(n1, &op->cl);
if (n3) {
bkey_copy_key(&n3->key, &MAX_KEY);
bch_btree_insert_keys(n3, op);
- bch_btree_write(n3, true, op);
+ bch_btree_node_write(n3, &op->cl);
closure_sync(&op->cl);
bch_btree_set_root(n3);
@@ -2082,8 +2054,12 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
BUG_ON(write_block(b) != b->sets[b->nsets].data);
- if (bch_btree_insert_keys(b, op))
- bch_btree_write(b, false, op);
+ if (bch_btree_insert_keys(b, op)) {
+ if (!b->level)
+ bch_btree_leaf_dirty(b, op);
+ else
+ bch_btree_node_write(b, &op->cl);
+ }
}
return 0;
@@ -2140,6 +2116,11 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c)
void bch_btree_set_root(struct btree *b)
{
unsigned i;
+ struct closure cl;
+
+ closure_init_stack(&cl);
+
+ trace_bcache_btree_set_root(b);
BUG_ON(!b->written);
@@ -2153,8 +2134,8 @@ void bch_btree_set_root(struct btree *b)
b->c->root = b;
__bkey_put(b->c, &b->key);
- bch_journal_meta(b->c, NULL);
- pr_debug("%s for %pf", pbtree(b), __builtin_return_address(0));
+ bch_journal_meta(b->c, &cl);
+ closure_sync(&cl);
}
/* Cache lookup */
@@ -2215,9 +2196,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
KEY_OFFSET(k) - bio->bi_sector);
n = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (!n)
- return -EAGAIN;
-
if (n == bio)
op->lookup_done = true;
@@ -2240,7 +2218,6 @@ static int submit_partial_cache_hit(struct btree *b, struct btree_op *op,
n->bi_end_io = bch_cache_read_endio;
n->bi_private = &s->cl;
- trace_bcache_cache_hit(n);
__bch_submit_bbio(n, b->c);
}
@@ -2257,9 +2234,6 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
struct btree_iter iter;
bch_btree_iter_init(b, &iter, &KEY(op->inode, bio->bi_sector, 0));
- pr_debug("at %s searching for %u:%llu", pbtree(b), op->inode,
- (uint64_t) bio->bi_sector);
-
do {
k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
if (!k) {
@@ -2303,7 +2277,8 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
}
static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
- struct keybuf *buf, struct bkey *end)
+ struct keybuf *buf, struct bkey *end,
+ keybuf_pred_fn *pred)
{
struct btree_iter iter;
bch_btree_iter_init(b, &iter, &buf->last_scanned);
@@ -2322,11 +2297,9 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
if (bkey_cmp(&buf->last_scanned, end) >= 0)
break;
- if (buf->key_predicate(buf, k)) {
+ if (pred(buf, k)) {
struct keybuf_key *w;
- pr_debug("%s", pkey(k));
-
spin_lock(&buf->lock);
w = array_alloc(&buf->freelist);
@@ -2343,7 +2316,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
if (!k)
break;
- btree(refill_keybuf, k, b, op, buf, end);
+ btree(refill_keybuf, k, b, op, buf, end, pred);
/*
* Might get an error here, but can't really do anything
* and it'll get logged elsewhere. Just read what we
@@ -2361,7 +2334,7 @@ static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
}
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
- struct bkey *end)
+ struct bkey *end, keybuf_pred_fn *pred)
{
struct bkey start = buf->last_scanned;
struct btree_op op;
@@ -2369,7 +2342,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
cond_resched();
- btree_root(refill_keybuf, c, &op, buf, end);
+ btree_root(refill_keybuf, c, &op, buf, end, pred);
closure_sync(&op.cl);
pr_debug("found %s keys from %llu:%llu to %llu:%llu",
@@ -2455,7 +2428,8 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
struct keybuf *buf,
- struct bkey *end)
+ struct bkey *end,
+ keybuf_pred_fn *pred)
{
struct keybuf_key *ret;
@@ -2469,15 +2443,14 @@ struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
break;
}
- bch_refill_keybuf(c, buf, end);
+ bch_refill_keybuf(c, buf, end, pred);
}
return ret;
}
-void bch_keybuf_init(struct keybuf *buf, keybuf_pred_fn *fn)
+void bch_keybuf_init(struct keybuf *buf)
{
- buf->key_predicate = fn;
buf->last_scanned = MAX_KEY;
buf->keys = RB_ROOT;
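One recurring pattern in the btree.c changes is bch_btree_leaf_dirty(): a leaf that takes an insert is only marked dirty, the first dirtying arms a 30 second delayed work item, and the node is written immediately once the in-memory set grows past roughly a page. A hedged sketch of that mark-dirty/flush-later shape with hypothetical names (locking and the closure plumbing are omitted):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/mm.h>

/* Hypothetical stand-in for a dirty btree leaf.
 * Setup would do: INIT_DELAYED_WORK(&d->work, dirty_buf_work); */
struct dirty_buf {
    struct delayed_work work;
    unsigned bytes;
    bool dirty;
};

#define FLUSH_DELAY    (30 * HZ)
#define FLUSH_LIMIT    (PAGE_SIZE - 48)    /* threshold used by the patch */

static void dirty_buf_flush(struct dirty_buf *d)
{
    d->dirty = false;
    d->bytes = 0;
    /* ... write the buffer out, as bch_btree_node_write() does ... */
}

static void dirty_buf_work(struct work_struct *w)
{
    struct dirty_buf *d = container_of(to_delayed_work(w),
                                       struct dirty_buf, work);

    if (d->dirty)
        dirty_buf_flush(d);
}

static void dirty_buf_append(struct dirty_buf *d, unsigned bytes)
{
    /* The first dirtying key arms the 30 second timer ... */
    if (!d->dirty)
        queue_delayed_work(system_wq, &d->work, FLUSH_DELAY);

    d->dirty = true;
    d->bytes += bytes;

    /* ... but a full buffer is flushed immediately. */
    if (d->bytes > FLUSH_LIMIT)
        dirty_buf_flush(d);
}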
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index af4a7092a28c..3333d3723633 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -102,7 +102,6 @@
#include "debug.h"
struct btree_write {
- struct closure *owner;
atomic_t *journal;
/* If btree_split() frees a btree node, it writes a new pointer to that
@@ -142,16 +141,12 @@ struct btree {
*/
struct bset_tree sets[MAX_BSETS];
- /* Used to refcount bio splits, also protects b->bio */
+ /* For outstanding btree writes, used as a lock - protects write_idx */
struct closure_with_waitlist io;
- /* Gets transferred to w->prio_blocked - see the comment there */
- int prio_blocked;
-
struct list_head list;
struct delayed_work work;
- uint64_t io_start_time;
struct btree_write writes[2];
struct bio *bio;
};
@@ -164,13 +159,11 @@ static inline void set_btree_node_ ## flag(struct btree *b) \
{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \
enum btree_flags {
- BTREE_NODE_read_done,
BTREE_NODE_io_error,
BTREE_NODE_dirty,
BTREE_NODE_write_idx,
};
-BTREE_FLAG(read_done);
BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
@@ -278,6 +271,13 @@ struct btree_op {
BKEY_PADDED(replace);
};
+enum {
+ BTREE_INSERT_STATUS_INSERT,
+ BTREE_INSERT_STATUS_BACK_MERGE,
+ BTREE_INSERT_STATUS_OVERWROTE,
+ BTREE_INSERT_STATUS_FRONT_MERGE,
+};
+
void bch_btree_op_init_stack(struct btree_op *);
static inline void rw_lock(bool w, struct btree *b, int level)
@@ -293,9 +293,7 @@ static inline void rw_unlock(bool w, struct btree *b)
#ifdef CONFIG_BCACHE_EDEBUG
unsigned i;
- if (w &&
- b->key.ptr[0] &&
- btree_node_read_done(b))
+ if (w && b->key.ptr[0])
for (i = 0; i <= b->nsets; i++)
bch_check_key_order(b, b->sets[i].data);
#endif
@@ -370,9 +368,8 @@ static inline bool should_split(struct btree *b)
> btree_blocks(b));
}
-void bch_btree_read_done(struct closure *);
-void bch_btree_read(struct btree *);
-void bch_btree_write(struct btree *b, bool now, struct btree_op *op);
+void bch_btree_node_read(struct btree *);
+void bch_btree_node_write(struct btree *, struct closure *);
void bch_cannibalize_unlock(struct cache_set *, struct closure *);
void bch_btree_set_root(struct btree *);
@@ -380,7 +377,6 @@ struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
int, struct btree_op *);
-bool bch_btree_insert_keys(struct btree *, struct btree_op *);
bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
struct bio *);
int bch_btree_insert(struct btree_op *, struct cache_set *);
@@ -393,13 +389,14 @@ void bch_moving_gc(struct closure *);
int bch_btree_check(struct cache_set *, struct btree_op *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
-void bch_keybuf_init(struct keybuf *, keybuf_pred_fn *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *);
+void bch_keybuf_init(struct keybuf *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
+ keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
-struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *,
- struct keybuf *, struct bkey *);
+struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
+ struct bkey *, keybuf_pred_fn *);
#endif
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index bd05a9a8c7cf..9aba2017f0d1 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -66,16 +66,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
} else {
struct closure *parent = cl->parent;
struct closure_waitlist *wait = closure_waitlist(cl);
+ closure_fn *destructor = cl->fn;
closure_debug_destroy(cl);
+ smp_mb();
atomic_set(&cl->remaining, -1);
if (wait)
closure_wake_up(wait);
- if (cl->fn)
- cl->fn(cl);
+ if (destructor)
+ destructor(cl);
if (parent)
closure_put(parent);
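The closure.c hunk fixes an ordering bug: once remaining is set to -1 and waiters are woken, the closure may be reinitialized or freed, so cl->fn must be read into a local before that point. A small sketch of the same rule on a generic refcounted object (names hypothetical); the point is only the order of the copy, the barrier, and the store that publishes the object as released:

#include <linux/atomic.h>
#include <linux/smp.h>

struct refobj {
    atomic_t remaining;
    void (*release)(struct refobj *);
};

static void refobj_put(struct refobj *obj)
{
    if (atomic_dec_and_test(&obj->remaining)) {
        /* Copy out everything still needed *before* anyone else can
         * observe the object as released and recycle it. */
        void (*release)(struct refobj *) = obj->release;

        smp_mb();
        atomic_set(&obj->remaining, -1);    /* publish "released" */

        /* obj may now be reused by a waiter; only the saved pointer
         * is known to be the value we decided on. */
        if (release)
            release(obj);
    }
}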
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 89fd5204924e..88e6411eab4f 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -47,11 +47,10 @@ const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
return "";
}
-struct keyprint_hack bch_pkey(const struct bkey *k)
+int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
unsigned i = 0;
- struct keyprint_hack r;
- char *out = r.s, *end = r.s + KEYHACK_SIZE;
+ char *out = buf, *end = buf + size;
#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
@@ -75,16 +74,14 @@ struct keyprint_hack bch_pkey(const struct bkey *k)
if (KEY_CSUM(k))
p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
- return r;
+ return out - buf;
}
-struct keyprint_hack bch_pbtree(const struct btree *b)
+int bch_btree_to_text(char *buf, size_t size, const struct btree *b)
{
- struct keyprint_hack r;
-
- snprintf(r.s, 40, "%zu level %i/%i", PTR_BUCKET_NR(b->c, &b->key, 0),
- b->level, b->c->root ? b->c->root->level : -1);
- return r;
+ return scnprintf(buf, size, "%zu level %i/%i",
+ PTR_BUCKET_NR(b->c, &b->key, 0),
+ b->level, b->c->root ? b->c->root->level : -1);
}
#if defined(CONFIG_BCACHE_DEBUG) || defined(CONFIG_BCACHE_EDEBUG)
@@ -100,10 +97,12 @@ static void dump_bset(struct btree *b, struct bset *i)
{
struct bkey *k;
unsigned j;
+ char buf[80];
for (k = i->start; k < end(i); k = bkey_next(k)) {
+ bch_bkey_to_text(buf, sizeof(buf), k);
printk(KERN_ERR "block %zu key %zi/%u: %s", index(i, b),
- (uint64_t *) k - i->d, i->keys, pkey(k));
+ (uint64_t *) k - i->d, i->keys, buf);
for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j);
@@ -144,7 +143,7 @@ void bch_btree_verify(struct btree *b, struct bset *new)
v->written = 0;
v->level = b->level;
- bch_btree_read(v);
+ bch_btree_node_read(v);
closure_wait_event(&v->io.wait, &cl,
atomic_read(&b->io.cl.remaining) == -1);
@@ -200,7 +199,7 @@ void bch_data_verify(struct search *s)
if (!check)
return;
- if (bch_bio_alloc_pages(check, GFP_NOIO))
+ if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
check->bi_rw = READ_SYNC;
@@ -252,6 +251,7 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
va_list args)
{
unsigned i;
+ char buf[80];
console_lock();
@@ -262,7 +262,8 @@ static void vdump_bucket_and_panic(struct btree *b, const char *fmt,
console_unlock();
- panic("at %s\n", pbtree(b));
+ bch_btree_to_text(buf, sizeof(buf), b);
+ panic("at %s\n", buf);
}
void bch_check_key_order_msg(struct btree *b, struct bset *i,
@@ -337,6 +338,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
{
struct dump_iterator *i = file->private_data;
ssize_t ret = 0;
+ char kbuf[80];
while (size) {
struct keybuf_key *w;
@@ -355,11 +357,12 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
if (i->bytes)
break;
- w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY);
+ w = bch_keybuf_next_rescan(i->c, &i->keys, &MAX_KEY, dump_pred);
if (!w)
break;
- i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", pkey(&w->key));
+ bch_bkey_to_text(kbuf, sizeof(kbuf), &w->key);
+ i->bytes = snprintf(i->buf, PAGE_SIZE, "%s\n", kbuf);
bch_keybuf_del(&i->keys, w);
}
@@ -377,7 +380,7 @@ static int bch_dump_open(struct inode *inode, struct file *file)
file->private_data = i;
i->c = c;
- bch_keybuf_init(&i->keys, dump_pred);
+ bch_keybuf_init(&i->keys);
i->keys.last_scanned = KEY(0, 0, 0);
return 0;
@@ -409,142 +412,6 @@ void bch_debug_init_cache_set(struct cache_set *c)
#endif
-/* Fuzz tester has rotted: */
-#if 0
-
-static ssize_t btree_fuzz(struct kobject *k, struct kobj_attribute *a,
- const char *buffer, size_t size)
-{
- void dump(struct btree *b)
- {
- struct bset *i;
-
- for (i = b->sets[0].data;
- index(i, b) < btree_blocks(b) &&
- i->seq == b->sets[0].data->seq;
- i = ((void *) i) + set_blocks(i, b->c) * block_bytes(b->c))
- dump_bset(b, i);
- }
-
- struct cache_sb *sb;
- struct cache_set *c;
- struct btree *all[3], *b, *fill, *orig;
- int j;
-
- struct btree_op op;
- bch_btree_op_init_stack(&op);
-
- sb = kzalloc(sizeof(struct cache_sb), GFP_KERNEL);
- if (!sb)
- return -ENOMEM;
-
- sb->bucket_size = 128;
- sb->block_size = 4;
-
- c = bch_cache_set_alloc(sb);
- if (!c)
- return -ENOMEM;
-
- for (j = 0; j < 3; j++) {
- BUG_ON(list_empty(&c->btree_cache));
- all[j] = list_first_entry(&c->btree_cache, struct btree, list);
- list_del_init(&all[j]->list);
-
- all[j]->key = KEY(0, 0, c->sb.bucket_size);
- bkey_copy_key(&all[j]->key, &MAX_KEY);
- }
-
- b = all[0];
- fill = all[1];
- orig = all[2];
-
- while (1) {
- for (j = 0; j < 3; j++)
- all[j]->written = all[j]->nsets = 0;
-
- bch_bset_init_next(b);
-
- while (1) {
- struct bset *i = write_block(b);
- struct bkey *k = op.keys.top;
- unsigned rand;
-
- bkey_init(k);
- rand = get_random_int();
-
- op.type = rand & 1
- ? BTREE_INSERT
- : BTREE_REPLACE;
- rand >>= 1;
-
- SET_KEY_SIZE(k, bucket_remainder(c, rand));
- rand >>= c->bucket_bits;
- rand &= 1024 * 512 - 1;
- rand += c->sb.bucket_size;
- SET_KEY_OFFSET(k, rand);
-#if 0
- SET_KEY_PTRS(k, 1);
-#endif
- bch_keylist_push(&op.keys);
- bch_btree_insert_keys(b, &op);
-
- if (should_split(b) ||
- set_blocks(i, b->c) !=
- __set_blocks(i, i->keys + 15, b->c)) {
- i->csum = csum_set(i);
-
- memcpy(write_block(fill),
- i, set_bytes(i));
-
- b->written += set_blocks(i, b->c);
- fill->written = b->written;
- if (b->written == btree_blocks(b))
- break;
-
- bch_btree_sort_lazy(b);
- bch_bset_init_next(b);
- }
- }
-
- memcpy(orig->sets[0].data,
- fill->sets[0].data,
- btree_bytes(c));
-
- bch_btree_sort(b);
- fill->written = 0;
- bch_btree_read_done(&fill->io.cl);
-
- if (b->sets[0].data->keys != fill->sets[0].data->keys ||
- memcmp(b->sets[0].data->start,
- fill->sets[0].data->start,
- b->sets[0].data->keys * sizeof(uint64_t))) {
- struct bset *i = b->sets[0].data;
- struct bkey *k, *l;
-
- for (k = i->start,
- l = fill->sets[0].data->start;
- k < end(i);
- k = bkey_next(k), l = bkey_next(l))
- if (bkey_cmp(k, l) ||
- KEY_SIZE(k) != KEY_SIZE(l))
- pr_err("key %zi differs: %s != %s",
- (uint64_t *) k - i->d,
- pkey(k), pkey(l));
-
- for (j = 0; j < 3; j++) {
- pr_err("**** Set %i ****", j);
- dump(all[j]);
- }
- panic("\n");
- }
-
- pr_info("fuzz complete: %i keys", b->sets[0].data->keys);
- }
-}
-
-kobj_attribute_write(fuzz, btree_fuzz);
-#endif
-
void bch_debug_exit(void)
{
if (!IS_ERR_OR_NULL(debug))
@@ -554,11 +421,6 @@ void bch_debug_exit(void)
int __init bch_debug_init(struct kobject *kobj)
{
int ret = 0;
-#if 0
- ret = sysfs_create_file(kobj, &ksysfs_fuzz.attr);
- if (ret)
- return ret;
-#endif
debug = debugfs_create_dir("bcache", NULL);
return ret;
diff --git a/drivers/md/bcache/debug.h b/drivers/md/bcache/debug.h
index f9378a218148..1c39b5a2489b 100644
--- a/drivers/md/bcache/debug.h
+++ b/drivers/md/bcache/debug.h
@@ -3,15 +3,8 @@
/* Btree/bkey debug printing */
-#define KEYHACK_SIZE 80
-struct keyprint_hack {
- char s[KEYHACK_SIZE];
-};
-
-struct keyprint_hack bch_pkey(const struct bkey *k);
-struct keyprint_hack bch_pbtree(const struct btree *b);
-#define pkey(k) (&bch_pkey(k).s[0])
-#define pbtree(b) (&bch_pbtree(b).s[0])
+int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k);
+int bch_btree_to_text(char *buf, size_t size, const struct btree *b);
#ifdef CONFIG_BCACHE_EDEBUG
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 48efd4dea645..9056632995b1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -9,6 +9,8 @@
#include "bset.h"
#include "debug.h"
+#include <linux/blkdev.h>
+
static void bch_bi_idx_hack_endio(struct bio *bio, int error)
{
struct bio *p = bio->bi_private;
@@ -66,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
* The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
* freed before the split.
- *
- * If bch_bio_split() is running under generic_make_request(), it's not safe to
- * allocate more than one bio from the same bio set. Therefore, if it is running
- * under generic_make_request() it masks out __GFP_WAIT when doing the
- * allocation. The caller must check for failure if there's any possibility of
- * it being called from under generic_make_request(); it is then the caller's
- * responsibility to retry from a safe context (by e.g. punting to workqueue).
*/
struct bio *bch_bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
@@ -83,20 +78,13 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
- /*
- * If we're being called from underneath generic_make_request() and we
- * already allocated any bios from this bio set, we risk deadlock if we
- * use the mempool. So instead, we possibly fail and let the caller punt
- * to workqueue or somesuch and retry in a safe context.
- */
- if (current->bio_list)
- gfp &= ~__GFP_WAIT;
-
if (sectors >= bio_sectors(bio))
return bio;
if (bio->bi_rw & REQ_DISCARD) {
ret = bio_alloc_bioset(gfp, 1, bs);
+ if (!ret)
+ return NULL;
idx = 0;
goto out;
}
@@ -160,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
queue_max_segments(q));
- struct bio_vec *bv, *end = bio_iovec(bio) +
- min_t(int, bio_segments(bio), max_segments);
if (bio->bi_rw & REQ_DISCARD)
return min(ret, q->limits.max_discard_sectors);
if (bio_segments(bio) > max_segments ||
q->merge_bvec_fn) {
+ struct bio_vec *bv;
+ int i, seg = 0;
+
ret = 0;
- for (bv = bio_iovec(bio); bv < end; bv++) {
+ bio_for_each_segment(bv, bio, i) {
struct bvec_merge_data bvm = {
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
@@ -178,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
.bi_rw = bio->bi_rw,
};
+ if (seg == max_segments)
+ break;
+
if (q->merge_bvec_fn &&
q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
break;
+ seg++;
ret += bv->bv_len >> 9;
}
}
@@ -218,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
closure_put(cl);
}
-static void __bch_bio_submit_split(struct closure *cl)
-{
- struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
- struct bio *bio = s->bio, *n;
-
- do {
- n = bch_bio_split(bio, bch_bio_max_sectors(bio),
- GFP_NOIO, s->p->bio_split);
- if (!n)
- continue_at(cl, __bch_bio_submit_split, system_wq);
-
- n->bi_end_io = bch_bio_submit_split_endio;
- n->bi_private = cl;
-
- closure_get(cl);
- bch_generic_make_request_hack(n);
- } while (n != bio);
-
- continue_at(cl, bch_bio_submit_split_done, NULL);
-}
-
void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
{
struct bio_split_hook *s;
+ struct bio *n;
if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
goto submit;
@@ -250,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
goto submit;
s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+ closure_init(&s->cl, NULL);
s->bio = bio;
s->p = p;
@@ -257,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
s->bi_private = bio->bi_private;
bio_get(bio);
- closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
- return;
+ do {
+ n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+ GFP_NOIO, s->p->bio_split);
+
+ n->bi_end_io = bch_bio_submit_split_endio;
+ n->bi_private = &s->cl;
+
+ closure_get(&s->cl);
+ bch_generic_make_request_hack(n);
+ } while (n != bio);
+
+ continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
bch_generic_make_request_hack(bio);
}
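bch_generic_make_request() now performs the split loop inline: it initializes a closure, takes one reference per submitted fragment, and completes asynchronously via continue_at() once the loop has walked the whole bio. Since closures are local to bcache, the sketch below shows the same take-a-reference-per-fragment idea with an atomic count and a completion; it blocks at the end rather than continuing asynchronously, and all names are hypothetical:

#include <linux/atomic.h>
#include <linux/completion.h>

struct split_ctx {
    atomic_t pending;
    struct completion done;
};

static void split_ctx_put(struct split_ctx *ctx)
{
    if (atomic_dec_and_test(&ctx->pending))
        complete(&ctx->done);
}

static void submit_fragments(struct split_ctx *ctx, int nr_fragments)
{
    int i;

    atomic_set(&ctx->pending, 1);    /* the submitter's own reference */
    init_completion(&ctx->done);

    for (i = 0; i < nr_fragments; i++) {
        atomic_inc(&ctx->pending);    /* one per in-flight fragment */
        /* ... submit fragment i; its endio calls split_ctx_put(ctx) ... */
    }

    split_ctx_put(ctx);              /* drop the submitter's reference */
    wait_for_completion(&ctx->done);
}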
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 8c8dfdcd9d4c..ba95ab84b2be 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -9,6 +9,8 @@
#include "debug.h"
#include "request.h"
+#include <trace/events/bcache.h>
+
/*
* Journal replay/recovery:
*
@@ -182,9 +184,14 @@ bsearch:
pr_debug("starting binary search, l %u r %u", l, r);
while (l + 1 < r) {
+ seq = list_entry(list->prev, struct journal_replay,
+ list)->j.seq;
+
m = (l + r) >> 1;
+ read_bucket(m);
- if (read_bucket(m))
+ if (seq != list_entry(list->prev, struct journal_replay,
+ list)->j.seq)
l = m;
else
r = m;
@@ -300,7 +307,8 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
for (k = i->j.start;
k < end(&i->j);
k = bkey_next(k)) {
- pr_debug("%s", pkey(k));
+ trace_bcache_journal_replay_key(k);
+
bkey_copy(op->keys.top, k);
bch_keylist_push(&op->keys);
@@ -384,7 +392,7 @@ out:
return;
found:
if (btree_node_dirty(best))
- bch_btree_write(best, true, NULL);
+ bch_btree_node_write(best, NULL);
rw_unlock(true, best);
}
@@ -617,7 +625,7 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio);
bio->bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev;
- bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH;
+ bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
bio->bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio;
@@ -712,7 +720,8 @@ void bch_journal(struct closure *cl)
spin_lock(&c->journal.lock);
if (journal_full(&c->journal)) {
- /* XXX: tracepoint */
+ trace_bcache_journal_full(c);
+
closure_wait(&c->journal.wait, cl);
journal_reclaim(c);
@@ -728,13 +737,15 @@ void bch_journal(struct closure *cl)
if (b * c->sb.block_size > PAGE_SECTORS << JSET_BITS ||
b > c->journal.blocks_free) {
- /* XXX: If we were inserting so many keys that they won't fit in
+ trace_bcache_journal_entry_full(c);
+
+ /*
+ * XXX: If we were inserting so many keys that they won't fit in
* an _empty_ journal write, we'll deadlock. For now, handle
* this in bch_keylist_realloc() - but something to think about.
*/
BUG_ON(!w->data->keys);
- /* XXX: tracepoint */
BUG_ON(!closure_wait(&w->wait, cl));
closure_flush(&c->journal.io);
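The journal-read hunk changes how the binary search decides which half to keep: instead of trusting read_bucket()'s return value, it records the newest replay sequence number before probing bucket m and checks afterwards whether the list tail advanced. Stripped of the journal details, the loop is a bisection for the highest index whose probe still turns up something new; a generic sketch (probe() and its argument are placeholders):

/* probe() returns true if index m yields entries newer than anything
 * seen so far - in the patch, that is detected by comparing the last
 * journal_replay seq before and after read_bucket(m). */
static unsigned find_newest_index(unsigned l, unsigned r,
                                  bool (*probe)(unsigned m, void *arg),
                                  void *arg)
{
    while (l + 1 < r) {
        unsigned m = (l + r) >> 1;

        if (probe(m, arg))
            l = m;        /* newer entries live at or above m */
        else
            r = m;
    }

    return l;
}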
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 8589512c972e..1a3b4f4786c3 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -9,6 +9,8 @@
#include "debug.h"
#include "request.h"
+#include <trace/events/bcache.h>
+
struct moving_io {
struct keybuf_key *w;
struct search s;
@@ -44,14 +46,14 @@ static void write_moving_finish(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, s.cl);
struct bio *bio = &io->bio.bio;
- struct bio_vec *bv = bio_iovec_idx(bio, bio->bi_vcnt);
+ struct bio_vec *bv;
+ int i;
- while (bv-- != bio->bi_io_vec)
+ bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
- pr_debug("%s %s", io->s.op.insert_collision
- ? "collision moving" : "moved",
- pkey(&io->w->key));
+ if (io->s.op.insert_collision)
+ trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
@@ -94,8 +96,6 @@ static void write_moving(struct closure *cl)
struct moving_io *io = container_of(s, struct moving_io, s);
if (!s->error) {
- trace_bcache_write_moving(&io->bio.bio);
-
moving_init(io);
io->bio.bio.bi_sector = KEY_START(&io->w->key);
@@ -122,7 +122,6 @@ static void read_moving_submit(struct closure *cl)
struct moving_io *io = container_of(s, struct moving_io, s);
struct bio *bio = &io->bio.bio;
- trace_bcache_read_moving(bio);
bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
continue_at(cl, write_moving, bch_gc_wq);
@@ -138,7 +137,8 @@ static void read_moving(struct closure *cl)
/* XXX: if we error, background writeback could stall indefinitely */
while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
- w = bch_keybuf_next_rescan(c, &c->moving_gc_keys, &MAX_KEY);
+ w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
+ &MAX_KEY, moving_pred);
if (!w)
break;
@@ -159,10 +159,10 @@ static void read_moving(struct closure *cl)
bio->bi_rw = READ;
bio->bi_end_io = read_moving_endio;
- if (bch_bio_alloc_pages(bio, GFP_KERNEL))
+ if (bio_alloc_pages(bio, GFP_KERNEL))
goto err;
- pr_debug("%s", pkey(&w->key));
+ trace_bcache_gc_copy(&w->key);
closure_call(&io->s.cl, read_moving_submit, NULL, &c->gc.cl);
@@ -250,5 +250,5 @@ void bch_moving_gc(struct closure *cl)
void bch_moving_init_cache_set(struct cache_set *c)
{
- bch_keybuf_init(&c->moving_gc_keys, moving_pred);
+ bch_keybuf_init(&c->moving_gc_keys);
}
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index e5ff12e52d5b..786a1a4f74d8 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -10,6 +10,7 @@
#include "btree.h"
#include "debug.h"
#include "request.h"
+#include "writeback.h"
#include <linux/cgroup.h>
#include <linux/module.h>
@@ -21,8 +22,6 @@
#define CUTOFF_CACHE_ADD 95
#define CUTOFF_CACHE_READA 90
-#define CUTOFF_WRITEBACK 50
-#define CUTOFF_WRITEBACK_SYNC 75
struct kmem_cache *bch_search_cache;
@@ -489,6 +488,12 @@ static void bch_insert_data_loop(struct closure *cl)
bch_queue_gc(op->c);
}
+ /*
+ * Journal writes are marked REQ_FLUSH; if the original write was a
+ * flush, it'll wait on the journal write.
+ */
+ bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);
+
do {
unsigned i;
struct bkey *k;
@@ -510,10 +515,6 @@ static void bch_insert_data_loop(struct closure *cl)
goto err;
n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
- if (!n) {
- __bkey_put(op->c, k);
- continue_at(cl, bch_insert_data_loop, bcache_wq);
- }
n->bi_end_io = bch_insert_data_endio;
n->bi_private = cl;
@@ -530,10 +531,9 @@ static void bch_insert_data_loop(struct closure *cl)
if (KEY_CSUM(k))
bio_csum(n, k);
- pr_debug("%s", pkey(k));
+ trace_bcache_cache_insert(k);
bch_keylist_push(&op->keys);
- trace_bcache_cache_insert(n, n->bi_sector, n->bi_bdev);
n->bi_rw |= REQ_WRITE;
bch_submit_bbio(n, op->c, k, 0);
} while (n != bio);
@@ -716,7 +716,7 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
s->task = current;
s->orig_bio = bio;
s->write = (bio->bi_rw & REQ_WRITE) != 0;
- s->op.flush_journal = (bio->bi_rw & REQ_FLUSH) != 0;
+ s->op.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
s->op.skip = (bio->bi_rw & REQ_DISCARD) != 0;
s->recoverable = 1;
s->start_time = jiffies;
@@ -784,11 +784,8 @@ static void request_read_error(struct closure *cl)
int i;
if (s->recoverable) {
- /* The cache read failed, but we can retry from the backing
- * device.
- */
- pr_debug("recovering at sector %llu",
- (uint64_t) s->orig_bio->bi_sector);
+ /* Retry from the backing device: */
+ trace_bcache_read_retry(s->orig_bio);
s->error = 0;
bv = s->bio.bio.bi_io_vec;
@@ -806,7 +803,6 @@ static void request_read_error(struct closure *cl)
/* XXX: invalidate cache */
- trace_bcache_read_retry(&s->bio.bio);
closure_bio_submit(&s->bio.bio, &s->cl, s->d);
}
@@ -827,53 +823,13 @@ static void request_read_done(struct closure *cl)
*/
if (s->op.cache_bio) {
- struct bio_vec *src, *dst;
- unsigned src_offset, dst_offset, bytes;
- void *dst_ptr;
-
bio_reset(s->op.cache_bio);
s->op.cache_bio->bi_sector = s->cache_miss->bi_sector;
s->op.cache_bio->bi_bdev = s->cache_miss->bi_bdev;
s->op.cache_bio->bi_size = s->cache_bio_sectors << 9;
bch_bio_map(s->op.cache_bio, NULL);
- src = bio_iovec(s->op.cache_bio);
- dst = bio_iovec(s->cache_miss);
- src_offset = src->bv_offset;
- dst_offset = dst->bv_offset;
- dst_ptr = kmap(dst->bv_page);
-
- while (1) {
- if (dst_offset == dst->bv_offset + dst->bv_len) {
- kunmap(dst->bv_page);
- dst++;
- if (dst == bio_iovec_idx(s->cache_miss,
- s->cache_miss->bi_vcnt))
- break;
-
- dst_offset = dst->bv_offset;
- dst_ptr = kmap(dst->bv_page);
- }
-
- if (src_offset == src->bv_offset + src->bv_len) {
- src++;
- if (src == bio_iovec_idx(s->op.cache_bio,
- s->op.cache_bio->bi_vcnt))
- BUG();
-
- src_offset = src->bv_offset;
- }
-
- bytes = min(dst->bv_offset + dst->bv_len - dst_offset,
- src->bv_offset + src->bv_len - src_offset);
-
- memcpy(dst_ptr + dst_offset,
- page_address(src->bv_page) + src_offset,
- bytes);
-
- src_offset += bytes;
- dst_offset += bytes;
- }
+ bio_copy_data(s->cache_miss, s->op.cache_bio);
bio_put(s->cache_miss);
s->cache_miss = NULL;
@@ -899,6 +855,7 @@ static void request_read_done_bh(struct closure *cl)
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s, !s->cache_miss, s->op.skip);
+ trace_bcache_read(s->orig_bio, !s->cache_miss, s->op.skip);
if (s->error)
continue_at_nobarrier(cl, request_read_error, bcache_wq);
@@ -917,9 +874,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *miss;
miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
- if (!miss)
- return -EAGAIN;
-
if (miss == bio)
s->op.lookup_done = true;
@@ -938,8 +892,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
reada = min(dc->readahead >> 9,
sectors - bio_sectors(miss));
- if (bio_end(miss) + reada > bdev_sectors(miss->bi_bdev))
- reada = bdev_sectors(miss->bi_bdev) - bio_end(miss);
+ if (bio_end_sector(miss) + reada > bdev_sectors(miss->bi_bdev))
+ reada = bdev_sectors(miss->bi_bdev) -
+ bio_end_sector(miss);
}
s->cache_bio_sectors = bio_sectors(miss) + reada;
@@ -963,13 +918,12 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_put;
bch_bio_map(s->op.cache_bio, NULL);
- if (bch_bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
+ if (bio_alloc_pages(s->op.cache_bio, __GFP_NOWARN|GFP_NOIO))
goto out_put;
s->cache_miss = miss;
bio_get(s->op.cache_bio);
- trace_bcache_cache_miss(s->orig_bio);
closure_bio_submit(s->op.cache_bio, &s->cl, s->d);
return ret;
@@ -1002,24 +956,13 @@ static void cached_dev_write_complete(struct closure *cl)
cached_dev_bio_complete(cl);
}
-static bool should_writeback(struct cached_dev *dc, struct bio *bio)
-{
- unsigned threshold = (bio->bi_rw & REQ_SYNC)
- ? CUTOFF_WRITEBACK_SYNC
- : CUTOFF_WRITEBACK;
-
- return !atomic_read(&dc->disk.detaching) &&
- cache_mode(dc, bio) == CACHE_MODE_WRITEBACK &&
- dc->disk.c->gc_stats.in_use < threshold;
-}
-
static void request_write(struct cached_dev *dc, struct search *s)
{
struct closure *cl = &s->cl;
struct bio *bio = &s->bio.bio;
struct bkey start, end;
start = KEY(dc->disk.id, bio->bi_sector, 0);
- end = KEY(dc->disk.id, bio_end(bio), 0);
+ end = KEY(dc->disk.id, bio_end_sector(bio), 0);
bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys, &start, &end);
@@ -1034,22 +977,37 @@ static void request_write(struct cached_dev *dc, struct search *s)
if (bio->bi_rw & REQ_DISCARD)
goto skip;
+ if (should_writeback(dc, s->orig_bio,
+ cache_mode(dc, bio),
+ s->op.skip)) {
+ s->op.skip = false;
+ s->writeback = true;
+ }
+
if (s->op.skip)
goto skip;
- if (should_writeback(dc, s->orig_bio))
- s->writeback = true;
+ trace_bcache_write(s->orig_bio, s->writeback, s->op.skip);
if (!s->writeback) {
s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
dc->disk.bio_split);
- trace_bcache_writethrough(s->orig_bio);
closure_bio_submit(bio, cl, s->d);
} else {
- s->op.cache_bio = bio;
- trace_bcache_writeback(s->orig_bio);
- bch_writeback_add(dc, bio_sectors(bio));
+ bch_writeback_add(dc);
+
+ if (s->op.flush_journal) {
+ /* Also need to send a flush to the backing device */
+ s->op.cache_bio = bio_clone_bioset(bio, GFP_NOIO,
+ dc->disk.bio_split);
+
+ bio->bi_size = 0;
+ bio->bi_vcnt = 0;
+ closure_bio_submit(bio, cl, s->d);
+ } else {
+ s->op.cache_bio = bio;
+ }
}
out:
closure_call(&s->op.cl, bch_insert_data, NULL, cl);
@@ -1058,7 +1016,6 @@ skip:
s->op.skip = true;
s->op.cache_bio = s->orig_bio;
bio_get(s->op.cache_bio);
- trace_bcache_write_skip(s->orig_bio);
if ((bio->bi_rw & REQ_DISCARD) &&
!blk_queue_discard(bdev_get_queue(dc->bdev)))
@@ -1088,9 +1045,10 @@ static void request_nodata(struct cached_dev *dc, struct search *s)
/* Cached devices - read & write stuff */
-int bch_get_congested(struct cache_set *c)
+unsigned bch_get_congested(struct cache_set *c)
{
int i;
+ long rand;
if (!c->congested_read_threshold_us &&
!c->congested_write_threshold_us)
@@ -1106,7 +1064,13 @@ int bch_get_congested(struct cache_set *c)
i += CONGESTED_MAX;
- return i <= 0 ? 1 : fract_exp_two(i, 6);
+ if (i > 0)
+ i = fract_exp_two(i, 6);
+
+ rand = get_random_int();
+ i -= bitmap_weight(&rand, BITS_PER_LONG);
+
+ return i > 0 ? i : 1;
}
static void add_sequential(struct task_struct *t)
@@ -1126,10 +1090,8 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
{
struct cache_set *c = s->op.c;
struct bio *bio = &s->bio.bio;
-
- long rand;
- int cutoff = bch_get_congested(c);
unsigned mode = cache_mode(dc, bio);
+ unsigned sectors, congested = bch_get_congested(c);
if (atomic_read(&dc->disk.detaching) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
@@ -1147,17 +1109,14 @@ static void check_should_skip(struct cached_dev *dc, struct search *s)
goto skip;
}
- if (!cutoff) {
- cutoff = dc->sequential_cutoff >> 9;
+ if (!congested && !dc->sequential_cutoff)
+ goto rescale;
- if (!cutoff)
- goto rescale;
-
- if (mode == CACHE_MODE_WRITEBACK &&
- (bio->bi_rw & REQ_WRITE) &&
- (bio->bi_rw & REQ_SYNC))
- goto rescale;
- }
+ if (!congested &&
+ mode == CACHE_MODE_WRITEBACK &&
+ (bio->bi_rw & REQ_WRITE) &&
+ (bio->bi_rw & REQ_SYNC))
+ goto rescale;
if (dc->sequential_merge) {
struct io *i;
@@ -1177,7 +1136,7 @@ found:
if (i->sequential + bio->bi_size > i->sequential)
i->sequential += bio->bi_size;
- i->last = bio_end(bio);
+ i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
s->task->sequential_io = i->sequential;
@@ -1192,12 +1151,19 @@ found:
add_sequential(s->task);
}
- rand = get_random_int();
- cutoff -= bitmap_weight(&rand, BITS_PER_LONG);
+ sectors = max(s->task->sequential_io,
+ s->task->sequential_io_avg) >> 9;
- if (cutoff <= (int) (max(s->task->sequential_io,
- s->task->sequential_io_avg) >> 9))
+ if (dc->sequential_cutoff &&
+ sectors >= dc->sequential_cutoff >> 9) {
+ trace_bcache_bypass_sequential(s->orig_bio);
goto skip;
+ }
+
+ if (congested && sectors >= congested) {
+ trace_bcache_bypass_congested(s->orig_bio);
+ goto skip;
+ }
rescale:
bch_rescale_priorities(c, bio_sectors(bio));
@@ -1288,30 +1254,25 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors)
{
+ struct bio_vec *bv;
+ int i;
+
/* Zero fill bio */
- while (bio->bi_idx != bio->bi_vcnt) {
- struct bio_vec *bv = bio_iovec(bio);
+ bio_for_each_segment(bv, bio, i) {
unsigned j = min(bv->bv_len >> 9, sectors);
void *p = kmap(bv->bv_page);
memset(p + bv->bv_offset, 0, j << 9);
kunmap(bv->bv_page);
- bv->bv_len -= j << 9;
- bv->bv_offset += j << 9;
-
- if (bv->bv_len)
- return 0;
-
- bio->bi_sector += j;
- bio->bi_size -= j << 9;
-
- bio->bi_idx++;
- sectors -= j;
+ sectors -= j;
}
- s->op.lookup_done = true;
+ bio_advance(bio, min(sectors << 9, bio->bi_size));
+
+ if (!bio->bi_size)
+ s->op.lookup_done = true;
return 0;
}
@@ -1338,8 +1299,8 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
closure_call(&s->op.cl, btree_read_async, NULL, cl);
} else if (bio_has_data(bio) || s->op.skip) {
bch_keybuf_check_overlapping(&s->op.c->moving_gc_keys,
- &KEY(d->id, bio->bi_sector, 0),
- &KEY(d->id, bio_end(bio), 0));
+ &KEY(d->id, bio->bi_sector, 0),
+ &KEY(d->id, bio_end_sector(bio), 0));
s->writeback = true;
s->op.cache_bio = bio;
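The reworked check_should_skip() above replaces the old single cutoff with two independent bypass tests: the per-task sequential I/O size is compared against dc->sequential_cutoff, and separately against the congestion value returned by bch_get_congested() (where zero means "not congested"). A minimal userspace sketch of that decision, with the kernel structures flattened into plain parameters (the names below are illustrative, not bcache's):

```c
#include <stdbool.h>
#include <stdint.h>

/* Simplified model of the bypass decision in check_should_skip().
 * sequential_io / sequential_io_avg and sequential_cutoff are byte counts;
 * congested is the sector threshold from bch_get_congested(), 0 = idle. */
static bool should_bypass(uint64_t sequential_io, uint64_t sequential_io_avg,
                          uint64_t sequential_cutoff, unsigned congested)
{
        uint64_t sectors = (sequential_io > sequential_io_avg
                            ? sequential_io : sequential_io_avg) >> 9;

        if (sequential_cutoff && sectors >= sequential_cutoff >> 9)
                return true;    /* trace_bcache_bypass_sequential() */

        if (congested && sectors >= congested)
                return true;    /* trace_bcache_bypass_congested() */

        return false;
}
```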
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 254d9ab5707c..57dc4784f4f4 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -30,7 +30,7 @@ struct search {
};
void bch_cache_read_endio(struct bio *, int);
-int bch_get_congested(struct cache_set *);
+unsigned bch_get_congested(struct cache_set *);
void bch_insert_data(struct closure *cl);
void bch_btree_insert_async(struct closure *);
void bch_cache_read_endio(struct bio *, int);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f88e2b653a3f..547c4c57b052 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -10,10 +10,13 @@
#include "btree.h"
#include "debug.h"
#include "request.h"
+#include "writeback.h"
+#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
@@ -342,6 +345,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
struct closure *cl = &c->uuid_write.cl;
struct uuid_entry *u;
unsigned i;
+ char buf[80];
BUG_ON(!parent);
closure_lock(&c->uuid_write, parent);
@@ -362,8 +366,8 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
break;
}
- pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
- pkey(&c->uuid_bucket));
+ bch_bkey_to_text(buf, sizeof(buf), k);
+ pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
if (!bch_is_zero(u->uuid, 16))
@@ -543,7 +547,6 @@ void bch_prio_write(struct cache *ca)
pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
fifo_used(&ca->free_inc), fifo_used(&ca->unused));
- blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
for (i = prio_buckets(ca) - 1; i >= 0; --i) {
long bucket;
@@ -704,7 +707,8 @@ static void bcache_device_detach(struct bcache_device *d)
atomic_set(&d->detaching, 0);
}
- bcache_device_unlink(d);
+ if (!d->flush_done)
+ bcache_device_unlink(d);
d->c->devices[d->id] = NULL;
closure_put(&d->c->caching);
@@ -743,13 +747,35 @@ static void bcache_device_free(struct bcache_device *d)
mempool_destroy(d->unaligned_bvec);
if (d->bio_split)
bioset_free(d->bio_split);
+ if (is_vmalloc_addr(d->stripe_sectors_dirty))
+ vfree(d->stripe_sectors_dirty);
+ else
+ kfree(d->stripe_sectors_dirty);
closure_debug_destroy(&d->cl);
}
-static int bcache_device_init(struct bcache_device *d, unsigned block_size)
+static int bcache_device_init(struct bcache_device *d, unsigned block_size,
+ sector_t sectors)
{
struct request_queue *q;
+ size_t n;
+
+ if (!d->stripe_size_bits)
+ d->stripe_size_bits = 31;
+
+ d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
+ d->stripe_size_bits;
+
+ if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+ return -ENOMEM;
+
+ n = d->nr_stripes * sizeof(atomic_t);
+ d->stripe_sectors_dirty = n < PAGE_SIZE << 6
+ ? kzalloc(n, GFP_KERNEL)
+ : vzalloc(n);
+ if (!d->stripe_sectors_dirty)
+ return -ENOMEM;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
@@ -759,6 +785,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
!(q = blk_alloc_queue(GFP_KERNEL)))
return -ENOMEM;
+ set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
d->disk->major = bcache_major;
@@ -781,6 +808,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size)
set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
+ blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
return 0;
}
@@ -800,6 +829,17 @@ static void calc_cached_dev_sectors(struct cache_set *c)
void bch_cached_dev_run(struct cached_dev *dc)
{
struct bcache_device *d = &dc->disk;
+ char buf[SB_LABEL_SIZE + 1];
+ char *env[] = {
+ "DRIVER=bcache",
+ kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
+ NULL,
+ NULL,
+ };
+
+ memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+ buf[SB_LABEL_SIZE] = '\0';
+ env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
if (atomic_xchg(&dc->running, 1))
return;
@@ -816,10 +856,12 @@ void bch_cached_dev_run(struct cached_dev *dc)
add_disk(d->disk);
bd_link_disk_holder(dc->bdev, dc->disk.disk);
-#if 0
- char *env[] = { "SYMLINK=label" , NULL };
+ /* won't show up in the uevent file, use udevadm monitor -e instead
+ * only class / kset properties are persistent */
kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
-#endif
+ kfree(env[1]);
+ kfree(env[2]);
+
if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
pr_debug("error creating sysfs link");
@@ -960,6 +1002,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
atomic_set(&dc->count, 1);
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ bch_sectors_dirty_init(dc);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@@ -1014,6 +1057,14 @@ static void cached_dev_flush(struct closure *cl)
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;
+ mutex_lock(&bch_register_lock);
+ d->flush_done = 1;
+
+ if (d->c)
+ bcache_device_unlink(d);
+
+ mutex_unlock(&bch_register_lock);
+
bch_cache_accounting_destroy(&dc->accounting);
kobject_del(&d->kobj);
@@ -1045,7 +1096,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
}
- ret = bcache_device_init(&dc->disk, block_size);
+ ret = bcache_device_init(&dc->disk, block_size,
+ dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
if (ret)
return ret;
@@ -1144,11 +1196,10 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
- if (bcache_device_init(d, block_bytes(c)))
+ if (bcache_device_init(d, block_bytes(c), u->sectors))
goto err;
bcache_device_attach(d, c, u - c->uuids);
- set_capacity(d->disk, u->sectors);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@@ -1255,9 +1306,10 @@ static void cache_set_free(struct closure *cl)
free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
- kfree(c->fill_iter);
if (c->bio_split)
bioset_free(c->bio_split);
+ if (c->fill_iter)
+ mempool_destroy(c->fill_iter);
if (c->bio_meta)
mempool_destroy(c->bio_meta);
if (c->search)
@@ -1278,11 +1330,9 @@ static void cache_set_free(struct closure *cl)
static void cache_set_flush(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
+ struct cache *ca;
struct btree *b;
-
- /* Shut down allocator threads */
- set_bit(CACHE_SET_STOPPING_2, &c->flags);
- wake_up(&c->alloc_wait);
+ unsigned i;
bch_cache_accounting_destroy(&c->accounting);
@@ -1295,7 +1345,11 @@ static void cache_set_flush(struct closure *cl)
/* Should skip this if we're unregistering because of an error */
list_for_each_entry(b, &c->btree_cache, list)
if (btree_node_dirty(b))
- bch_btree_write(b, true, NULL);
+ bch_btree_node_write(b, NULL);
+
+ for_each_cache(ca, c, i)
+ if (ca->alloc_thread)
+ kthread_stop(ca->alloc_thread);
closure_return(cl);
}
@@ -1303,18 +1357,22 @@ static void cache_set_flush(struct closure *cl)
static void __cache_set_unregister(struct closure *cl)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
- struct cached_dev *dc, *t;
+ struct cached_dev *dc;
size_t i;
mutex_lock(&bch_register_lock);
- if (test_bit(CACHE_SET_UNREGISTERING, &c->flags))
- list_for_each_entry_safe(dc, t, &c->cached_devs, list)
- bch_cached_dev_detach(dc);
-
for (i = 0; i < c->nr_uuids; i++)
- if (c->devices[i] && UUID_FLASH_ONLY(&c->uuids[i]))
- bcache_device_stop(c->devices[i]);
+ if (c->devices[i]) {
+ if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
+ test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
+ dc = container_of(c->devices[i],
+ struct cached_dev, disk);
+ bch_cached_dev_detach(dc);
+ } else {
+ bcache_device_stop(c->devices[i]);
+ }
+ }
mutex_unlock(&bch_register_lock);
@@ -1373,9 +1431,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
c->btree_pages = max_t(int, c->btree_pages / 4,
BTREE_MAX_PAGES);
- init_waitqueue_head(&c->alloc_wait);
+ c->sort_crit_factor = int_sqrt(c->btree_pages);
+
mutex_init(&c->bucket_lock);
- mutex_init(&c->fill_lock);
mutex_init(&c->sort_lock);
spin_lock_init(&c->sort_time_lock);
closure_init_unlocked(&c->sb_write);
@@ -1401,8 +1459,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
!(c->bio_meta = mempool_create_kmalloc_pool(2,
sizeof(struct bbio) + sizeof(struct bio_vec) *
bucket_pages(c))) ||
+ !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
!(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
!(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
bch_journal_alloc(c) ||
@@ -1410,8 +1468,6 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bch_open_buckets_alloc(c))
goto err;
- c->fill_iter->size = sb->bucket_size / sb->block_size;
-
c->congested_read_threshold_us = 2000;
c->congested_write_threshold_us = 20000;
c->error_limit = 8 << IO_ERROR_SHIFT;
@@ -1496,9 +1552,10 @@ static void run_cache_set(struct cache_set *c)
*/
bch_journal_next(&c->journal);
+ err = "error starting allocator thread";
for_each_cache(ca, c, i)
- closure_call(&ca->alloc, bch_allocator_thread,
- system_wq, &c->cl);
+ if (bch_cache_allocator_start(ca))
+ goto err;
/*
* First place it's safe to allocate: btree_check() and
@@ -1531,17 +1588,16 @@ static void run_cache_set(struct cache_set *c)
bch_btree_gc_finish(c);
+ err = "error starting allocator thread";
for_each_cache(ca, c, i)
- closure_call(&ca->alloc, bch_allocator_thread,
- ca->alloc_workqueue, &c->cl);
+ if (bch_cache_allocator_start(ca))
+ goto err;
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i)
bch_prio_write(ca);
mutex_unlock(&c->bucket_lock);
- wake_up(&c->alloc_wait);
-
err = "cannot allocate new UUID bucket";
if (__uuid_write(c))
goto err_unlock_gc;
@@ -1552,7 +1608,7 @@ static void run_cache_set(struct cache_set *c)
goto err_unlock_gc;
bkey_copy_key(&c->root->key, &MAX_KEY);
- bch_btree_write(c->root, true, &op);
+ bch_btree_node_write(c->root, &op.cl);
bch_btree_set_root(c->root);
rw_unlock(true, c->root);
@@ -1673,9 +1729,6 @@ void bch_cache_release(struct kobject *kobj)
bio_split_pool_free(&ca->bio_split_hook);
- if (ca->alloc_workqueue)
- destroy_workqueue(ca->alloc_workqueue);
-
free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
kfree(ca->prio_buckets);
vfree(ca->buckets);
@@ -1723,7 +1776,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
- !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
bio_split_pool_init(&ca->bio_split_hook))
return -ENOMEM;
@@ -1786,6 +1838,36 @@ static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
+static bool bch_is_open_backing(struct block_device *bdev) {
+ struct cache_set *c, *tc;
+ struct cached_dev *dc, *t;
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+ if (dc->bdev == bdev)
+ return true;
+ list_for_each_entry_safe(dc, t, &uncached_devices, list)
+ if (dc->bdev == bdev)
+ return true;
+ return false;
+}
+
+static bool bch_is_open_cache(struct block_device *bdev) {
+ struct cache_set *c, *tc;
+ struct cache *ca;
+ unsigned i;
+
+ list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+ for_each_cache(ca, c, i)
+ if (ca->bdev == bdev)
+ return true;
+ return false;
+}
+
+static bool bch_is_open(struct block_device *bdev) {
+ return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+}
+
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
const char *buffer, size_t size)
{
@@ -1810,8 +1892,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
FMODE_READ|FMODE_WRITE|FMODE_EXCL,
sb);
if (IS_ERR(bdev)) {
- if (bdev == ERR_PTR(-EBUSY))
- err = "device busy";
+ if (bdev == ERR_PTR(-EBUSY)) {
+ bdev = lookup_bdev(strim(path));
+ if (!IS_ERR(bdev) && bch_is_open(bdev))
+ err = "device already registered";
+ else
+ err = "device busy";
+ }
goto err;
}
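bcache_device_init() above now also sizes the per-stripe dirty-sector array: the device length in sectors is rounded up to whole stripes (a stripe is 1 << stripe_size_bits sectors, with 31 used as the default when the device does not set one), the count is overflow-checked, and the array comes from kzalloc() when small or vzalloc() when large. A standalone sketch of just the sizing arithmetic, with calloc() standing in for both kernel allocators:

```c
#include <stdint.h>
#include <stdlib.h>

/* Round 'sectors' up to whole stripes and allocate one counter per stripe.
 * The kernel's atomic_t counters are modelled here as plain ints. */
static int *alloc_stripe_counters(uint64_t sectors, unsigned stripe_size_bits,
                                  uint64_t *nr_stripes)
{
        uint64_t stripe_size = 1ULL << stripe_size_bits;
        uint64_t n = (sectors + stripe_size - 1) >> stripe_size_bits;

        if (!n || n > SIZE_MAX / sizeof(int))
                return NULL;            /* the kernel path returns -ENOMEM here */

        *nr_stripes = n;
        return calloc(n, sizeof(int));  /* kzalloc()/vzalloc() in the patch */
}
```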
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 4d9cca47e4c6..12a2c2846f99 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -9,7 +9,9 @@
#include "sysfs.h"
#include "btree.h"
#include "request.h"
+#include "writeback.h"
+#include <linux/blkdev.h>
#include <linux/sort.h>
static const char * const cache_replacement_policies[] = {
@@ -79,6 +81,9 @@ rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
+read_attribute(stripe_size);
+read_attribute(partial_stripes_expensive);
+
rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(discard);
@@ -127,7 +132,7 @@ SHOW(__bch_cached_dev)
char derivative[20];
char target[20];
bch_hprint(dirty,
- atomic_long_read(&dc->disk.sectors_dirty) << 9);
+ bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(derivative, dc->writeback_rate_derivative << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
@@ -143,7 +148,10 @@ SHOW(__bch_cached_dev)
}
sysfs_hprint(dirty_data,
- atomic_long_read(&dc->disk.sectors_dirty) << 9);
+ bcache_dev_sectors_dirty(&dc->disk) << 9);
+
+ sysfs_hprint(stripe_size, (1 << dc->disk.stripe_size_bits) << 9);
+ var_printf(partial_stripes_expensive, "%u");
var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff);
@@ -170,6 +178,7 @@ STORE(__cached_dev)
disk.kobj);
unsigned v = size;
struct cache_set *c;
+ struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
@@ -214,6 +223,7 @@ STORE(__cached_dev)
}
if (attr == &sysfs_label) {
+ /* note: endlines are preserved */
memcpy(dc->sb.label, buf, SB_LABEL_SIZE);
bch_write_bdev_super(dc, NULL);
if (dc->disk.c) {
@@ -221,6 +231,15 @@ STORE(__cached_dev)
buf, SB_LABEL_SIZE);
bch_uuid_write(dc->disk.c);
}
+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+ add_uevent_var(env, "DRIVER=bcache");
+ add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+ add_uevent_var(env, "CACHED_LABEL=%s", buf);
+ kobject_uevent_env(
+ &disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp);
+ kfree(env);
}
if (attr == &sysfs_attach) {
@@ -284,6 +303,8 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
+ &sysfs_stripe_size,
+ &sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff,
&sysfs_sequential_merge,
&sysfs_clear_stats,
@@ -665,12 +686,10 @@ SHOW(__bch_cache)
int cmp(const void *l, const void *r)
{ return *((uint16_t *) r) - *((uint16_t *) l); }
- /* Number of quantiles we compute */
- const unsigned nq = 31;
-
size_t n = ca->sb.nbuckets, i, unused, btree;
uint64_t sum = 0;
- uint16_t q[nq], *p, *cached;
+ /* Compute 31 quantiles */
+ uint16_t q[31], *p, *cached;
ssize_t ret;
cached = p = vmalloc(ca->sb.nbuckets * sizeof(uint16_t));
@@ -703,26 +722,29 @@ SHOW(__bch_cache)
if (n)
do_div(sum, n);
- for (i = 0; i < nq; i++)
- q[i] = INITIAL_PRIO - cached[n * (i + 1) / (nq + 1)];
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ q[i] = INITIAL_PRIO - cached[n * (i + 1) /
+ (ARRAY_SIZE(q) + 1)];
vfree(p);
- ret = snprintf(buf, PAGE_SIZE,
- "Unused: %zu%%\n"
- "Metadata: %zu%%\n"
- "Average: %llu\n"
- "Sectors per Q: %zu\n"
- "Quantiles: [",
- unused * 100 / (size_t) ca->sb.nbuckets,
- btree * 100 / (size_t) ca->sb.nbuckets, sum,
- n * ca->sb.bucket_size / (nq + 1));
-
- for (i = 0; i < nq && ret < (ssize_t) PAGE_SIZE; i++)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
- i < nq - 1 ? "%u " : "%u]\n", q[i]);
-
- buf[PAGE_SIZE - 1] = '\0';
+ ret = scnprintf(buf, PAGE_SIZE,
+ "Unused: %zu%%\n"
+ "Metadata: %zu%%\n"
+ "Average: %llu\n"
+ "Sectors per Q: %zu\n"
+ "Quantiles: [",
+ unused * 100 / (size_t) ca->sb.nbuckets,
+ btree * 100 / (size_t) ca->sb.nbuckets, sum,
+ n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
+
+ for (i = 0; i < ARRAY_SIZE(q); i++)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+ "%u ", q[i]);
+ ret--;
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");
+
return ret;
}
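The priority_stats rework above keeps the quantile computation but sizes the array statically (31 entries) and switches from snprintf() to scnprintf(), so the accumulated length can never step past the PAGE_SIZE buffer. The quantile selection itself is plain even spacing over the sorted priority array; a minimal sketch, with INITIAL_PRIO reduced to a parameter:

```c
#include <stddef.h>
#include <stdint.h>

#define NQ 31                   /* ARRAY_SIZE(q) in the sysfs code */

/* 'sorted' is assumed sorted in descending order, as the sysfs code sorts
 * the cached-bucket priorities before picking quantiles. */
static void fill_quantiles(const uint16_t *sorted, size_t n,
                           uint16_t initial_prio, uint16_t q[NQ])
{
        size_t i;

        for (i = 0; i < NQ; i++)
                q[i] = initial_prio - sorted[n * (i + 1) / (NQ + 1)];
}
```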
diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c
index 983f9bb411bc..f7b6c197f90f 100644
--- a/drivers/md/bcache/trace.c
+++ b/drivers/md/bcache/trace.c
@@ -2,6 +2,7 @@
#include "btree.h"
#include "request.h"
+#include <linux/blktrace_api.h>
#include <linux/module.h>
#define CREATE_TRACE_POINTS
@@ -9,18 +10,44 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_request_end);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_passthrough);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_hit);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_miss);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_sequential);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_bypass_congested);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_retry);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writethrough);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_skip);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_replay_key);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_full);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_entry_full);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_cache_cannibalize);
+
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_write);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_write_dirty);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_read_dirty);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_journal_write);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_cache_insert);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_alloc_fail);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_free);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_gc_coalesce);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_gc_copy_collision);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_insert_key);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback_collision);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index da3a99e85b1e..98eb81159a22 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -228,23 +228,6 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
-int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp)
-{
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment(bv, bio, i) {
- bv->bv_page = alloc_page(gfp);
- if (!bv->bv_page) {
- while (bv-- != bio->bi_io_vec + bio->bi_idx)
- __free_page(bv->bv_page);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
/*
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group (Any
* use permitted, subject to terms of PostgreSQL license; see.)
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 577393e38c3a..1ae2a73ad85f 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -15,8 +15,6 @@
struct closure;
-#include <trace/events/bcache.h>
-
#ifdef CONFIG_BCACHE_EDEBUG
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
@@ -566,12 +564,8 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
return x;
}
-#define bio_end(bio) ((bio)->bi_sector + bio_sectors(bio))
-
void bch_bio_map(struct bio *bio, void *base);
-int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp);
-
static inline sector_t bdev_sectors(struct block_device *bdev)
{
return bdev->bd_inode->i_size >> 9;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 2714ed3991d1..22cbff551628 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -9,6 +9,9 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
+#include "writeback.h"
+
+#include <trace/events/bcache.h>
static struct workqueue_struct *dirty_wq;
@@ -36,7 +39,7 @@ static void __update_writeback_rate(struct cached_dev *dc)
int change = 0;
int64_t error;
- int64_t dirty = atomic_long_read(&dc->disk.sectors_dirty);
+ int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
dc->disk.sectors_dirty_last = dirty;
@@ -105,6 +108,31 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
return KEY_DIRTY(k);
}
+static bool dirty_full_stripe_pred(struct keybuf *buf, struct bkey *k)
+{
+ uint64_t stripe;
+ unsigned nr_sectors = KEY_SIZE(k);
+ struct cached_dev *dc = container_of(buf, struct cached_dev,
+ writeback_keys);
+ unsigned stripe_size = 1 << dc->disk.stripe_size_bits;
+
+ if (!KEY_DIRTY(k))
+ return false;
+
+ stripe = KEY_START(k) >> dc->disk.stripe_size_bits;
+ while (1) {
+ if (atomic_read(dc->disk.stripe_sectors_dirty + stripe) !=
+ stripe_size)
+ return false;
+
+ if (nr_sectors <= stripe_size)
+ return true;
+
+ nr_sectors -= stripe_size;
+ stripe++;
+ }
+}
+
static void dirty_init(struct keybuf_key *w)
{
struct dirty_io *io = w->private;
@@ -149,7 +177,22 @@ static void refill_dirty(struct closure *cl)
searched_from_start = true;
}
- bch_refill_keybuf(dc->disk.c, buf, &end);
+ if (dc->partial_stripes_expensive) {
+ uint64_t i;
+
+ for (i = 0; i < dc->disk.nr_stripes; i++)
+ if (atomic_read(dc->disk.stripe_sectors_dirty + i) ==
+ 1 << dc->disk.stripe_size_bits)
+ goto full_stripes;
+
+ goto normal_refill;
+full_stripes:
+ bch_refill_keybuf(dc->disk.c, buf, &end,
+ dirty_full_stripe_pred);
+ } else {
+normal_refill:
+ bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+ }
if (bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start) {
/* Searched the entire btree - delay awhile */
@@ -181,10 +224,8 @@ void bch_writeback_queue(struct cached_dev *dc)
}
}
-void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
+void bch_writeback_add(struct cached_dev *dc)
{
- atomic_long_add(sectors, &dc->disk.sectors_dirty);
-
if (!atomic_read(&dc->has_dirty) &&
!atomic_xchg(&dc->has_dirty, 1)) {
atomic_inc(&dc->count);
@@ -203,6 +244,34 @@ void bch_writeback_add(struct cached_dev *dc, unsigned sectors)
}
}
+void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ uint64_t offset, int nr_sectors)
+{
+ struct bcache_device *d = c->devices[inode];
+ unsigned stripe_size, stripe_offset;
+ uint64_t stripe;
+
+ if (!d)
+ return;
+
+ stripe_size = 1 << d->stripe_size_bits;
+ stripe = offset >> d->stripe_size_bits;
+ stripe_offset = offset & (stripe_size - 1);
+
+ while (nr_sectors) {
+ int s = min_t(unsigned, abs(nr_sectors),
+ stripe_size - stripe_offset);
+
+ if (nr_sectors < 0)
+ s = -s;
+
+ atomic_add(s, d->stripe_sectors_dirty + stripe);
+ nr_sectors -= s;
+ stripe_offset = 0;
+ stripe++;
+ }
+}
+
/* Background writeback - IO loop */
static void dirty_io_destructor(struct closure *cl)
@@ -216,9 +285,10 @@ static void write_dirty_finish(struct closure *cl)
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
- struct bio_vec *bv = bio_iovec_idx(&io->bio, io->bio.bi_vcnt);
+ struct bio_vec *bv;
+ int i;
- while (bv-- != io->bio.bi_io_vec)
+ bio_for_each_segment_all(bv, &io->bio, i)
__free_page(bv->bv_page);
/* This is kind of a dumb way of signalling errors. */
@@ -236,10 +306,12 @@ static void write_dirty_finish(struct closure *cl)
for (i = 0; i < KEY_PTRS(&w->key); i++)
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
- pr_debug("clearing %s", pkey(&w->key));
bch_btree_insert(&op, dc->disk.c);
closure_sync(&op.cl);
+ if (op.insert_collision)
+ trace_bcache_writeback_collision(&w->key);
+
atomic_long_inc(op.insert_collision
? &dc->disk.c->writeback_keys_failed
: &dc->disk.c->writeback_keys_done);
@@ -275,7 +347,6 @@ static void write_dirty(struct closure *cl)
io->bio.bi_bdev = io->dc->bdev;
io->bio.bi_end_io = dirty_endio;
- trace_bcache_write_dirty(&io->bio);
closure_bio_submit(&io->bio, cl, &io->dc->disk);
continue_at(cl, write_dirty_finish, dirty_wq);
@@ -296,7 +367,6 @@ static void read_dirty_submit(struct closure *cl)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
- trace_bcache_read_dirty(&io->bio);
closure_bio_submit(&io->bio, cl, &io->dc->disk);
continue_at(cl, write_dirty, dirty_wq);
@@ -349,10 +419,10 @@ static void read_dirty(struct closure *cl)
io->bio.bi_rw = READ;
io->bio.bi_end_io = read_dirty_endio;
- if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
+ if (bio_alloc_pages(&io->bio, GFP_KERNEL))
goto err_free;
- pr_debug("%s", pkey(&w->key));
+ trace_bcache_writeback(&w->key);
closure_call(&io->cl, read_dirty_submit, NULL, &dc->disk.cl);
@@ -375,12 +445,49 @@ err:
refill_dirty(cl);
}
+/* Init */
+
+static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
+ struct cached_dev *dc)
+{
+ struct bkey *k;
+ struct btree_iter iter;
+
+ bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
+ while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
+ if (!b->level) {
+ if (KEY_INODE(k) > dc->disk.id)
+ break;
+
+ if (KEY_DIRTY(k))
+ bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
+ KEY_START(k),
+ KEY_SIZE(k));
+ } else {
+ btree(sectors_dirty_init, k, b, op, dc);
+ if (KEY_INODE(k) > dc->disk.id)
+ break;
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
+void bch_sectors_dirty_init(struct cached_dev *dc)
+{
+ struct btree_op op;
+
+ bch_btree_op_init_stack(&op);
+ btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+}
+
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
closure_init_unlocked(&dc->writeback);
init_rwsem(&dc->writeback_lock);
- bch_keybuf_init(&dc->writeback_keys, dirty_pred);
+ bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
dc->writeback_running = true;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
new file mode 100644
index 000000000000..c91f61bb95b6
--- /dev/null
+++ b/drivers/md/bcache/writeback.h
@@ -0,0 +1,64 @@
+#ifndef _BCACHE_WRITEBACK_H
+#define _BCACHE_WRITEBACK_H
+
+#define CUTOFF_WRITEBACK 40
+#define CUTOFF_WRITEBACK_SYNC 70
+
+static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+{
+ uint64_t i, ret = 0;
+
+ for (i = 0; i < d->nr_stripes; i++)
+ ret += atomic_read(d->stripe_sectors_dirty + i);
+
+ return ret;
+}
+
+static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
+ uint64_t offset,
+ unsigned nr_sectors)
+{
+ uint64_t stripe = offset >> d->stripe_size_bits;
+
+ while (1) {
+ if (atomic_read(d->stripe_sectors_dirty + stripe))
+ return true;
+
+ if (nr_sectors <= 1 << d->stripe_size_bits)
+ return false;
+
+ nr_sectors -= 1 << d->stripe_size_bits;
+ stripe++;
+ }
+}
+
+static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ unsigned cache_mode, bool would_skip)
+{
+ unsigned in_use = dc->disk.c->gc_stats.in_use;
+
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ atomic_read(&dc->disk.detaching) ||
+ in_use > CUTOFF_WRITEBACK_SYNC)
+ return false;
+
+ if (dc->partial_stripes_expensive &&
+ bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
+ bio_sectors(bio)))
+ return true;
+
+ if (would_skip)
+ return false;
+
+ return bio->bi_rw & REQ_SYNC ||
+ in_use <= CUTOFF_WRITEBACK;
+}
+
+void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
+void bch_writeback_queue(struct cached_dev *);
+void bch_writeback_add(struct cached_dev *);
+
+void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_cached_dev_writeback_init(struct cached_dev *);
+
+#endif
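The new writeback.h above, together with bcache_dev_sectors_dirty_add() in writeback.c, maintains a per-stripe count of dirty sectors so refill_dirty() can prefer writing back full stripes when partial stripes are expensive. The core of it is splitting a signed sector delta across consecutive stripe counters; a userspace sketch with a plain int array in place of the atomic counters (a rough model, not the kernel code):

```c
#include <stdint.h>
#include <stdlib.h>

/* Spread a signed dirty-sector delta across per-stripe counters, starting
 * 'offset' sectors into the device; mirrors bcache_dev_sectors_dirty_add()
 * with atomic_add() replaced by +=. Assumes a modest stripe_size_bits. */
static void sectors_dirty_add(int *stripe_sectors_dirty,
                              unsigned stripe_size_bits,
                              uint64_t offset, int nr_sectors)
{
        unsigned stripe_size = 1U << stripe_size_bits;
        uint64_t stripe = offset >> stripe_size_bits;
        unsigned stripe_offset = offset & (stripe_size - 1);

        while (nr_sectors) {
                int s = abs(nr_sectors);

                if ((unsigned)s > stripe_size - stripe_offset)
                        s = stripe_size - stripe_offset;
                if (nr_sectors < 0)
                        s = -s;

                stripe_sectors_dirty[stripe] += s;
                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
}
```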
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 957a719e8c2f..df7b0a06b0ea 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2290,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
+ /* Need to test wbio2->bi_end_io before we call
+ * generic_make_request as if the former is NULL,
+ * the latter is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
- if (wbio2 && wbio2->bi_end_io) {
+ if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bf094a587cb..78ea44336e75 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
+ clear_bit(STRIPE_REPLACED, &sh->state);
}
spin_unlock(&sh->stripe_lock);
}
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}
- if (s.replacing && s.locked == 0
- && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ if ((s.replacing || s.syncing) && s.locked == 0
+ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
+ && !test_bit(STRIPE_REPLACED, &sh->state)) {
/* Write out to replacement devices where possible */
for (i = 0; i < conf->raid_disks; i++)
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
- test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
set_bit(R5_WantReplace, &sh->dev[i].flags);
set_bit(R5_LOCKED, &sh->dev[i].flags);
s.locked++;
}
- set_bit(STRIPE_INSYNC, &sh->state);
+ if (s.replacing)
+ set_bit(STRIPE_INSYNC, &sh->state);
+ set_bit(STRIPE_REPLACED, &sh->state);
}
if ((s.syncing || s.replacing) && s.locked == 0 &&
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b119a8..70c49329ca9a 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
STRIPE_SYNC_REQUESTED,
STRIPE_SYNCING,
STRIPE_INSYNC,
+ STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
STRIPE_DEGRADED,
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 551ddde63c57..5be808406edc 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -65,14 +65,19 @@ EXPORT_SYMBOL(ssc_request);
void ssc_free(struct ssc_device *ssc)
{
+ bool disable_clk = true;
+
spin_lock(&user_lock);
- if (ssc->user) {
+ if (ssc->user)
ssc->user--;
- clk_disable_unprepare(ssc->clk);
- } else {
+ else {
+ disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
spin_unlock(&user_lock);
+
+ if (disable_clk)
+ clk_disable_unprepare(ssc->clk);
}
EXPORT_SYMBOL(ssc_free);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index f9296abcf02a..6127ab64bb39 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev->hbm_state = MEI_HBM_IDLE;
if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev_err(&dev->pdev->dev, "version message writet failed\n");
+ dev_err(&dev->pdev->dev, "version message write failed\n");
dev->dev_state = MEI_DEV_RESETTING;
mei_reset(dev, 1);
return -ENODEV;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e4f8dec4dc3c..b22c7e247225 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -239,14 +239,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
if (mei_me_hw_is_ready(dev))
return 0;
+ dev->recvd_hw_ready = false;
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
- dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT);
+ dev->recvd_hw_ready,
+ mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!err && !dev->recvd_hw_ready) {
+ if (!err)
+ err = -ETIMEDOUT;
dev_err(&dev->pdev->dev,
- "wait hw ready failed. status = 0x%x\n", err);
- return -ETIMEDOUT;
+ "wait hw ready failed. status = %d\n", err);
+ return err;
}
dev->recvd_hw_ready = false;
@@ -483,7 +487,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) &&
dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_INITIALIZING) {
+ dev->dev_state != MEI_DEV_INITIALIZING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_UP) {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 557eed2a1595..92c73118b13c 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -148,13 +148,12 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->hbm_state = MEI_HBM_IDLE;
- if (dev->dev_state != MEI_DEV_INITIALIZING) {
-
+ if (dev->dev_state != MEI_DEV_INITIALIZING &&
+ dev->dev_state != MEI_DEV_POWER_UP) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETTING;
-
/* remove all waiting requests */
mei_cl_all_write_clear(dev);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 847b1996ce8e..2c5a91bb8ec3 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,
!!on ^ host->pdata->gpio_power_invert);
}
if (!host->vcc && host->pdata && host->pdata->setpower)
- host->pdata->setpower(mmc_dev(host->mmc), vdd);
+ return host->pdata->setpower(mmc_dev(host->mmc), vdd);
return 0;
}
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a3c1c5aae6a9..1264923ade0f 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
if (r && irq) {
const char *name = NULL;
+ memset(r, 0, sizeof(*r));
/*
* Get optional "interrupts-names" property to add a name
* to the resource.
@@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches)
}
/* Get the next pending parent that might have children */
- desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
- if (list_empty(&intc_parent_list) || !desc) {
+ desc = list_first_entry_or_null(&intc_parent_list,
+ typeof(*desc), list);
+ if (!desc) {
pr_err("of_irq_init: children remain, but no parents\n");
break;
}
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index b29e20b7862f..bb7af78e4eed 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -388,7 +388,6 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
/* Remove the EADS bridge device itself */
BUG_ON(!bus->self);
pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
- eeh_remove_bus_device(bus->self, true);
pci_stop_and_remove_bus_device(bus->self);
return 0;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5b272bfd261d..2a00239661b3 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1193,6 +1193,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
list_for_each_entry(maps_node, &pinctrl_maps, node) {
if (maps_node->maps == map) {
list_del(&maps_node->node);
+ kfree(maps_node);
mutex_unlock(&pinctrl_maps_mutex);
return;
}
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 6866548fab31..7323cca440b5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1483,6 +1483,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
return ret;
}
+#ifdef CONFIG_PM
static int pinctrl_single_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -1505,6 +1506,7 @@ static int pinctrl_single_resume(struct platform_device *pdev)
return pinctrl_force_default(pcs->pctl);
}
+#endif
static int pcs_probe(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7956df58d751..31f7d0e04aaa 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3785,6 +3785,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = {
static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = {
REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"),
};
static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 1fa39a444171..867c9681763c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
static const struct sirfsoc_muxmask usp0_muxmask[] = {
{
.group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
},
};
@@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = {
.funcval = 0,
};
-static const unsigned usp0_pins[] = { 51, 52, 53, 54 };
+static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(20) | BIT(21),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
+ .muxmask = usp0_uart_nostreamctrl_muxmask,
+};
+
+static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
static const struct sirfsoc_muxmask usp1_muxmask[] = {
{
.group = 0,
@@ -822,6 +835,8 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
+ SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
+ usp0_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
@@ -862,6 +877,8 @@ static const char * const uart0grp[] = { "uart0grp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart2grp[] = { "uart2grp" };
static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
+static const char * const usp0_uart_nostreamctrl_grp[] = {
+ "usp0_uart_nostreamctrl_grp" };
static const char * const usp0grp[] = { "usp0grp" };
static const char * const usp1grp[] = { "usp1grp" };
static const char * const i2c0grp[] = { "i2c0grp" };
@@ -904,6 +921,9 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
+ usp0_uart_nostreamctrl_grp,
+ usp0_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7b082157eb79..99d2930b18c8 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
cmd_iu->_r_c = 0;
sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
- task->ssp_task.cmd->cmd_len / sizeof(u32));
+ (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}
static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020ac089c..0d30ca849e8f 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
+ int target_done_already = 0;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
(task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
- old_request)
+ old_request) {
idev = isci_get_device(task->dev->lldd_dev);
-
+ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags);
+ }
spin_unlock(&task->task_state_lock);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
if (task->task_proto == SAS_PROTOCOL_SMP ||
sas_protocol_ata(task->task_proto) ||
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+ target_done_already ||
test_bit(IDEV_GONE, &idev->flags)) {
spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f14665a6293d..6b1b4e91e53f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
goto out;
}
- /* error info record present */
- if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+ /*
+ * error info record present; slot->response is 32 bit aligned but may
+ * not be 64 bit aligned, so check for zero in two 32 bit reads
+ */
+ if (unlikely((rx_desc & RXQ_ERR)
+ && (*((u32 *)slot->response)
+ || *(((u32 *)slot->response) + 1)))) {
mv_dprintk("port %d slot %d rx_desc %X has error info"
"%016llX.\n", slot->port->sas_port.id, slot_idx,
- rx_desc, (u64)(*(u64 *)slot->response));
+ rx_desc, get_unaligned_le64(slot->response));
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
tstat->resp = SAS_TASK_COMPLETE;
goto out;
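The mvsas change above stops loading slot->response as a single u64, since the buffer is only guaranteed 32-bit alignment: the non-zero test reads the two 32-bit halves, and get_unaligned_le64() is used only for the debug print. A userspace sketch of the same idea, with memcpy() standing in for the unaligned accessor (the little-endian conversion is ignored here):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* 'response' is 32-bit aligned by contract but not necessarily 64-bit aligned. */
static bool response_has_error(const void *response)
{
        const uint32_t *w = response;

        return w[0] || w[1];             /* two aligned 32-bit reads */
}

static uint64_t response_value(const void *response)
{
        uint64_t v;

        memcpy(&v, response, sizeof(v)); /* safe regardless of alignment */
        return v;
}
```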
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 60e2fb7f2dca..d6b19dc80bee 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <asm/unaligned.h>
#include <scsi/libsas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ef481db942..ef0a5481b9dd 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -419,6 +419,8 @@ qla2x00_start_scsi(srb_t *sp)
__constant_cpu_to_le16(CF_SIMPLE_TAG);
break;
}
+ } else {
+ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
}
/* Load SCSI command packet. */
@@ -1307,11 +1309,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
fcp_cmnd->task_attribute = TSK_ORDERED;
break;
default:
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
break;
}
} else {
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
}
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1525,7 +1527,12 @@ qla24xx_start_scsi(srb_t *sp)
case ORDERED_QUEUE_TAG:
cmd_pkt->task = TSK_ORDERED;
break;
+ default:
+ cmd_pkt->task = TSK_SIMPLE;
+ break;
}
+ } else {
+ cmd_pkt->task = TSK_SIMPLE;
}
/* Load SCSI command packet. */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 80f39b8b0223..86fcf2c313ad 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -838,10 +838,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{
+ struct scsi_cmnd *SCpnt = rq->special;
+
if (rq->cmd_flags & REQ_DISCARD) {
free_page((unsigned long)rq->buffer);
rq->buffer = NULL;
}
+ if (SCpnt->cmnd != rq->cmd) {
+ mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
}
/**
@@ -1720,21 +1727,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
sd_dif_complete(SCpnt, good_bytes);
- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
-
- /* We have to print a failed command here as the
- * extended CDB gets freed before scsi_io_completion()
- * is called.
- */
- if (result)
- scsi_print_command(SCpnt);
-
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- }
-
return good_bytes;
}
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 080abf2faf97..a8c344422a77 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t ppos)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig = log->w_off;
+ size_t orig;
struct logger_entry header;
struct timespec now;
ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
mutex_lock(&log->mutex);
+ orig = log->w_off;
+
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739b7e3e..fa8da9aada30 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
Ian Abbott <abbotti@mev.co.uk>
- Frank Mori Hess <fmhess@users.sourceforge.net>
+ H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8647518259f6..f4a197b2d1fd 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
DPRINTK("subdevice busy\n");
return -EBUSY;
}
- s->busy = file;
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
DPRINTK("channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
DPRINTK("channel/gain list too short %u < 1\n",
cmd.chanlist_len);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
async->cmd = cmd;
@@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
if (!async->cmd.chanlist) {
DPRINTK("allocation failed\n");
- ret = -ENOMEM;
- goto cleanup;
+ return -ENOMEM;
}
if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+ * comedi_read() or comedi_write() */
+ s->busy = file;
ret = s->do_cmd(dev, s);
if (ret == 0)
return 0;
@@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
void *file)
{
struct comedi_subdevice *s;
+ int ret;
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
if (s->busy != file)
return -EBUSY;
- return do_cancel(dev, s);
+ ret = do_cancel(dev, s);
+ if (comedi_get_subdevice_runflags(s) & SRF_USER)
+ wake_up_interruptible(&s->async->wait_head);
+
+ return ret;
}
/*
@@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
+ mutex_lock(&dev->mutex);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
break;
}
@@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
+ mutex_lock(&dev->mutex);
do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
+ mutex_unlock(&dev->mutex);
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s) &&
- async->buf_read_count - async->buf_write_count == 0) {
- do_become_nonbusy(dev, s);
+ if (comedi_is_subdevice_idle(s)) {
+ mutex_lock(&dev->mutex);
+ if (async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 5590ebf1da15..817f837b240d 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -827,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mtx);
usb_alphatrack_delete(dev);
} else {
+ atomic_set(&dev->writes_pending, 0);
dev->intf = NULL;
mutex_unlock(&dev->mtx);
}
- atomic_set(&dev->writes_pending, 0);
mutex_unlock(&disconnect_mutex);
dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n",
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index b795353e8348..cc3692439a5c 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head)
list_for_each_entry_safe(entry, n, head, list) {
list_del(&entry->list);
- free_qos_entry(entry);
gdm_wimax_send_tx(entry->skb, entry->dev);
+ free_qos_entry(entry);
}
}
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 22339059837f..bd0f2fd01db4 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -33,7 +33,6 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
depends on DRM_IMX
- select OF_VIDEOMODE
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index c191ae203565..41e88abe47af 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
or DYN_EXTERNAL, then mem granularity information is present
within the section name - only process if there are at least three
tokens within the section name (just a minor optimization) */
- if (count >= 3)
- strict_strtol(sz_last_token, 10, (long *)&req);
+ if (count >= 3) {
+ status = kstrtos32(sz_last_token, 10, &req);
+ if (status)
+ goto func_cont;
+ }
if ((req == 0) || (req == 1)) {
if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 82c7202fd5cc..e77fb6ea40c9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -527,8 +527,11 @@ static void zram_reset_device(struct zram *zram)
size_t index;
struct zram_meta *meta;
- if (!zram->init_done)
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
return;
+ }
meta = zram->meta;
zram->init_done = 0;
@@ -549,6 +552,7 @@ static void zram_reset_device(struct zram *zram)
zram->disksize = 0;
set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
}
static void zram_init_device(struct zram *zram, struct zram_meta *meta)
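The zram_reset_device() hunks widen the init_lock critical section so the init_done check and the teardown that follows are atomic with respect to a concurrent init; the early-return path must then drop the lock it just acquired. A small sketch of that shape, assuming a pthread rwlock in place of the kernel rw_semaphore:

#include <pthread.h>
#include <stdbool.h>

struct zdev {
	pthread_rwlock_t init_lock;
	bool init_done;
};

static void zdev_reset(struct zdev *z)
{
	pthread_rwlock_wrlock(&z->init_lock);
	if (!z->init_done) {
		/* early exit still has to release the lock */
		pthread_rwlock_unlock(&z->init_lock);
		return;
	}

	z->init_done = false;
	/* ... free metadata, reset statistics, zero the disk size ... */

	pthread_rwlock_unlock(&z->init_lock);
}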
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 5de56f671a9d..f36950e4134f 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -54,6 +54,8 @@ MODULE_PARM_DESC(notify_delay_ms,
* is some wrong values returned by cpuid for number of thresholds.
*/
#define MAX_NUMBER_OF_TRIPS 2
+/* Limit number of package temp zones */
+#define MAX_PKG_TEMP_ZONE_IDS 256
struct phy_dev_entry {
struct list_head list;
@@ -394,12 +396,16 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
char buffer[30];
int thres_count;
u32 eax, ebx, ecx, edx;
+ u8 *temp;
cpuid(6, &eax, &ebx, &ecx, &edx);
thres_count = ebx & 0x07;
if (!thres_count)
return -ENODEV;
+ if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS)
+ return -ENODEV;
+
thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);
err = get_tj_max(cpu, &tj_max);
@@ -417,13 +423,14 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
spin_lock(&pkg_work_lock);
if (topology_physical_package_id(cpu) > max_phy_id)
max_phy_id = topology_physical_package_id(cpu);
- pkg_work_scheduled = krealloc(pkg_work_scheduled,
- (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
- if (!pkg_work_scheduled) {
+ temp = krealloc(pkg_work_scheduled,
+ (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
+ if (!temp) {
spin_unlock(&pkg_work_lock);
err = -ENOMEM;
goto err_ret_free;
}
+ pkg_work_scheduled = temp;
pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
spin_unlock(&pkg_work_lock);
@@ -511,7 +518,7 @@ static int get_core_online(unsigned int cpu)
/* Check if there is already an instance for this package */
if (!phdev) {
- if (!cpu_has(c, X86_FEATURE_DTHERM) &&
+ if (!cpu_has(c, X86_FEATURE_DTHERM) ||
!cpu_has(c, X86_FEATURE_PTS))
return -ENODEV;
if (pkg_temp_thermal_device_add(cpu))
@@ -562,7 +569,7 @@ static struct notifier_block pkg_temp_thermal_notifier __refdata = {
};
static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
@@ -592,7 +599,6 @@ static int __init pkg_temp_thermal_init(void)
return 0;
err_ret:
- get_online_cpus();
for_each_online_cpu(i)
put_core_offline(i);
put_online_cpus();
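The pkg_work_scheduled hunk is the standard realloc-into-a-temporary pattern: krealloc() returns NULL on failure but leaves the old buffer allocated, so writing the result straight into the only pointer to that buffer would leak it. A userspace analogue of the before/after, with realloc() standing in for krealloc():

#include <stdlib.h>

static unsigned char *work_scheduled;   /* analogue of pkg_work_scheduled */

/* Grow the per-package array; on failure the old array stays valid and
 * still owned by work_scheduled. Returns 0 on success, -1 on failure. */
static int grow_work_scheduled(size_t new_ids)
{
	unsigned char *tmp;

	tmp = realloc(work_scheduled, new_ids * sizeof(*tmp));
	if (!tmp)
		return -1;          /* work_scheduled is untouched */

	work_scheduled = tmp;       /* overwrite the pointer only on success */
	return 0;
}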
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 721904f8efa9..946ddd2b3a54 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -193,7 +193,8 @@ static int __init parse_options(struct early_serial8250_device *device,
if (options) {
options++;
device->baud = simple_strtoul(options, NULL, 0);
- length = min(strcspn(options, " "), sizeof(device->options));
+ length = min(strcspn(options, " ") + 1,
+ sizeof(device->options));
strlcpy(device->options, options, length);
} else {
device->baud = probe_baud(port);
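The 8250_early fix hinges on strlcpy() semantics: it copies at most size - 1 bytes and always NUL-terminates, so keeping the first n characters of the option token requires passing n + 1, not n. A standalone illustration using a local bounded-copy helper so it builds anywhere:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-style helper: copies at most size - 1 bytes and always
 * NUL-terminates the destination. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	const char *options = "115200n8 extra";
	size_t tok = strcspn(options, " ");   /* 8 = length of "115200n8" */
	char buf[32];

	bounded_copy(buf, options, tok);      /* truncates to "115200n" */
	printf("size = tok     -> \"%s\"\n", buf);

	bounded_copy(buf, options, tok + 1);  /* keeps all of "115200n8" */
	printf("size = tok + 1 -> \"%s\"\n", buf);
	return 0;
}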
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5e3d68917ffe..1456673bcca0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -277,7 +277,7 @@ config SERIAL_TEGRA
select SERIAL_CORE
help
Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
- providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not
+ providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not
provide all of these ports, depending on how the serial ports
are enabled). This driver uses the APB DMA to achieve higher baudrate
and better performance.
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index ff171384ea52..dc6e96996ead 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3478,7 +3478,7 @@ static int alloc_buf_list(SLMP_INFO *info)
for ( i = 0; i < info->rx_buf_count; i++ ) {
/* calculate and store physical address of this buffer entry */
info->rx_buf_list_ex[i].phys_entry =
- info->buffer_list_phys + (i * sizeof(SCABUFSIZE));
+ info->buffer_list_phys + (i * SCABUFSIZE);
/* calculate and store physical address of */
/* next entry in circular list of entries */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4191db32f12c..4a8a1d68002c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -668,6 +668,15 @@ resubmit:
static inline int
hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
{
+ /* Need to clear both directions for control ep */
+ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_CONTROL) {
+ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
+ devinfo ^ 0x8000, tt, NULL, 0, 1000);
+ if (status)
+ return status;
+ }
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
tt, NULL, 0, 1000);
@@ -2848,6 +2857,15 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
USB_CTRL_SET_TIMEOUT);
}
+/* Count of wakeup-enabled devices at or below udev */
+static unsigned wakeup_enabled_descendants(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+
+ return udev->do_remote_wakeup +
+ (hub ? hub->wakeup_enabled_descendants : 0);
+}
+
/*
* usb_port_suspend - suspend a usb device's upstream port
* @udev: device that's no longer in active use, not a root hub
@@ -2888,8 +2906,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
* Linux (2.6) currently has NO mechanisms to initiate that: no khubd
* timer, no SRP, no requests through sysfs.
*
- * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get
- * suspended only when their bus goes into global suspend (i.e., the root
+ * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
+ * suspended until their bus goes into global suspend (i.e., the root
* hub is suspended). Nevertheless, we change @udev->state to
* USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
* upstream port setting is stored in @udev->port_is_suspended.
@@ -2960,15 +2978,21 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
- else if (PMSG_IS_AUTO(msg))
- status = set_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_SUSPEND);
+
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* into suspend a few ms after the root hub stops sending packets.
* The USB 2.0 spec calls this "global suspend".
+ *
+ * However, many USB hubs have a bug: They don't relay wakeup requests
+ * from a downstream port if the port's suspend feature isn't on.
+ * Therefore we will turn on the suspend feature if udev or any of its
+ * descendants is enabled for remote wakeup.
*/
+ else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
else {
really_suspend = false;
status = 0;
@@ -3003,15 +3027,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
- /* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
- usb_set_device_state(udev, USB_STATE_SUSPENDED);
if (really_suspend) {
udev->port_is_suspended = 1;
+
+ /* device has up to 10 msec to fully suspend */
msleep(10);
}
+ usb_set_device_state(udev, USB_STATE_SUSPENDED);
}
/*
@@ -3293,7 +3318,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
unsigned port1;
int status;
- /* Warn if children aren't already suspended */
+ /*
+ * Warn if children aren't already suspended.
+ * Also, add up the number of wakeup-enabled descendants.
+ */
+ hub->wakeup_enabled_descendants = 0;
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_device *udev;
@@ -3303,6 +3332,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
+ if (udev)
+ hub->wakeup_enabled_descendants +=
+ wakeup_enabled_descendants(udev);
}
if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 6508e02b3dac..4e4790dea343 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@ struct usb_hub {
struct usb_tt tt; /* Transaction Translator */
unsigned mA_per_port; /* current for each child */
+#ifdef CONFIG_PM
+ unsigned wakeup_enabled_descendants;
+#endif
unsigned limited_power:1;
unsigned quiescing:1;
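The hub.c/hub.h changes accumulate wakeup-enabled descendants bottom-up: a child contributes its own do_remote_wakeup flag plus whatever count it cached (if it is itself a hub) when its subtree was suspended earlier, and hub_suspend() sums that over all child ports. A minimal sketch of that accumulation over a hypothetical device tree, with invented names:

struct dev_node {
	int do_remote_wakeup;            /* this device wants remote wakeup */
	int wakeup_enabled_descendants;  /* cached when this subtree suspended */
	int nchildren;
	struct dev_node **children;
};

/* Contribution of one already-suspended child: itself plus its subtree. */
static int wakeup_enabled_below(const struct dev_node *child)
{
	return child->do_remote_wakeup + child->wakeup_enabled_descendants;
}

/* Called when a hub suspends; children suspend before their parent, so
 * their cached counts are already valid. */
static void hub_suspend_count(struct dev_node *hub)
{
	hub->wakeup_enabled_descendants = 0;
	for (int i = 0; i < hub->nchildren; i++) {
		if (hub->children[i])
			hub->wakeup_enabled_descendants +=
				wakeup_enabled_below(hub->children[i]);
	}
}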
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18027d0..2378958ea63e 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,6 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET) && GENERIC_HARDIRQS
+ depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA
select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system has a Dual Role SuperSpeed
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d39b76..358375e0b291 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -450,7 +450,7 @@ static int dwc3_probe(struct platform_device *pdev)
}
if (IS_ERR(dwc->usb3_phy)) {
- ret = PTR_ERR(dwc->usb2_phy);
+ ret = PTR_ERR(dwc->usb3_phy);
/*
* if -ENXIO is returned, it means PHY layer wasn't
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322e3cab..27dad993b007 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -759,8 +759,8 @@ struct dwc3 {
struct dwc3_event_type {
u32 is_devspec:1;
- u32 type:6;
- u32 reserved8_31:25;
+ u32 type:7;
+ u32 reserved8_31:24;
} __packed;
#define DWC3_DEPEVT_XFERCOMPLETE 0x01
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35df49c..f77083fedc68 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1584,6 +1584,7 @@ err1:
__dwc3_gadget_ep_disable(dwc->eps[0]);
err0:
+ dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 62f6802f6e0f..8e9368330b10 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -193,6 +193,7 @@ config USB_FUSB300
Faraday usb device controller FUSB300 driver
config USB_FOTG210_UDC
+ depends on HAS_DMA
tristate "Faraday FOTG210 USB Peripheral Controller"
help
Faraday USB2.0 OTG controller which can be configured as
@@ -328,13 +329,14 @@ config USB_S3C_HSUDC
config USB_MV_UDC
tristate "Marvell USB2.0 Device Controller"
- depends on GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && HAS_DMA
help
Marvell Socs (including PXA and MMP series) include a high speed
USB2.0 OTG controller, which can be configured as high speed or
full speed USB peripheral.
config USB_MV_U3D
+ depends on HAS_DMA
tristate "MARVELL PXA2128 USB 3.0 controller"
help
MARVELL PXA2128 Processor series include a super speed USB3.0 device
@@ -639,6 +641,7 @@ config USB_CONFIGFS_RNDIS
depends on USB_CONFIGFS
depends on NET
select USB_U_ETHER
+ select USB_U_RNDIS
select USB_F_RNDIS
help
Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 073b938f9135..d9a6add0c852 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -870,8 +870,8 @@ static void clk_on(struct at91_udc *udc)
if (udc->clocked)
return;
udc->clocked = 1;
- clk_enable(udc->iclk);
- clk_enable(udc->fclk);
+ clk_prepare_enable(udc->iclk);
+ clk_prepare_enable(udc->fclk);
}
static void clk_off(struct at91_udc *udc)
@@ -880,8 +880,8 @@ static void clk_off(struct at91_udc *udc)
return;
udc->clocked = 0;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- clk_disable(udc->fclk);
- clk_disable(udc->iclk);
+ clk_disable_unprepare(udc->fclk);
+ clk_disable_unprepare(udc->iclk);
}
/*
@@ -1725,7 +1725,7 @@ static int at91udc_probe(struct platform_device *pdev)
/* init software state */
udc = &controller;
udc->gadget.dev.parent = dev;
- if (pdev->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
at91udc_of_init(udc, pdev->dev.of_node);
else
memcpy(&udc->board, dev->platform_data,
@@ -1782,12 +1782,14 @@ static int at91udc_probe(struct platform_device *pdev)
}
/* don't do anything until we have both gadget driver and VBUS */
- clk_enable(udc->iclk);
+ retval = clk_prepare_enable(udc->iclk);
+ if (retval)
+ goto fail1;
at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
/* Clear all pending interrupts - UDP may be used by bootloader. */
at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
- clk_disable(udc->iclk);
+ clk_disable_unprepare(udc->iclk);
/* request UDC and maybe VBUS irqs */
udc->udp_irq = platform_get_irq(pdev, 0);
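The at91_udc hunks are a routine conversion to the common clock framework: clk_prepare_enable() performs the possibly sleeping prepare step plus the enable and returns an error that should be propagated, while clk_disable_unprepare() undoes both in one call. A hedged kernel-style sketch of the usual shape (struct my_udc and the helpers are invented; the clk API calls are the real ones):

struct my_udc {
	struct clk *iclk;
	struct clk *fclk;
};

static int example_clocks_on(struct my_udc *udc)
{
	int ret;

	ret = clk_prepare_enable(udc->iclk);      /* prepare + enable, may sleep */
	if (ret)
		return ret;

	ret = clk_prepare_enable(udc->fclk);
	if (ret) {
		clk_disable_unprepare(udc->iclk); /* unwind on failure */
		return ret;
	}
	return 0;
}

static void example_clocks_off(struct my_udc *udc)
{
	clk_disable_unprepare(udc->fclk);         /* reverse order of enable */
	clk_disable_unprepare(udc->iclk);
}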
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 5d3561ea1c15..edab45da3741 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -959,8 +959,11 @@ static struct usb_function_instance *ecm_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ecm_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
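The same leak fix recurs in the gadget functions that follow (f_eem, f_ncm, f_phonet, f_rndis, f_subset): when gether_setup_default() fails, the freshly allocated opts must be freed before returning, and the error pointer is propagated with ERR_CAST() instead of being re-wrapped through PTR_ERR()/ERR_PTR(). A condensed sketch of the shared pattern; f_example_opts and example_alloc_inst() are invented, the ERR_* helpers are the real kernel ones:

struct f_example_opts {
	struct usb_function_instance func_inst;
	struct net_device *net;
};

static struct usb_function_instance *example_alloc_inst(void)
{
	struct f_example_opts *opts;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return ERR_PTR(-ENOMEM);

	opts->net = gether_setup_default();
	if (IS_ERR(opts->net)) {
		struct net_device *net = opts->net;

		kfree(opts);           /* do not leak opts on failure */
		return ERR_CAST(net);  /* preserve the encoded error */
	}

	return &opts->func_inst;
}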
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 90ee8022e8d8..d00392d879db 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -593,8 +593,11 @@ static struct usb_function_instance *eem_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = eem_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 952177f7eb9b..1c28fe13328a 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -1350,8 +1350,11 @@ static struct usb_function_instance *ncm_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 7944fb0efe3b..1bf26e9f38cd 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -656,8 +656,11 @@ static struct usb_function_instance *phonet_alloc_inst(void)
opts->func_inst.free_func_inst = phonet_free_inst;
opts->net = gphonet_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&phonet_func_type);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 191df35ae69d..717ed7f95639 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -963,8 +963,11 @@ static struct usb_function_instance *rndis_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = rndis_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 5601e1d96c4f..7c8674fa7e80 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -505,8 +505,11 @@ static struct usb_function_instance *geth_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = geth_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&gether_func_type);
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index cce5535b1dc6..10cd18ddd0d4 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1074,7 +1074,7 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
.udc_stop = fotg210_udc_stop,
};
-static int __exit fotg210_udc_remove(struct platform_device *pdev)
+static int fotg210_udc_remove(struct platform_device *pdev)
{
struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev);
@@ -1088,7 +1088,7 @@ static int __exit fotg210_udc_remove(struct platform_device *pdev)
return 0;
}
-static int __init fotg210_udc_probe(struct platform_device *pdev)
+static int fotg210_udc_probe(struct platform_device *pdev)
{
struct resource *res, *ires;
struct fotg210_udc *fotg210 = NULL;
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 07fdb3eaf48a..ec6a2d290398 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -1776,7 +1776,7 @@ static int mv_u3d_remove(struct platform_device *dev)
kfree(u3d->eps);
if (u3d->irq)
- free_irq(u3d->irq, &dev->dev);
+ free_irq(u3d->irq, u3d);
if (u3d->cap_regs)
iounmap(u3d->cap_regs);
@@ -1974,7 +1974,7 @@ static int mv_u3d_probe(struct platform_device *dev)
return 0;
err_unregister:
- free_irq(u3d->irq, &dev->dev);
+ free_irq(u3d->irq, u3d);
err_request_irq:
err_get_irq:
kfree(u3d->status_req);
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index ffd8fa541101..c28ac9872030 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -50,6 +50,8 @@ static DEFINE_MUTEX(udc_lock);
/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_HAS_DMA
+
int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in)
{
@@ -99,6 +101,8 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
+#endif /* CONFIG_HAS_DMA */
+
/* ------------------------------------------------------------------------- */
void usb_gadget_set_state(struct usb_gadget *gadget,
@@ -194,9 +198,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
dev_set_name(&gadget->dev, "gadget");
gadget->dev.parent = parent;
+#ifdef CONFIG_HAS_DMA
dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
gadget->dev.dma_parms = parent->dma_parms;
gadget->dev.dma_mask = parent->dma_mask;
+#endif
if (release)
gadget->dev.release = release;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2b702772d04d..6dce37555c4f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -874,6 +874,7 @@ static int ehci_hub_control (
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
usb_hcd_start_port_resume(&hcd->self, wIndex);
+ set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 4b8a2092432f..978c849f9c9a 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -13,6 +13,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
#else
+struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
static inline void usb_amd_dev_put(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39b97d5..f00cb203faea 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -93,7 +93,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e57eafa6910..5b08cd85f8e7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -434,7 +434,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
- if (!(list_empty(&ep->ring->td_list)))
+ if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c49f00260ca..41eb4fc33453 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -329,7 +329,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
return;
}
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
int i;
@@ -1181,9 +1181,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
}
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return -ENODEV;
-
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
printk(KERN_DEBUG "xHCI %s called with unaddressed "
@@ -1199,6 +1196,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
}
}
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
return 1;
}
@@ -3898,7 +3898,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
* Issue an Evaluate Context command to change the Maximum Exit Latency in the
* slot context. If that succeeds, store the new MEL in the xhci_virt_device.
*/
-static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
struct usb_device *udev, u16 max_exit_latency)
{
struct xhci_virt_device *virt_dev;
@@ -4892,6 +4892,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
get_quirks(dev, xhci);
+ /* xHCI controllers that follow the xHCI 1.0 spec can give a spurious
+ * success event after a short transfer. This quirk makes such
+ * spurious events be ignored.
+ */
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386ec5d35..de98906f786d 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x0711, 0x0920) },
+ { USB_DEVICE(0x0711, 0x0950) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index efe6e1464f45..a2fb30bbb971 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -71,9 +71,9 @@ static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
{1250, 5, 4, 20, 0}, /* 12 MHz */
{3125, 20, 4, 20, 0}, /* 16.8 MHz */
{1172, 8, 4, 20, 65537}, /* 19.2 MHz */
+ {1000, 7, 4, 10, 0}, /* 20 MHz */
{1250, 12, 4, 20, 0}, /* 26 MHz */
{3125, 47, 4, 20, 92843}, /* 38.4 MHz */
- {1000, 7, 4, 10, 0}, /* 20 MHz */
};
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index 1011c16ade7e..758b86d0fcb3 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -388,7 +388,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
clk = devm_clk_get(dev, "otg");
if (IS_ERR(clk)) {
- dev_err(dev, "Failed to get otg clock\n");
+ dev_err(dev, "Failed to get usbhost/otg clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ed4949faa70d..805940c37353 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -855,10 +855,6 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
- if (!driver ||
- !driver->unbind)
- return -EINVAL;
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
gpriv->driver = NULL;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index d6ef2f8da37d..0eae4ba3760e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
@@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 0a818b238508..603fb70dde80 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -905,20 +905,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Spreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}
Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}
/* End of block to be checked */
@@ -927,7 +927,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
&Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Controlreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x08; /* Driver done bit */
Data |= 0x20; /* rx_disable */
@@ -935,7 +935,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Controlreg failed\n");
- return -1;
+ goto err;
}
/* do register settings here */
/* Set all regs to the device default values. */
@@ -946,21 +946,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "disabling interrupts failed\n");
- return -1;
+ goto err;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}
Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}
Data = 0x03;
@@ -1103,6 +1103,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
+err:
+ for (j = 0; j < NUM_URBS; ++j) {
+ urb = mos7840_port->write_urb_pool[j];
+ if (!urb)
+ continue;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ return status;
}
/*****************************************************************************
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5dd857de05b0..1cf6f125f5f0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+#define OLIVETTI_PRODUCT_OLICARD200 0xc005
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
-/* ONDA Communication vendor id */
-#define ONDA_VENDOR_ID 0x1ee8
-
-/* ONDA MT825UP HSDPA 14.2 modem */
-#define ONDA_MT825UP 0x000b
-
/* Samsung products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
@@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
-#define PETATEL_PRODUCT_NP10T 0x600e
+#define PETATEL_PRODUCT_NP10T_600A 0x600a
+#define PETATEL_PRODUCT_NP10T_600E 0x600e
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
@@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
@@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
@@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 7182bb774b79..375b5a400b6f 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -371,7 +371,7 @@ static int ti_startup(struct usb_serial *serial)
usb_set_serial_data(serial, tdev);
/* determine device type */
- if (usb_match_id(serial->interface, ti_id_table_3410))
+ if (serial->type == &ti_1port_device)
tdev->td_is_3410 = 1;
dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
tdev->td_is_3410 ? "3410" : "5052");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1799335288bd..c015f2c16729 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
+UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
+ "Sony Corp.",
+ "MicroVault Flash Drive",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_READ_CAPACITY_16 ),
+
/* floppy reports multiple luns */
UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
"SAMSUNG",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 027be91db139..969a85960e9f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -15,7 +15,6 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
-#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>
@@ -346,12 +345,11 @@ static void handle_tx(struct vhost_net *net)
struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
bool zcopy, zcopy_used;
- /* TODO: check that we are running from vhost_worker? */
- sock = rcu_dereference_check(vq->private_data, 1);
+ mutex_lock(&vq->mutex);
+ sock = vq->private_data;
if (!sock)
- return;
+ goto out;
- mutex_lock(&vq->mutex);
vhost_disable_notify(&net->dev, vq);
hdr_size = nvq->vhost_hlen;
@@ -461,7 +459,7 @@ static void handle_tx(struct vhost_net *net)
break;
}
}
-
+out:
mutex_unlock(&vq->mutex);
}
@@ -570,14 +568,14 @@ static void handle_rx(struct vhost_net *net)
s16 headcount;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
- /* TODO: check that we are running from vhost_worker? */
- struct socket *sock = rcu_dereference_check(vq->private_data, 1);
-
- if (!sock)
- return;
+ struct socket *sock;
mutex_lock(&vq->mutex);
+ sock = vq->private_data;
+ if (!sock)
+ goto out;
vhost_disable_notify(&net->dev, vq);
+
vhost_hlen = nvq->vhost_hlen;
sock_hlen = nvq->sock_hlen;
@@ -652,7 +650,7 @@ static void handle_rx(struct vhost_net *net)
break;
}
}
-
+out:
mutex_unlock(&vq->mutex);
}
@@ -750,8 +748,7 @@ static int vhost_net_enable_vq(struct vhost_net *n,
struct vhost_poll *poll = n->poll + (nvq - n->vqs);
struct socket *sock;
- sock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
+ sock = vq->private_data;
if (!sock)
return 0;
@@ -764,10 +761,9 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
struct socket *sock;
mutex_lock(&vq->mutex);
- sock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
+ sock = vq->private_data;
vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, NULL);
+ vq->private_data = NULL;
mutex_unlock(&vq->mutex);
return sock;
}
@@ -923,8 +919,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
/* start polling new socket */
- oldsock = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
+ oldsock = vq->private_data;
if (sock != oldsock) {
ubufs = vhost_net_ubuf_alloc(vq,
sock && vhost_sock_zcopy(sock));
@@ -934,7 +929,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
}
vhost_net_disable_vq(n, vq);
- rcu_assign_pointer(vq->private_data, sock);
+ vq->private_data = sock;
r = vhost_init_used(vq);
if (r)
goto err_used;
@@ -968,7 +963,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
return 0;
err_used:
- rcu_assign_pointer(vq->private_data, oldsock);
+ vq->private_data = oldsock;
vhost_net_enable_vq(n, vq);
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 06adf31a9248..0c27c7df1b09 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -902,19 +902,15 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
int head, ret;
u8 target;
+ mutex_lock(&vq->mutex);
/*
* We can handle the vq only after the endpoint is setup by calling the
* VHOST_SCSI_SET_ENDPOINT ioctl.
- *
- * TODO: Check that we are running from vhost_worker which acts
- * as read-side critical section for vhost kind of RCU.
- * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
*/
- vs_tpg = rcu_dereference_check(vq->private_data, 1);
+ vs_tpg = vq->private_data;
if (!vs_tpg)
- return;
+ goto out;
- mutex_lock(&vq->mutex);
vhost_disable_notify(&vs->dev, vq);
for (;;) {
@@ -1064,6 +1060,7 @@ err_free:
vhost_scsi_free_cmd(cmd);
err_cmd:
vhost_scsi_send_bad_target(vs, vq, head, out);
+out:
mutex_unlock(&vq->mutex);
}
@@ -1232,9 +1229,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
sizeof(vs->vs_vhost_wwpn));
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
- /* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, vs_tpg);
+ vq->private_data = vs_tpg;
vhost_init_used(vq);
mutex_unlock(&vq->mutex);
}
@@ -1313,9 +1309,8 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
if (match) {
for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
vq = &vs->vqs[i].vq;
- /* Flushing the vhost_work acts as synchronize_rcu */
mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, NULL);
+ vq->private_data = NULL;
mutex_unlock(&vq->mutex);
}
}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a73ea217f24d..339eae85859a 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
-#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>
@@ -200,9 +199,8 @@ static long vhost_test_run(struct vhost_test *n, int test)
priv = test ? n : NULL;
/* start polling new socket */
- oldpriv = rcu_dereference_protected(vq->private_data,
- lockdep_is_held(&vq->mutex));
- rcu_assign_pointer(vq->private_data, priv);
+ oldpriv = vq->private_data;
+ vq->private_data = priv;
r = vhost_init_used(&n->vqs[index]);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42298cd23c73..4465ed5f316d 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -103,14 +103,8 @@ struct vhost_virtqueue {
struct iovec iov[UIO_MAXIOV];
struct iovec *indirect;
struct vring_used_elem *heads;
- /* We use a kind of RCU to access private pointer.
- * All readers access it from worker, which makes it possible to
- * flush the vhost_work instead of synchronize_rcu. Therefore readers do
- * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
- * vhost_work execution acts instead of rcu_read_lock() and the end of
- * vhost_work execution acts instead of rcu_read_unlock().
- * Writers use virtqueue mutex. */
- void __rcu *private_data;
+ /* Protected by virtqueue mutex. */
+ void *private_data;
/* Log write descriptors */
void __user *log_base;
struct vhost_log *log;
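The vhost conversion drops the RCU-flavoured private_data pointer in favour of a plain pointer that both readers and writers touch only under the virtqueue mutex; handle_tx()/handle_rx() therefore take the mutex before looking at the backend and leave through a common unlock label when none is attached. A minimal userspace sketch of that discipline, with a pthread mutex standing in for vq->mutex:

#include <pthread.h>
#include <stddef.h>

struct vq {
	pthread_mutex_t mutex;
	void *private_data;      /* backend; protected by mutex */
};

/* Reader side: dereference the backend only while holding the mutex. */
static void handle_work(struct vq *vq)
{
	void *backend;

	pthread_mutex_lock(&vq->mutex);
	backend = vq->private_data;
	if (!backend)
		goto out;        /* nothing attached yet */

	/* ... process requests using backend, still under the mutex ... */
out:
	pthread_mutex_unlock(&vq->mutex);
}

/* Writer side: swap the backend under the same mutex, return the old one. */
static void *set_backend(struct vq *vq, void *new_backend)
{
	void *old;

	pthread_mutex_lock(&vq->mutex);
	old = vq->private_data;
	vq->private_data = new_backend;
	pthread_mutex_unlock(&vq->mutex);
	return old;
}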
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 5ca11b066b7e..886e797f75f9 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,33 +101,37 @@ static const struct backlight_ops max8925_backlight_ops = {
.get_brightness = max8925_backlight_get_brightness,
};
-#ifdef CONFIG_OF
-static int max8925_backlight_dt_init(struct platform_device *pdev,
- struct max8925_backlight_pdata *pdata)
+static void max8925_backlight_dt_init(struct platform_device *pdev)
{
struct device_node *nproot = pdev->dev.parent->of_node, *np;
- int dual_string;
+ struct max8925_backlight_pdata *pdata;
+ u32 val;
+
+ if (!nproot || !IS_ENABLED(CONFIG_OF))
+ return;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_backlight_pdata),
+ GFP_KERNEL);
+ if (!pdata)
+ return;
- if (!nproot)
- return -ENODEV;
np = of_find_node_by_name(nproot, "backlight");
if (!np) {
dev_err(&pdev->dev, "failed to find backlight node\n");
- return -ENODEV;
+ return;
}
- of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string);
- pdata->dual_string = dual_string;
- return 0;
+ if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
+ pdata->dual_string = val;
+
+ pdev->dev.platform_data = pdata;
}
-#else
-#define max8925_backlight_dt_init(x, y) (-1)
-#endif
static int max8925_backlight_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct max8925_backlight_pdata *pdata = pdev->dev.platform_data;
+ struct max8925_backlight_pdata *pdata;
struct max8925_backlight_data *data;
struct backlight_device *bl;
struct backlight_properties props;
@@ -170,13 +174,10 @@ static int max8925_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
value = 0;
- if (pdev->dev.parent->of_node && !pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct max8925_backlight_pdata),
- GFP_KERNEL);
- max8925_backlight_dt_init(pdev, pdata);
- }
+ if (!pdev->dev.platform_data)
+ max8925_backlight_dt_init(pdev);
+ pdata = pdev->dev.platform_data;
if (pdata) {
if (pdata->lxw_scl)
value |= (1 << 7);