From ae38853249538719c12cb7e7bc302550f5513dae Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 26 Sep 2020 22:58:44 +0200 Subject: phy: fsl-imx8mq-usb: Constify imx8mp_usb_phy_ops The only usage of imx8mp_usb_phy_ops is to assign its address to the data field in the of_device_id struct, which is a const void pointer. Make it const to allow the compiler to put it in read-only memory. Signed-off-by: Rikard Falkeborn Link: https://lore.kernel.org/r/20200926205844.34218-1-rikard.falkeborn@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/freescale/phy-fsl-imx8mq-usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c index 62d6d6849ad6..47a0c09bc9fa 100644 --- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c @@ -131,7 +131,7 @@ static const struct phy_ops imx8mq_usb_phy_ops = { .owner = THIS_MODULE, }; -static struct phy_ops imx8mp_usb_phy_ops = { +static const struct phy_ops imx8mp_usb_phy_ops = { .init = imx8mp_usb_phy_init, .power_on = imx8mq_phy_power_on, .power_off = imx8mq_phy_power_off, -- cgit v1.2.3 From b36773c3912e770e92ba57fa0957c1cf4db6e8a7 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Mon, 26 Oct 2020 18:00:26 +0000 Subject: misc: hisi_hikey_usb: use PTR_ERR_OR_ZERO Coccinelle suggested using PTR_ERR_OR_ZERO() and looking at the code, we can use PTR_ERR_OR_ZERO() instead of checking IS_ERR() and then doing 'return 0'. Tested-by: Lukas Bulwahn Signed-off-by: Sudip Mukherjee Link: https://lore.kernel.org/r/20201026180026.3350-1-sudipm.mukherjee@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/hisi_hikey_usb.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c index cc93569e601c..989d7d129469 100644 --- a/drivers/misc/hisi_hikey_usb.c +++ b/drivers/misc/hisi_hikey_usb.c @@ -168,10 +168,7 @@ static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev, hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio", GPIOD_OUT_HIGH); - if (IS_ERR(hisi_hikey_usb->reset)) - return PTR_ERR(hisi_hikey_usb->reset); - - return 0; + return PTR_ERR_OR_ZERO(hisi_hikey_usb->reset); } static int hisi_hikey_usb_probe(struct platform_device *pdev) -- cgit v1.2.3 From 3f7566f4947834db9b36aeb8b308e91448ba74fc Mon Sep 17 00:00:00 2001 From: Harshal Chaudhari Date: Mon, 26 Oct 2020 21:28:01 +0530 Subject: misc: xilinx_sdfec: add compat_ptr_ioctl() Driver has a trivial helper function to convert the pointer argument and then call the native ioctl handler. But now we have a generic implementation for that, so we can use it. 
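For reference, the generic implementation is compat_ptr_ioctl() in fs/ioctl.c; roughly, it performs the same pointer conversion and then forwards to the native handler (a sketch of the common helper, not code from this patch):

long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	/* convert the 32-bit user pointer, then call the native ioctl handler */
	return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}

So dropping the per-driver wrapper and setting .compat_ioctl = compat_ptr_ioctl keeps the behaviour the same.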
Signed-off-by: Harshal Chaudhari Link: https://lore.kernel.org/r/20201026155801.16053-1-harshalchau04@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 92291292756a..6f252793dceb 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -1016,14 +1016,6 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, return rval; } -#ifdef CONFIG_COMPAT -static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long data) -{ - return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data)); -} -#endif - static __poll_t xsdfec_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; @@ -1054,9 +1046,7 @@ static const struct file_operations xsdfec_fops = { .release = xsdfec_dev_release, .unlocked_ioctl = xsdfec_dev_ioctl, .poll = xsdfec_poll, -#ifdef CONFIG_COMPAT - .compat_ioctl = xsdfec_dev_compat_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, }; static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) -- cgit v1.2.3 From 0347c69214f4b600f40c0dc58945dc2798e2b3d9 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Sat, 26 Sep 2020 01:58:29 +0200 Subject: phy: Add USB HSIC PHY driver for Marvell MMP3 SoC Add PHY driver for the HSICs found on Marvell MMP3 SoC. The driver is rather straightforward -- the PHY essentially just needs to be enabled. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20200925235828.228626-4-lkundrak@v3.sk Signed-off-by: Vinod Koul --- drivers/phy/marvell/Kconfig | 12 ++++++ drivers/phy/marvell/Makefile | 1 + drivers/phy/marvell/phy-mmp3-hsic.c | 82 +++++++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+) create mode 100644 drivers/phy/marvell/phy-mmp3-hsic.c (limited to 'drivers') diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig index 8f6273c837ec..6c96f2bf5266 100644 --- a/drivers/phy/marvell/Kconfig +++ b/drivers/phy/marvell/Kconfig @@ -116,3 +116,15 @@ config PHY_MMP3_USB The PHY driver will be used by Marvell udc/ehci/otg driver. To compile this driver as a module, choose M here. + +config PHY_MMP3_HSIC + tristate "Marvell MMP3 USB HSIC PHY Driver" + depends on MACH_MMP3_DT || COMPILE_TEST + select GENERIC_PHY + help + Enable this to support Marvell MMP3 USB HSIC PHY driver for + Marvell MMP3 SoC. This driver will be used my the Marvell EHCI + driver to initialize the interface to internal USB HSIC + components on MMP3-based boards. + + To compile this driver as a module, choose M here. 
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile index 5a106b1549f4..7f296ef02829 100644 --- a/drivers/phy/marvell/Makefile +++ b/drivers/phy/marvell/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o obj-$(CONFIG_PHY_MMP3_USB) += phy-mmp3-usb.o +obj-$(CONFIG_PHY_MMP3_HSIC) += phy-mmp3-hsic.o obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c new file mode 100644 index 000000000000..47c1e8894939 --- /dev/null +++ b/drivers/phy/marvell/phy-mmp3-hsic.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2020 Lubomir Rintel + */ + +#include +#include +#include +#include +#include + +#define HSIC_CTRL 0x08 +#define HSIC_ENABLE BIT(7) +#define PLL_BYPASS BIT(4) + +static int mmp3_hsic_phy_init(struct phy *phy) +{ + void __iomem *base = (void __iomem *)phy_get_drvdata(phy); + u32 hsic_ctrl; + + hsic_ctrl = readl_relaxed(base + HSIC_CTRL); + hsic_ctrl |= HSIC_ENABLE; + hsic_ctrl |= PLL_BYPASS; + writel_relaxed(hsic_ctrl, base + HSIC_CTRL); + + return 0; +} + +static const struct phy_ops mmp3_hsic_phy_ops = { + .init = mmp3_hsic_phy_init, + .owner = THIS_MODULE, +}; + +static const struct of_device_id mmp3_hsic_phy_of_match[] = { + { .compatible = "marvell,mmp3-hsic-phy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, mmp3_hsic_phy_of_match); + +static int mmp3_hsic_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *provider; + struct resource *resource; + void __iomem *base; + struct phy *phy; + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, resource); + if (IS_ERR(base)) { + dev_err(dev, "failed to remap PHY regs\n"); + return PTR_ERR(base); + } + + phy = devm_phy_create(dev, NULL, &mmp3_hsic_phy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "failed to create PHY\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, (void *)base); + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(provider)) { + dev_err(dev, "failed to register PHY provider\n"); + return PTR_ERR(provider); + } + + return 0; +} + +static struct platform_driver mmp3_hsic_phy_driver = { + .probe = mmp3_hsic_phy_probe, + .driver = { + .name = "mmp3-hsic-phy", + .of_match_table = mmp3_hsic_phy_of_match, + }, +}; +module_platform_driver(mmp3_hsic_phy_driver); + +MODULE_AUTHOR("Lubomir Rintel "); +MODULE_DESCRIPTION("Marvell MMP3 USB HSIC PHY Driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From c62b1f97e744039f01ec789ce01dfa0a8c9a1a99 Mon Sep 17 00:00:00 2001 From: Sangmoon Kim Date: Thu, 29 Oct 2020 15:28:55 +0900 Subject: char: misc: increase DYNAMIC_MINORS value DYNAMIC_MINORS value has been set to 64. Due to this reason, we are facing a module loading fail problem of device driver like below. [ 45.712771] pdic_misc_init - return error : -16 We need to increase this value for registering more misc devices. 
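Each driver that registers a miscdevice with MISC_DYNAMIC_MINOR consumes one bit of this pool, and misc_register() fails with -EBUSY (-16, as in the log above) once the bitmap is full. A minimal sketch of such a consumer (names are illustrative only, not an existing driver):

#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

static const struct file_operations pdic_example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice pdic_example_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* request a minor from the dynamic pool */
	.name  = "pdic_example",
	.fops  = &pdic_example_fops,
};

static int __init pdic_example_init(void)
{
	/* returns -EBUSY once all DYNAMIC_MINORS bits are already taken */
	return misc_register(&pdic_example_dev);
}
module_init(pdic_example_init);

static void __exit pdic_example_exit(void)
{
	misc_deregister(&pdic_example_dev);
}
module_exit(pdic_example_exit);

MODULE_LICENSE("GPL");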
Signed-off-by: Sangmoon Kim Link: https://lore.kernel.org/r/20201029062855.19757-1-sangmoon.kim@samsung.com Signed-off-by: Greg Kroah-Hartman --- drivers/char/misc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/misc.c b/drivers/char/misc.c index f6a147427029..ca5141ed5ef3 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -60,7 +60,7 @@ static DEFINE_MUTEX(misc_mtx); /* * Assigned numbers, used for dynamic minors */ -#define DYNAMIC_MINORS 64 /* like dynamic majors */ +#define DYNAMIC_MINORS 128 /* like dynamic majors */ static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS); #ifdef CONFIG_PROC_FS -- cgit v1.2.3 From f7a6e6c4073c1e03ab18d2229857cfef5e0f8b7b Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 19 Oct 2020 12:18:08 -0700 Subject: misc: mei: remove unneeded break A break is not needed if it is preceded by a return Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201019191808.9891-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/hbm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index a97eb5d47705..686e8b6a4c55 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1377,7 +1377,6 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) dev_info(dev->dev, "hbm: stop response: resetting.\n"); /* force the reset */ return -EPROTO; - break; case CLIENT_DISCONNECT_REQ_CMD: dev_dbg(dev->dev, "hbm: disconnect request: message received\n"); -- cgit v1.2.3 From f0f753da73b386deb274486255b857d013fa6069 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Sat, 31 Oct 2020 08:24:56 -0700 Subject: misc: ti-st: st_core: remove unneeded semicolon A semicolon is not needed after a switch statement. Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201031152456.2146104-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/ti-st/st_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c index f4ddd1e67015..5a0a5fc3d3ab 100644 --- a/drivers/misc/ti-st/st_core.c +++ b/drivers/misc/ti-st/st_core.c @@ -380,7 +380,7 @@ void st_int_recv(void *disc_data, st_gdata->rx_state = ST_W4_HEADER; st_gdata->rx_count = st_gdata->list[type]->hdr_len; pr_debug("rx_count %ld\n", st_gdata->rx_count); - }; + } ptr++; count--; } -- cgit v1.2.3 From 33fcc5491897504856783335102142676dbee817 Mon Sep 17 00:00:00 2001 From: Gustavo Pimentel Date: Wed, 21 Oct 2020 23:28:10 +0200 Subject: misc: pci_endpoint_test: Remove unnecessary verification The first condition of the if statement can never be true, because the 'bar' variable is an enum whose values range from 0 to 5, and it takes its value from the 'arg' function parameter, which is an unsigned long. The constant 5 was replaced by the equivalent enum value, in this case BAR_5. 
Signed-off-by: Gustavo Pimentel Link: https://lore.kernel.org/r/142cbbc215bed4243a219ea17b46f4256ceccb22.1603315690.git.gustavo.pimentel@synopsys.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pci_endpoint_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 146ca6fb3260..ea9c2c085298 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -708,7 +708,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case PCITEST_BAR: bar = arg; - if (bar < 0 || bar > 5) + if (bar > BAR_5) goto ret; if (is_am654_pci_dev(pdev) && bar == BAR_0) goto ret; -- cgit v1.2.3 From c78c95f919539cad0308e7de0d5d0d656e4a8e98 Mon Sep 17 00:00:00 2001 From: Harshal Chaudhari Date: Sun, 1 Nov 2020 22:39:49 +0530 Subject: misc: xilinx-sdfec: remove check for ioctl cmd and argument. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit if (_IOC_TYPE(cmd) != PP_IOCTL) return -ENOTTY; Invalid ioctl command check normally performs by “default” case. if (_IOC_DIR(cmd) != _IOC_NONE) { argp = (void __user *)arg; if (!argp) return -EINVAL; } And for checking ioctl arguments, copy_from_user()/copy_to_user() checks are enough. Reviewed-by: Arnd Bergmann Signed-off-by: Harshal Chaudhari Link: https://lore.kernel.org/r/20201101170949.18616-1-harshalchau04@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 6f252793dceb..23c8448a9c3b 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -944,8 +944,8 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data) { struct xsdfec_dev *xsdfec; - void __user *arg = NULL; - int rval = -EINVAL; + void __user *arg = (void __user *)data; + int rval; xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); @@ -956,16 +956,6 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, return -EPERM; } - if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) - return -ENOTTY; - - /* check if ioctl argument is present and valid */ - if (_IOC_DIR(cmd) != _IOC_NONE) { - arg = (void __user *)data; - if (!arg) - return rval; - } - switch (cmd) { case XSDFEC_START_DEV: rval = xsdfec_start(xsdfec); @@ -1010,7 +1000,7 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, rval = xsdfec_is_active(xsdfec, (bool __user *)arg); break; default: - /* Should not get here */ + rval = -ENOTTY; break; } return rval; -- cgit v1.2.3 From e8f50d4bfc8deff61adc74146f130860c4fe356f Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 2 Nov 2020 14:20:00 +0000 Subject: misc: c2port: core: Make copying name from userspace more secure Currently the 'c2dev' device data is not initialised when it's allocated. There maybe an issue when using strncpy() to populate the 'name' attribute since a NUL terminator may not be provided in all use-cases. To prevent such a failing, let's ensure the 'c2dev' device data area is fully zeroed out on allocation. 
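The underlying hazard is the usual strncpy() behaviour: when the source is at least as long as the copy length, no NUL terminator is written, so the byte following the copied region must already be zero. A generic sketch of the pattern (structure and names are illustrative, not the driver's exact code):

#include <linux/slab.h>
#include <linux/string.h>

struct example_dev {
	char name[16];
};

static struct example_dev *example_alloc(const char *src)
{
	struct example_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);	/* zeroed: name[15] is already '\0' */
	if (!dev)
		return NULL;

	/*
	 * Copies at most 15 bytes. With kmalloc() the last byte of 'name'
	 * would be uninitialized, so a long 'src' could leave the string
	 * unterminated; with kzalloc() it is always terminated.
	 */
	strncpy(dev->name, src, sizeof(dev->name) - 1);
	return dev;
}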
Cc: Rodolfo Giometti Cc: "Eurotech S.p.A" Cc: David Laight Reported-by: Geert Uytterhoeven Acked-by: Arnd Bergmann Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20201102142001.560490-1-lee.jones@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/c2port/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c index 80d87e8a0bea..fb9a1b49ff6d 100644 --- a/drivers/misc/c2port/core.c +++ b/drivers/misc/c2port/core.c @@ -899,7 +899,7 @@ struct c2port_device *c2port_device_register(char *name, unlikely(!ops->c2d_get) || unlikely(!ops->c2d_set)) return ERR_PTR(-EINVAL); - c2dev = kmalloc(sizeof(struct c2port_device), GFP_KERNEL); + c2dev = kzalloc(sizeof(struct c2port_device), GFP_KERNEL); if (unlikely(!c2dev)) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From a7c392c76a434c71df30ca748aae00d69684ff48 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 2 Nov 2020 14:20:01 +0000 Subject: misc: ocxl: config: Rename function attribute description Fixes the following W=1 kernel build warning(s): drivers/misc/ocxl/config.c:81: warning: Function parameter or member 'dev' not described in 'get_function_0' drivers/misc/ocxl/config.c:81: warning: Excess function parameter 'device' description in 'get_function_0' Cc: Frederic Barrat Cc: Andrew Donnellan Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: linuxppc-dev@lists.ozlabs.org Acked-by: Frederic Barrat Acked-by: Andrew Donnellan Signed-off-by: Lee Jones Link: https://lore.kernel.org/r/20201102142001.560490-2-lee.jones@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/ocxl/config.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c index 4d490b92d951..a68738f38252 100644 --- a/drivers/misc/ocxl/config.c +++ b/drivers/misc/ocxl/config.c @@ -73,7 +73,7 @@ static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) /** * get_function_0() - Find a related PCI device (function 0) - * @device: PCI device to match + * @dev: PCI device to match * * Returns a pointer to the related device, or null if not found */ -- cgit v1.2.3 From c7a6252b9472fd96a3dbac40c891eb9f4fe5b4f1 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Thu, 29 Oct 2020 11:54:43 +0200 Subject: mei: bus: do not start a read for disconnected clients Avoid queuing reads and registering rx callbacks in case the client is not connected, to prevent null dereferencing and memory leaks. 
Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20201029095444.957924-3-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 9cdaa7f3af23..1a54bf3ed0c3 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -276,7 +276,8 @@ static void mei_cl_bus_rx_work(struct work_struct *work) cldev->rx_cb(cldev); mutex_lock(&bus->device_lock); - mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + if (mei_cl_is_connected(cldev->cl)) + mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); mutex_unlock(&bus->device_lock); } @@ -364,7 +365,10 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); mutex_lock(&bus->device_lock); - ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + if (mei_cl_is_connected(cldev->cl)) + ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); + else + ret = -ENODEV; mutex_unlock(&bus->device_lock); if (ret && ret != -EBUSY) return ret; -- cgit v1.2.3 From c2192bbc3c507b33dda5858049e0493c073d29fb Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Thu, 29 Oct 2020 11:54:44 +0200 Subject: mei: bus: deinitialize callback functions on init failure If the initialization procedure for receive or receive callback registration on the client bus has failed the caller can't re-run it. Deinitilize the callback pointers and cancel the work to allow the caller to retry. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20201029095444.957924-4-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 1a54bf3ed0c3..76aa0e93748a 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -370,8 +370,11 @@ int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) else ret = -ENODEV; mutex_unlock(&bus->device_lock); - if (ret && ret != -EBUSY) + if (ret && ret != -EBUSY) { + cancel_work_sync(&cldev->rx_work); + cldev->rx_cb = NULL; return ret; + } return 0; } @@ -405,8 +408,11 @@ int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, mutex_lock(&bus->device_lock); ret = mei_cl_notify_request(cldev->cl, NULL, 1); mutex_unlock(&bus->device_lock); - if (ret) + if (ret) { + cancel_work_sync(&cldev->notif_work); + cldev->notif_cb = NULL; return ret; + } return 0; } -- cgit v1.2.3 From 061e5379f327c77647b6a16beaf898b1ee71d731 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 19 Oct 2020 12:46:28 -0700 Subject: char: lp: remove unneeded break A break is not needed if it is preceded by a return Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201019194628.14731-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/char/lp.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 0ec73917d8dd..862c2fd933c7 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -622,7 +622,6 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, break; case LPSETIRQ: return -EINVAL; - break; case LPGETIRQ: if (copy_to_user(argp, &LP_IRQ(minor), sizeof(int))) -- cgit v1.2.3 From b61fe3b5963db935dad5bae8f9ced3bed695bb62 Mon 
Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 19 Oct 2020 12:45:03 -0700 Subject: char: mwave: remove unneeded break A break is not needed if it is preceded by a return Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201019194503.14426-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/char/mwave/mwavedd.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index e43c876a9223..11272d605ecd 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -403,7 +403,6 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, default: return -ENOTTY; - break; } /* switch */ PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval); -- cgit v1.2.3 From 6a80467a21ca4268f1d97daa1f43b66b86fe3887 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 19 Oct 2020 09:51:12 -0700 Subject: vme: remove unneeded break A break is not needed if it is preceded by a return or goto Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201019165112.29091-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/vme/bridges/vme_tsi148.c | 7 ------- drivers/vme/vme.c | 9 --------- 2 files changed, 16 deletions(-) (limited to 'drivers') diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 50ae26977a02..1227ea937059 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -506,7 +506,6 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled, default: dev_err(tsi148_bridge->parent, "Invalid address space\n"); return -EINVAL; - break; } /* Convert 64-bit variables to 2x 32-bit variables */ @@ -995,7 +994,6 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled, dev_err(tsi148_bridge->parent, "Invalid address space\n"); retval = -EINVAL; goto err_aspace; - break; } temp_ctl &= ~(3<<4); @@ -1503,7 +1501,6 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, __be32 *attr, default: dev_err(dev, "Invalid address space\n"); return -EINVAL; - break; } if (cycle & VME_SUPER) @@ -1603,7 +1600,6 @@ static int tsi148_dma_set_vme_dest_attributes(struct device *dev, __be32 *attr, default: dev_err(dev, "Invalid address space\n"); return -EINVAL; - break; } if (cycle & VME_SUPER) @@ -1701,7 +1697,6 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, dev_err(tsi148_bridge->parent, "Invalid source type\n"); retval = -EINVAL; goto err_source; - break; } /* Assume last link - this will be over-written by adding another */ @@ -1738,7 +1733,6 @@ static int tsi148_dma_list_add(struct vme_dma_list *list, dev_err(tsi148_bridge->parent, "Invalid destination type\n"); retval = -EINVAL; goto err_dest; - break; } /* Fill out count */ @@ -1964,7 +1958,6 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base, mutex_unlock(&lm->mtx); dev_err(tsi148_bridge->parent, "Invalid address space\n"); return -EINVAL; - break; } if (cycle & VME_SUPER) diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c index b398293980b6..e1a940e43327 100644 --- a/drivers/vme/vme.c +++ b/drivers/vme/vme.c @@ -52,23 +52,18 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource) case VME_MASTER: return list_entry(resource->entry, struct vme_master_resource, list)->parent; - break; case VME_SLAVE: return list_entry(resource->entry, struct vme_slave_resource, list)->parent; - break; case VME_DMA: return list_entry(resource->entry, struct vme_dma_resource, 
list)->parent; - break; case VME_LM: return list_entry(resource->entry, struct vme_lm_resource, list)->parent; - break; default: printk(KERN_ERR "Unknown resource type\n"); return NULL; - break; } } @@ -179,7 +174,6 @@ size_t vme_get_size(struct vme_resource *resource) return 0; return size; - break; case VME_SLAVE: retval = vme_slave_get(resource, &enabled, &base, &size, &buf_base, &aspace, &cycle); @@ -187,14 +181,11 @@ size_t vme_get_size(struct vme_resource *resource) return 0; return size; - break; case VME_DMA: return 0; - break; default: printk(KERN_ERR "Unknown resource type\n"); return 0; - break; } } EXPORT_SYMBOL(vme_get_size); -- cgit v1.2.3 From 552c08a8e03f09ee7f44950d0f6d9bd4599ff1f6 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 19 Oct 2020 12:32:27 -0700 Subject: ipack: iopctal: remove unneeded break A break is not needed if it is preceded by a return Acked-by: Samuel Iglesias Gonsalvez Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20201019193227.12738-1-trix@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/ipack/devices/ipoctal.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c index d480a514c983..3940714e4397 100644 --- a/drivers/ipack/devices/ipoctal.c +++ b/drivers/ipack/devices/ipoctal.c @@ -544,7 +544,6 @@ static void ipoctal_set_termios(struct tty_struct *tty, break; default: return; - break; } baud = tty_get_baud_rate(tty); -- cgit v1.2.3 From a79db45fa54e2420c9fe977a6a5835c466888b5a Mon Sep 17 00:00:00 2001 From: Samuel Thibault Date: Sun, 1 Nov 2020 19:39:13 +0100 Subject: speakup_dummy: log about characters received by the dummy driver The dummy speakup driver is used to check that speakup is working properly, without the need for actually owning the hardware. Some drivers require receiving characters, so we need a way to check that this is working properly. Signed-off-by: Samuel Thibault Link: https://lore.kernel.org/r/20201101183913.yg35cbqpn2ba6cew@function Signed-off-by: Greg Kroah-Hartman --- drivers/accessibility/speakup/speakup_dummy.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/accessibility/speakup/speakup_dummy.c b/drivers/accessibility/speakup/speakup_dummy.c index e393438af81b..63c2f2943282 100644 --- a/drivers/accessibility/speakup/speakup_dummy.c +++ b/drivers/accessibility/speakup/speakup_dummy.c @@ -80,6 +80,11 @@ static struct attribute *synth_attrs[] = { NULL, /* need to NULL terminate the list of attributes */ }; +static void read_buff_add(u_char c) +{ + pr_info("speakup_dummy: got character %02x\n", c); +} + static struct spk_synth synth_dummy = { .name = "dummy", .version = DRV_VERSION, @@ -103,7 +108,7 @@ static struct spk_synth synth_dummy = { .flush = spk_synth_flush, .is_alive = spk_synth_is_alive_restart, .synth_adjust = NULL, - .read_buff_add = NULL, + .read_buff_add = read_buff_add, .get_index = NULL, .indexing = { .command = NULL, -- cgit v1.2.3 From 508155944752f6e51dbc0a7273d933e954e4ed64 Mon Sep 17 00:00:00 2001 From: Samuel Thibault Date: Mon, 2 Nov 2020 10:59:45 +0100 Subject: speakup: document the usage of enum values The cursor tracking modes enum, the edge enum, and the read_all commands enum should be used as such in the source code, to make it more readable and make gcc catch missing values in switches. Also, some values of enums are coupled with others, we at least need to document these. 
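The "catch missing values in switches" part relies on gcc's -Wswitch (enabled by -Wall): when a switch operates on an enum-typed value and has no default label, gcc warns about every enumerator that lacks a case. A minimal illustration based on the cursor-track enum from this patch:

#include <linux/printk.h>

enum cursor_track { CT_Off = 0, CT_On, CT_Highlight, CT_Window, CT_Max };

static void describe(enum cursor_track t)
{
	switch (t) {			/* deliberately no default label */
	case CT_Off:
		pr_info("cursoring off\n");
		break;
	case CT_On:
		pr_info("cursoring on\n");
		break;
	/*
	 * gcc -Wswitch: "enumeration value 'CT_Highlight' not handled in
	 * switch" (and likewise for CT_Window and CT_Max).
	 */
	}
}

Had 't' been a plain int, as before this patch, gcc would have stayed silent.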
Signed-off-by: Samuel Thibault Link: https://lore.kernel.org/r/20201102095945.ap4pdff2dn47hijh@function Signed-off-by: Greg Kroah-Hartman --- drivers/accessibility/speakup/i18n.h | 6 ++++ drivers/accessibility/speakup/main.c | 64 +++++++++++++++++++----------------- 2 files changed, 39 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/accessibility/speakup/i18n.h b/drivers/accessibility/speakup/i18n.h index 2ec6e659d02b..2a607d263234 100644 --- a/drivers/accessibility/speakup/i18n.h +++ b/drivers/accessibility/speakup/i18n.h @@ -23,12 +23,15 @@ enum msg_index_t { MSG_OFF = MSG_STATUS_START, MSG_ON, MSG_NO_WINDOW, + + /* These must be ordered the same as enum cursor_track */ MSG_CURSOR_MSGS_START, MSG_CURSORING_OFF = MSG_CURSOR_MSGS_START, MSG_CURSORING_ON, MSG_HIGHLIGHT_TRACKING, MSG_READ_WINDOW, MSG_READ_ALL, + MSG_EDIT_DONE, MSG_WINDOW_ALREADY_SET, MSG_END_BEFORE_START, @@ -41,11 +44,14 @@ enum msg_index_t { MSG_LEAVING_HELP, MSG_IS_UNASSIGNED, MSG_HELP_INFO, + + /* These must be ordered the same as enum edge */ MSG_EDGE_MSGS_START, MSG_EDGE_TOP = MSG_EDGE_MSGS_START, MSG_EDGE_BOTTOM, MSG_EDGE_LEFT, MSG_EDGE_RIGHT, + MSG_NUMBER, MSG_SPACE, MSG_START, /* A little confusing, given our convention. */ diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c index be79b2135fac..b9b51bc542df 100644 --- a/drivers/accessibility/speakup/main.c +++ b/drivers/accessibility/speakup/main.c @@ -90,19 +90,18 @@ const u_char spk_key_defaults[] = { #include "speakupmap.h" }; -/* Speakup Cursor Track Variables */ -static int cursor_track = 1, prev_cursor_track = 1; - -/* cursor track modes, must be ordered same as cursor_msgs */ -enum { +/* cursor track modes, must be ordered same as cursor_msgs in enum msg_index_t */ +enum cursor_track { CT_Off = 0, CT_On, CT_Highlight, CT_Window, - CT_Max + CT_Max, + read_all_mode = CT_Max, }; -#define read_all_mode CT_Max +/* Speakup Cursor Track Variables */ +static enum cursor_track cursor_track = 1, prev_cursor_track = 1; static struct tty_struct *tty; @@ -405,15 +404,17 @@ static void say_attributes(struct vc_data *vc) synth_printf("%s\n", spk_msg_get(MSG_COLORS_START + bg)); } -enum { - edge_top = 1, +/* must be ordered same as edge_msgs in enum msg_index_t */ +enum edge { + edge_none = 0, + edge_top, edge_bottom, edge_left, edge_right, edge_quiet }; -static void announce_edge(struct vc_data *vc, int msg_id) +static void announce_edge(struct vc_data *vc, enum edge msg_id) { if (spk_bleeps & 1) bleep(spk_y); @@ -608,7 +609,8 @@ static void say_prev_word(struct vc_data *vc) { u_char temp; u16 ch; - u_short edge_said = 0, last_state = 0, state = 0; + enum edge edge_said = edge_none; + u_short last_state = 0, state = 0; spk_parked |= 0x01; @@ -653,7 +655,7 @@ static void say_prev_word(struct vc_data *vc) } if (spk_x == 0 && edge_said == edge_quiet) edge_said = edge_left; - if (edge_said > 0 && edge_said < edge_quiet) + if (edge_said > edge_none && edge_said < edge_quiet) announce_edge(vc, edge_said); say_word(vc); } @@ -662,7 +664,8 @@ static void say_next_word(struct vc_data *vc) { u_char temp; u16 ch; - u_short edge_said = 0, last_state = 2, state = 0; + enum edge edge_said = edge_none; + u_short last_state = 2, state = 0; spk_parked |= 0x01; if (spk_x == vc->vc_cols - 1 && spk_y == vc->vc_rows - 1) { @@ -694,7 +697,7 @@ static void say_next_word(struct vc_data *vc) spk_pos += 2; last_state = state; } - if (edge_said > 0) + if (edge_said > edge_none) announce_edge(vc, edge_said); say_word(vc); } @@ -1366,31 
+1369,30 @@ static void speakup_deallocate(struct vc_data *vc) speakup_console[vc_num] = NULL; } +enum read_all_command { + RA_NEXT_SENT = KVAL(K_DOWN)+1, + RA_PREV_LINE = KVAL(K_LEFT)+1, + RA_NEXT_LINE = KVAL(K_RIGHT)+1, + RA_PREV_SENT = KVAL(K_UP)+1, + RA_DOWN_ARROW, + RA_TIMER, + RA_FIND_NEXT_SENT, + RA_FIND_PREV_SENT, +}; + static u_char is_cursor; static u_long old_cursor_pos, old_cursor_x, old_cursor_y; static int cursor_con; static void reset_highlight_buffers(struct vc_data *); -static int read_all_key; +static enum read_all_command read_all_key; static int in_keyboard_notifier; -static void start_read_all_timer(struct vc_data *vc, int command); - -enum { - RA_NOTHING, - RA_NEXT_SENT, - RA_PREV_LINE, - RA_NEXT_LINE, - RA_PREV_SENT, - RA_DOWN_ARROW, - RA_TIMER, - RA_FIND_NEXT_SENT, - RA_FIND_PREV_SENT, -}; +static void start_read_all_timer(struct vc_data *vc, enum read_all_command command); -static void kbd_fakekey2(struct vc_data *vc, int command) +static void kbd_fakekey2(struct vc_data *vc, enum read_all_command command) { del_timer(&cursor_timer); speakup_fake_down_arrow(); @@ -1427,7 +1429,7 @@ static void stop_read_all(struct vc_data *vc) spk_do_flush(); } -static void start_read_all_timer(struct vc_data *vc, int command) +static void start_read_all_timer(struct vc_data *vc, enum read_all_command command) { struct var_t *cursor_timeout; @@ -1438,7 +1440,7 @@ static void start_read_all_timer(struct vc_data *vc, int command) jiffies + msecs_to_jiffies(cursor_timeout->u.n.value)); } -static void handle_cursor_read_all(struct vc_data *vc, int command) +static void handle_cursor_read_all(struct vc_data *vc, enum read_all_command command) { int indcount, sentcount, rv, sn; -- cgit v1.2.3 From 7bbd2584ead19a8fdd72c19b1fdf25369954aee6 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 23 Oct 2020 18:33:03 +0200 Subject: mei: bus: fix a kernel-doc markup A function has a different name between their prototype and its kernel-doc markup. Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/56b4cc6308fe6b6133b95c7d9116ed6fbaaabe9f.1603469755.git.mchehab+huawei@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 76aa0e93748a..7fe48baa103a 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -1047,7 +1047,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus, } /** - * mei_cl_dev_setup - setup me client device + * mei_cl_bus_dev_setup - setup me client device * run fix up routines and set the device name * * @bus: mei device -- cgit v1.2.3 From 9f38abefd37af8726d59706b9b84530630b6b620 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 23 Oct 2020 18:33:17 +0200 Subject: uio: fix some kernel-doc markups The definitions for (devm_)uio_register_device should be at the header file, as the macros are there. The ones inside uio.c refer, instead, to __(devm_)uio_register_device. Update them and add new kernel-doc markups for the macros. 
Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/82ab7b68d271aeda7396e369ff8a629491b9d628.1603469755.git.mchehab+huawei@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio.c | 4 ++-- include/linux/uio_driver.h | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 6dca744e39e9..01b349c1b923 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -906,7 +906,7 @@ static void uio_device_release(struct device *dev) } /** - * uio_register_device - register a new userspace IO device + * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities @@ -1002,7 +1002,7 @@ static void devm_uio_unregister_device(struct device *dev, void *res) } /** - * devm_uio_register_device - Resource managed uio_register_device() + * __devm_uio_register_device - Resource managed uio_register_device() * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 54bf6b118401..47c5962b876b 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -117,6 +117,14 @@ extern int __must_check struct uio_info *info); /* use a define to avoid include chaining to get THIS_MODULE */ + +/** + * uio_register_device - register a new userspace IO device + * @parent: parent device + * @info: UIO device capabilities + * + * returns zero on success or a negative error code. + */ #define uio_register_device(parent, info) \ __uio_register_device(THIS_MODULE, parent, info) @@ -129,6 +137,14 @@ extern int __must_check struct uio_info *info); /* use a define to avoid include chaining to get THIS_MODULE */ + +/** + * devm_uio_register_device - Resource managed uio_register_device() + * @parent: parent device + * @info: UIO device capabilities + * + * returns zero on success or a negative error code. + */ #define devm_uio_register_device(parent, info) \ __devm_uio_register_device(THIS_MODULE, parent, info) -- cgit v1.2.3 From 8bd160690a6c064fd4459d006e50ba7cc7a7669c Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 23 Oct 2020 18:33:20 +0200 Subject: vme: fix two kernel-doc markups Some identifiers have different names between their prototypes and the kernel-doc markup. Signed-off-by: Mauro Carvalho Chehab Link: https://lore.kernel.org/r/51bfb6724975302f66d64e4ad71de451a7cbbf99.1603469755.git.mchehab+huawei@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/vme/vme.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c index e1a940e43327..54d7963c1078 100644 --- a/drivers/vme/vme.c +++ b/drivers/vme/vme.c @@ -68,7 +68,7 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource) } /** - * vme_free_consistent - Allocate contiguous memory. + * vme_alloc_consistent - Allocate contiguous memory. * @resource: Pointer to VME resource. * @size: Size of allocation required. * @dma: Pointer to variable to store physical address of allocation. @@ -638,7 +638,7 @@ int vme_master_get(struct vme_resource *resource, int *enabled, EXPORT_SYMBOL(vme_master_get); /** - * vme_master_write - Read data from VME space into a buffer. + * vme_master_read - Read data from VME space into a buffer. * @resource: Pointer to VME master resource. 
* @buf: Pointer to buffer where data should be transferred. * @count: Number of bytes to transfer. -- cgit v1.2.3 From a67c43ac37f80b5e7e1876e8f5f7df62027445c8 Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Mon, 2 Nov 2020 17:39:24 +0800 Subject: uacce: delete some redundant code. Delete some redundant code. Reviewed-by: Zhou Wang Reviewed-by: Jonathan Cameron Acked-by: Zhangfei Gao Signed-off-by: Kai Ye Link: https://lore.kernel.org/r/1604309965-21752-2-git-send-email-yekai13@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/uacce/uacce.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 56dd98ab5a81..eb827f18c031 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -231,17 +231,6 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma) switch (type) { case UACCE_QFRT_MMIO: - if (!uacce->ops->mmap) { - ret = -EINVAL; - goto out_with_lock; - } - - ret = uacce->ops->mmap(q, vma, qfr); - if (ret) - goto out_with_lock; - - break; - case UACCE_QFRT_DUS: if (!uacce->ops->mmap) { ret = -EINVAL; -- cgit v1.2.3 From 385997dc17ab4927bad332c1283dc461a973ab1d Mon Sep 17 00:00:00 2001 From: Kai Ye Date: Mon, 2 Nov 2020 17:39:25 +0800 Subject: uacce: modify the module author information. The spelling of "Hisilicon" is modified. Reviewed-by: Zhou Wang Reviewed-by: Jonathan Cameron Acked-by: Zhangfei Gao Signed-off-by: Kai Ye Link: https://lore.kernel.org/r/1604309965-21752-3-git-send-email-yekai13@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/uacce/uacce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index eb827f18c031..d07af4edfcac 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -522,5 +522,5 @@ subsys_initcall(uacce_init); module_exit(uacce_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Hisilicon Tech. Co., Ltd."); +MODULE_AUTHOR("HiSilicon Tech. Co., Ltd."); MODULE_DESCRIPTION("Accelerator interface for Userland applications"); -- cgit v1.2.3 From 6c20032c22d9823f35cd673b7f69ce73a49582b0 Mon Sep 17 00:00:00 2001 From: Andrew Bridges Date: Tue, 27 Oct 2020 22:56:55 +0000 Subject: Android: binder: added a missing blank line after declaration Fixed a coding style issue. Signed-off-by: Andrew Bridges Link: https://lore.kernel.org/r/20201027225655.650922-1-andrew@slova.app Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index b5117576792b..3241d233a12d 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3614,6 +3614,7 @@ static int binder_thread_write(struct binder_proc *proc, ret = -1; if (increment && !target) { struct binder_node *ctx_mgr_node; + mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; if (ctx_mgr_node) { -- cgit v1.2.3 From 88f6c77927e4aee04e0193fd94e13a55753a72b0 Mon Sep 17 00:00:00 2001 From: Zhang Qilong Date: Mon, 26 Oct 2020 19:03:14 +0800 Subject: binder: change error code from postive to negative in binder_transaction Depending on the context, the error return value here (extra_buffers_size < added_size) should be negative. 
Acked-by: Martijn Coenen Acked-by: Christian Brauner Signed-off-by: Zhang Qilong Link: https://lore.kernel.org/r/20201026110314.135481-1-zhangqilong3@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 3241d233a12d..cf70cb91e9f6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3103,7 +3103,7 @@ static void binder_transaction(struct binder_proc *proc, if (extra_buffers_size < added_size) { /* integer overflow of extra_buffers_size */ return_error = BR_FAILED_REPLY; - return_error_param = EINVAL; + return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_extra_size; } -- cgit v1.2.3 From 17adb469bf1ef3c62e9356ab84449df6cad28ed5 Mon Sep 17 00:00:00 2001 From: Furquan Shaikh Date: Thu, 22 Oct 2020 00:15:50 -0700 Subject: firmware: gsmi: Drop the use of dma_pool_* API functions GSMI driver uses dma_pool_* API functions for buffer allocation because it requires that the SMI buffers are allocated within 32-bit physical address space. However, this does not work well with IOMMU since there is no real device and hence no domain associated with the device. Since this is not a real device, it does not require any device address(IOVA) for the buffer allocations. The only requirement is to ensure that the physical address allocated to the buffer is within 32-bit physical address space. This is because the buffers have nothing to do with DMA at all. It is required for communication with firmware executing in SMI mode which has access only to the bottom 4GiB of memory. Hence, this change switches to using a SLAB cache created with SLAB_CACHE_DMA32 that guarantees that the allocation happens from the DMA32 memory zone. All calls to dma_pool_* are replaced with kmem_cache_*. In addition to that, all the code for managing the dma_pool for GSMI platform device is dropped. 
Signed-off-by: Furquan Shaikh Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20201022071550.1192947-1-furquan@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/google/gsmi.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 7d9367b22010..3d77f26c1e8c 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -85,7 +84,6 @@ struct gsmi_buf { u8 *start; /* start of buffer */ size_t length; /* length of buffer */ - dma_addr_t handle; /* dma allocation handle */ u32 address; /* physical address of buffer */ }; @@ -97,7 +95,7 @@ static struct gsmi_device { spinlock_t lock; /* serialize access to SMIs */ u16 smi_cmd; /* SMI command port */ int handshake_type; /* firmware handler interlock type */ - struct dma_pool *dma_pool; /* DMA buffer pool */ + struct kmem_cache *mem_pool; /* kmem cache for gsmi_buf allocations */ } gsmi_dev; /* Packed structures for communicating with the firmware */ @@ -157,8 +155,7 @@ static struct gsmi_buf *gsmi_buf_alloc(void) } /* allocate buffer in 32bit address space */ - smibuf->start = dma_pool_alloc(gsmi_dev.dma_pool, GFP_KERNEL, - &smibuf->handle); + smibuf->start = kmem_cache_alloc(gsmi_dev.mem_pool, GFP_KERNEL); if (!smibuf->start) { printk(KERN_ERR "gsmi: failed to allocate name buffer\n"); kfree(smibuf); @@ -176,8 +173,7 @@ static void gsmi_buf_free(struct gsmi_buf *smibuf) { if (smibuf) { if (smibuf->start) - dma_pool_free(gsmi_dev.dma_pool, smibuf->start, - smibuf->handle); + kmem_cache_free(gsmi_dev.mem_pool, smibuf->start); kfree(smibuf); } } @@ -914,9 +910,20 @@ static __init int gsmi_init(void) spin_lock_init(&gsmi_dev.lock); ret = -ENOMEM; - gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev, - GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0); - if (!gsmi_dev.dma_pool) + + /* + * SLAB cache is created using SLAB_CACHE_DMA32 to ensure that the + * allocations for gsmi_buf come from the DMA32 memory zone. These + * buffers have nothing to do with DMA. They are required for + * communication with firmware executing in SMI mode which can only + * access the bottom 4GiB of physical memory. Since DMA32 memory zone + * guarantees allocation under the 4GiB boundary, this driver creates + * a SLAB cache with SLAB_CACHE_DMA32 flag. 
+ */ + gsmi_dev.mem_pool = kmem_cache_create("gsmi", GSMI_BUF_SIZE, + GSMI_BUF_ALIGN, + SLAB_CACHE_DMA32, NULL); + if (!gsmi_dev.mem_pool) goto out_err; /* @@ -1032,7 +1039,7 @@ out_err: gsmi_buf_free(gsmi_dev.param_buf); gsmi_buf_free(gsmi_dev.data_buf); gsmi_buf_free(gsmi_dev.name_buf); - dma_pool_destroy(gsmi_dev.dma_pool); + kmem_cache_destroy(gsmi_dev.mem_pool); platform_device_unregister(gsmi_dev.pdev); pr_info("gsmi: failed to load: %d\n", ret); #ifdef CONFIG_PM @@ -1057,7 +1064,7 @@ static void __exit gsmi_exit(void) gsmi_buf_free(gsmi_dev.param_buf); gsmi_buf_free(gsmi_dev.data_buf); gsmi_buf_free(gsmi_dev.name_buf); - dma_pool_destroy(gsmi_dev.dma_pool); + kmem_cache_destroy(gsmi_dev.mem_pool); platform_device_unregister(gsmi_dev.pdev); #ifdef CONFIG_PM platform_driver_unregister(&gsmi_driver_info); -- cgit v1.2.3 From 421518a2740fc95ab3a47109228bf230dee4040b Mon Sep 17 00:00:00 2001 From: "Frankie.Chang" Date: Wed, 11 Nov 2020 11:02:42 +0800 Subject: binder: move structs from core file to header file Moving all structs to header file makes module more extendable, and makes all these structs to be defined in the same file. Signed-off-by: Frankie.Chang Acked-by: Todd Kjos Link: https://lore.kernel.org/r/1605063764-12930-2-git-send-email-Frankie.Chang@mediatek.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 406 -------------------------------------- drivers/android/binder_internal.h | 406 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 406 insertions(+), 406 deletions(-) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cf70cb91e9f6..abf47734f185 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -68,11 +68,9 @@ #include #include -#include #include -#include "binder_alloc.h" #include "binder_internal.h" #include "binder_trace.h" @@ -160,24 +158,6 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error, #define to_binder_fd_array_object(hdr) \ container_of(hdr, struct binder_fd_array_object, hdr) -enum binder_stat_types { - BINDER_STAT_PROC, - BINDER_STAT_THREAD, - BINDER_STAT_NODE, - BINDER_STAT_REF, - BINDER_STAT_DEATH, - BINDER_STAT_TRANSACTION, - BINDER_STAT_TRANSACTION_COMPLETE, - BINDER_STAT_COUNT -}; - -struct binder_stats { - atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; - atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; - atomic_t obj_created[BINDER_STAT_COUNT]; - atomic_t obj_deleted[BINDER_STAT_COUNT]; -}; - static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) @@ -213,278 +193,11 @@ static struct binder_transaction_log_entry *binder_transaction_log_add( return e; } -/** - * struct binder_work - work enqueued on a worklist - * @entry: node enqueued on list - * @type: type of work to be performed - * - * There are separate work lists for proc, thread, and node (async). 
- */ -struct binder_work { - struct list_head entry; - - enum binder_work_type { - BINDER_WORK_TRANSACTION = 1, - BINDER_WORK_TRANSACTION_COMPLETE, - BINDER_WORK_RETURN_ERROR, - BINDER_WORK_NODE, - BINDER_WORK_DEAD_BINDER, - BINDER_WORK_DEAD_BINDER_AND_CLEAR, - BINDER_WORK_CLEAR_DEATH_NOTIFICATION, - } type; -}; - -struct binder_error { - struct binder_work work; - uint32_t cmd; -}; - -/** - * struct binder_node - binder node bookkeeping - * @debug_id: unique ID for debugging - * (invariant after initialized) - * @lock: lock for node fields - * @work: worklist element for node work - * (protected by @proc->inner_lock) - * @rb_node: element for proc->nodes tree - * (protected by @proc->inner_lock) - * @dead_node: element for binder_dead_nodes list - * (protected by binder_dead_nodes_lock) - * @proc: binder_proc that owns this node - * (invariant after initialized) - * @refs: list of references on this node - * (protected by @lock) - * @internal_strong_refs: used to take strong references when - * initiating a transaction - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @local_weak_refs: weak user refs from local process - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @local_strong_refs: strong user refs from local process - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @tmp_refs: temporary kernel refs - * (protected by @proc->inner_lock while @proc - * is valid, and by binder_dead_nodes_lock - * if @proc is NULL. During inc/dec and node release - * it is also protected by @lock to provide safety - * as the node dies and @proc becomes NULL) - * @ptr: userspace pointer for node - * (invariant, no lock needed) - * @cookie: userspace cookie for node - * (invariant, no lock needed) - * @has_strong_ref: userspace notified of strong ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @pending_strong_ref: userspace has acked notification of strong ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @has_weak_ref: userspace notified of weak ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @pending_weak_ref: userspace has acked notification of weak ref - * (protected by @proc->inner_lock if @proc - * and by @lock) - * @has_async_transaction: async transaction to node in progress - * (protected by @lock) - * @accept_fds: file descriptor operations supported for node - * (invariant after initialized) - * @min_priority: minimum scheduling priority - * (invariant after initialized) - * @txn_security_ctx: require sender's security context - * (invariant after initialized) - * @async_todo: list of async work items - * (protected by @proc->inner_lock) - * - * Bookkeeping structure for binder nodes. 
- */ -struct binder_node { - int debug_id; - spinlock_t lock; - struct binder_work work; - union { - struct rb_node rb_node; - struct hlist_node dead_node; - }; - struct binder_proc *proc; - struct hlist_head refs; - int internal_strong_refs; - int local_weak_refs; - int local_strong_refs; - int tmp_refs; - binder_uintptr_t ptr; - binder_uintptr_t cookie; - struct { - /* - * bitfield elements protected by - * proc inner_lock - */ - u8 has_strong_ref:1; - u8 pending_strong_ref:1; - u8 has_weak_ref:1; - u8 pending_weak_ref:1; - }; - struct { - /* - * invariant after initialization - */ - u8 accept_fds:1; - u8 txn_security_ctx:1; - u8 min_priority; - }; - bool has_async_transaction; - struct list_head async_todo; -}; - -struct binder_ref_death { - /** - * @work: worklist element for death notifications - * (protected by inner_lock of the proc that - * this ref belongs to) - */ - struct binder_work work; - binder_uintptr_t cookie; -}; - -/** - * struct binder_ref_data - binder_ref counts and id - * @debug_id: unique ID for the ref - * @desc: unique userspace handle for ref - * @strong: strong ref count (debugging only if not locked) - * @weak: weak ref count (debugging only if not locked) - * - * Structure to hold ref count and ref id information. Since - * the actual ref can only be accessed with a lock, this structure - * is used to return information about the ref to callers of - * ref inc/dec functions. - */ -struct binder_ref_data { - int debug_id; - uint32_t desc; - int strong; - int weak; -}; - -/** - * struct binder_ref - struct to track references on nodes - * @data: binder_ref_data containing id, handle, and current refcounts - * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree - * @rb_node_node: node for lookup by @node in proc's rb_tree - * @node_entry: list entry for node->refs list in target node - * (protected by @node->lock) - * @proc: binder_proc containing ref - * @node: binder_node of target node. When cleaning up a - * ref for deletion in binder_cleanup_ref, a non-NULL - * @node indicates the node must be freed - * @death: pointer to death notification (ref_death) if requested - * (protected by @node->lock) - * - * Structure to track references from procA to target node (on procB). This - * structure is unsafe to access without holding @proc->outer_lock. 
- */ -struct binder_ref { - /* Lookups needed: */ - /* node + proc => ref (transaction) */ - /* desc + proc => ref (transaction, inc/dec ref) */ - /* node => refs + procs (proc exit) */ - struct binder_ref_data data; - struct rb_node rb_node_desc; - struct rb_node rb_node_node; - struct hlist_node node_entry; - struct binder_proc *proc; - struct binder_node *node; - struct binder_ref_death *death; -}; - enum binder_deferred_state { BINDER_DEFERRED_FLUSH = 0x01, BINDER_DEFERRED_RELEASE = 0x02, }; -/** - * struct binder_proc - binder process bookkeeping - * @proc_node: element for binder_procs list - * @threads: rbtree of binder_threads in this proc - * (protected by @inner_lock) - * @nodes: rbtree of binder nodes associated with - * this proc ordered by node->ptr - * (protected by @inner_lock) - * @refs_by_desc: rbtree of refs ordered by ref->desc - * (protected by @outer_lock) - * @refs_by_node: rbtree of refs ordered by ref->node - * (protected by @outer_lock) - * @waiting_threads: threads currently waiting for proc work - * (protected by @inner_lock) - * @pid PID of group_leader of process - * (invariant after initialized) - * @tsk task_struct for group_leader of process - * (invariant after initialized) - * @deferred_work_node: element for binder_deferred_list - * (protected by binder_deferred_lock) - * @deferred_work: bitmap of deferred work to perform - * (protected by binder_deferred_lock) - * @is_dead: process is dead and awaiting free - * when outstanding transactions are cleaned up - * (protected by @inner_lock) - * @todo: list of work for this process - * (protected by @inner_lock) - * @stats: per-process binder statistics - * (atomics, no lock needed) - * @delivered_death: list of delivered death notification - * (protected by @inner_lock) - * @max_threads: cap on number of binder threads - * (protected by @inner_lock) - * @requested_threads: number of binder threads requested but not - * yet started. In current implementation, can - * only be 0 or 1. 
- * (protected by @inner_lock) - * @requested_threads_started: number binder threads started - * (protected by @inner_lock) - * @tmp_ref: temporary reference to indicate proc is in use - * (protected by @inner_lock) - * @default_priority: default scheduler priority - * (invariant after initialized) - * @debugfs_entry: debugfs node - * @alloc: binder allocator bookkeeping - * @context: binder_context for this proc - * (invariant after initialized) - * @inner_lock: can nest under outer_lock and/or node lock - * @outer_lock: no nesting under innor or node lock - * Lock order: 1) outer, 2) node, 3) inner - * @binderfs_entry: process-specific binderfs log file - * - * Bookkeeping structure for binder processes - */ -struct binder_proc { - struct hlist_node proc_node; - struct rb_root threads; - struct rb_root nodes; - struct rb_root refs_by_desc; - struct rb_root refs_by_node; - struct list_head waiting_threads; - int pid; - struct task_struct *tsk; - struct hlist_node deferred_work_node; - int deferred_work; - bool is_dead; - - struct list_head todo; - struct binder_stats stats; - struct list_head delivered_death; - int max_threads; - int requested_threads; - int requested_threads_started; - int tmp_ref; - long default_priority; - struct dentry *debugfs_entry; - struct binder_alloc alloc; - struct binder_context *context; - spinlock_t inner_lock; - spinlock_t outer_lock; - struct dentry *binderfs_entry; -}; - enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, @@ -494,125 +207,6 @@ enum { BINDER_LOOPER_STATE_POLL = 0x20, }; -/** - * struct binder_thread - binder thread bookkeeping - * @proc: binder process for this thread - * (invariant after initialization) - * @rb_node: element for proc->threads rbtree - * (protected by @proc->inner_lock) - * @waiting_thread_node: element for @proc->waiting_threads list - * (protected by @proc->inner_lock) - * @pid: PID for this thread - * (invariant after initialization) - * @looper: bitmap of looping state - * (only accessed by this thread) - * @looper_needs_return: looping thread needs to exit driver - * (no lock needed) - * @transaction_stack: stack of in-progress transactions for this thread - * (protected by @proc->inner_lock) - * @todo: list of work to do for this thread - * (protected by @proc->inner_lock) - * @process_todo: whether work in @todo should be processed - * (protected by @proc->inner_lock) - * @return_error: transaction errors reported by this thread - * (only accessed by this thread) - * @reply_error: transaction errors reported by target thread - * (protected by @proc->inner_lock) - * @wait: wait queue for thread work - * @stats: per-thread statistics - * (atomics, no lock needed) - * @tmp_ref: temporary reference to indicate thread is in use - * (atomic since @proc->inner_lock cannot - * always be acquired) - * @is_dead: thread is dead and awaiting free - * when outstanding transactions are cleaned up - * (protected by @proc->inner_lock) - * - * Bookkeeping structure for binder threads. 
- */ -struct binder_thread { - struct binder_proc *proc; - struct rb_node rb_node; - struct list_head waiting_thread_node; - int pid; - int looper; /* only modified by this thread */ - bool looper_need_return; /* can be written by other thread */ - struct binder_transaction *transaction_stack; - struct list_head todo; - bool process_todo; - struct binder_error return_error; - struct binder_error reply_error; - wait_queue_head_t wait; - struct binder_stats stats; - atomic_t tmp_ref; - bool is_dead; -}; - -/** - * struct binder_txn_fd_fixup - transaction fd fixup list element - * @fixup_entry: list entry - * @file: struct file to be associated with new fd - * @offset: offset in buffer data to this fixup - * - * List element for fd fixups in a transaction. Since file - * descriptors need to be allocated in the context of the - * target process, we pass each fd to be processed in this - * struct. - */ -struct binder_txn_fd_fixup { - struct list_head fixup_entry; - struct file *file; - size_t offset; -}; - -struct binder_transaction { - int debug_id; - struct binder_work work; - struct binder_thread *from; - struct binder_transaction *from_parent; - struct binder_proc *to_proc; - struct binder_thread *to_thread; - struct binder_transaction *to_parent; - unsigned need_reply:1; - /* unsigned is_dead:1; */ /* not used at the moment */ - - struct binder_buffer *buffer; - unsigned int code; - unsigned int flags; - long priority; - long saved_priority; - kuid_t sender_euid; - struct list_head fd_fixups; - binder_uintptr_t security_ctx; - /** - * @lock: protects @from, @to_proc, and @to_thread - * - * @from, @to_proc, and @to_thread can be set to NULL - * during thread teardown - */ - spinlock_t lock; -}; - -/** - * struct binder_object - union of flat binder object types - * @hdr: generic object header - * @fbo: binder object (nodes and refs) - * @fdo: file descriptor object - * @bbo: binder buffer pointer - * @fdao: file descriptor array - * - * Used for type-independent object copies - */ -struct binder_object { - union { - struct binder_object_header hdr; - struct flat_binder_object fbo; - struct binder_fd_object fdo; - struct binder_buffer_object bbo; - struct binder_fd_array_object fdao; - }; -}; - /** * binder_proc_lock() - Acquire outer lock for given binder_proc * @proc: struct binder_proc to acquire diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 283d3cb9c16e..6cd79011e35d 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include "binder_alloc.h" struct binder_context { struct binder_node *binder_context_mgr_node; @@ -141,6 +143,410 @@ struct binder_transaction_log { struct binder_transaction_log_entry entry[32]; }; +enum binder_stat_types { + BINDER_STAT_PROC, + BINDER_STAT_THREAD, + BINDER_STAT_NODE, + BINDER_STAT_REF, + BINDER_STAT_DEATH, + BINDER_STAT_TRANSACTION, + BINDER_STAT_TRANSACTION_COMPLETE, + BINDER_STAT_COUNT +}; + +struct binder_stats { + atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1]; + atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1]; + atomic_t obj_created[BINDER_STAT_COUNT]; + atomic_t obj_deleted[BINDER_STAT_COUNT]; +}; + +/** + * struct binder_work - work enqueued on a worklist + * @entry: node enqueued on list + * @type: type of work to be performed + * + * There are separate work lists for proc, thread, and node (async). 
+ */ +struct binder_work { + struct list_head entry; + + enum binder_work_type { + BINDER_WORK_TRANSACTION = 1, + BINDER_WORK_TRANSACTION_COMPLETE, + BINDER_WORK_RETURN_ERROR, + BINDER_WORK_NODE, + BINDER_WORK_DEAD_BINDER, + BINDER_WORK_DEAD_BINDER_AND_CLEAR, + BINDER_WORK_CLEAR_DEATH_NOTIFICATION, + } type; +}; + +struct binder_error { + struct binder_work work; + uint32_t cmd; +}; + +/** + * struct binder_node - binder node bookkeeping + * @debug_id: unique ID for debugging + * (invariant after initialized) + * @lock: lock for node fields + * @work: worklist element for node work + * (protected by @proc->inner_lock) + * @rb_node: element for proc->nodes tree + * (protected by @proc->inner_lock) + * @dead_node: element for binder_dead_nodes list + * (protected by binder_dead_nodes_lock) + * @proc: binder_proc that owns this node + * (invariant after initialized) + * @refs: list of references on this node + * (protected by @lock) + * @internal_strong_refs: used to take strong references when + * initiating a transaction + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_weak_refs: weak user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @local_strong_refs: strong user refs from local process + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @tmp_refs: temporary kernel refs + * (protected by @proc->inner_lock while @proc + * is valid, and by binder_dead_nodes_lock + * if @proc is NULL. During inc/dec and node release + * it is also protected by @lock to provide safety + * as the node dies and @proc becomes NULL) + * @ptr: userspace pointer for node + * (invariant, no lock needed) + * @cookie: userspace cookie for node + * (invariant, no lock needed) + * @has_strong_ref: userspace notified of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_strong_ref: userspace has acked notification of strong ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_weak_ref: userspace notified of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @pending_weak_ref: userspace has acked notification of weak ref + * (protected by @proc->inner_lock if @proc + * and by @lock) + * @has_async_transaction: async transaction to node in progress + * (protected by @lock) + * @accept_fds: file descriptor operations supported for node + * (invariant after initialized) + * @min_priority: minimum scheduling priority + * (invariant after initialized) + * @txn_security_ctx: require sender's security context + * (invariant after initialized) + * @async_todo: list of async work items + * (protected by @proc->inner_lock) + * + * Bookkeeping structure for binder nodes. 
+ */ +struct binder_node { + int debug_id; + spinlock_t lock; + struct binder_work work; + union { + struct rb_node rb_node; + struct hlist_node dead_node; + }; + struct binder_proc *proc; + struct hlist_head refs; + int internal_strong_refs; + int local_weak_refs; + int local_strong_refs; + int tmp_refs; + binder_uintptr_t ptr; + binder_uintptr_t cookie; + struct { + /* + * bitfield elements protected by + * proc inner_lock + */ + u8 has_strong_ref:1; + u8 pending_strong_ref:1; + u8 has_weak_ref:1; + u8 pending_weak_ref:1; + }; + struct { + /* + * invariant after initialization + */ + u8 accept_fds:1; + u8 txn_security_ctx:1; + u8 min_priority; + }; + bool has_async_transaction; + struct list_head async_todo; +}; + +struct binder_ref_death { + /** + * @work: worklist element for death notifications + * (protected by inner_lock of the proc that + * this ref belongs to) + */ + struct binder_work work; + binder_uintptr_t cookie; +}; + +/** + * struct binder_ref_data - binder_ref counts and id + * @debug_id: unique ID for the ref + * @desc: unique userspace handle for ref + * @strong: strong ref count (debugging only if not locked) + * @weak: weak ref count (debugging only if not locked) + * + * Structure to hold ref count and ref id information. Since + * the actual ref can only be accessed with a lock, this structure + * is used to return information about the ref to callers of + * ref inc/dec functions. + */ +struct binder_ref_data { + int debug_id; + uint32_t desc; + int strong; + int weak; +}; + +/** + * struct binder_ref - struct to track references on nodes + * @data: binder_ref_data containing id, handle, and current refcounts + * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree + * @rb_node_node: node for lookup by @node in proc's rb_tree + * @node_entry: list entry for node->refs list in target node + * (protected by @node->lock) + * @proc: binder_proc containing ref + * @node: binder_node of target node. When cleaning up a + * ref for deletion in binder_cleanup_ref, a non-NULL + * @node indicates the node must be freed + * @death: pointer to death notification (ref_death) if requested + * (protected by @node->lock) + * + * Structure to track references from procA to target node (on procB). This + * structure is unsafe to access without holding @proc->outer_lock. 
+ */ +struct binder_ref { + /* Lookups needed: */ + /* node + proc => ref (transaction) */ + /* desc + proc => ref (transaction, inc/dec ref) */ + /* node => refs + procs (proc exit) */ + struct binder_ref_data data; + struct rb_node rb_node_desc; + struct rb_node rb_node_node; + struct hlist_node node_entry; + struct binder_proc *proc; + struct binder_node *node; + struct binder_ref_death *death; +}; + +/** + * struct binder_proc - binder process bookkeeping + * @proc_node: element for binder_procs list + * @threads: rbtree of binder_threads in this proc + * (protected by @inner_lock) + * @nodes: rbtree of binder nodes associated with + * this proc ordered by node->ptr + * (protected by @inner_lock) + * @refs_by_desc: rbtree of refs ordered by ref->desc + * (protected by @outer_lock) + * @refs_by_node: rbtree of refs ordered by ref->node + * (protected by @outer_lock) + * @waiting_threads: threads currently waiting for proc work + * (protected by @inner_lock) + * @pid PID of group_leader of process + * (invariant after initialized) + * @tsk task_struct for group_leader of process + * (invariant after initialized) + * @deferred_work_node: element for binder_deferred_list + * (protected by binder_deferred_lock) + * @deferred_work: bitmap of deferred work to perform + * (protected by binder_deferred_lock) + * @is_dead: process is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @inner_lock) + * @todo: list of work for this process + * (protected by @inner_lock) + * @stats: per-process binder statistics + * (atomics, no lock needed) + * @delivered_death: list of delivered death notification + * (protected by @inner_lock) + * @max_threads: cap on number of binder threads + * (protected by @inner_lock) + * @requested_threads: number of binder threads requested but not + * yet started. In current implementation, can + * only be 0 or 1. 
+ * (protected by @inner_lock) + * @requested_threads_started: number binder threads started + * (protected by @inner_lock) + * @tmp_ref: temporary reference to indicate proc is in use + * (protected by @inner_lock) + * @default_priority: default scheduler priority + * (invariant after initialized) + * @debugfs_entry: debugfs node + * @alloc: binder allocator bookkeeping + * @context: binder_context for this proc + * (invariant after initialized) + * @inner_lock: can nest under outer_lock and/or node lock + * @outer_lock: no nesting under innor or node lock + * Lock order: 1) outer, 2) node, 3) inner + * @binderfs_entry: process-specific binderfs log file + * + * Bookkeeping structure for binder processes + */ +struct binder_proc { + struct hlist_node proc_node; + struct rb_root threads; + struct rb_root nodes; + struct rb_root refs_by_desc; + struct rb_root refs_by_node; + struct list_head waiting_threads; + int pid; + struct task_struct *tsk; + struct hlist_node deferred_work_node; + int deferred_work; + bool is_dead; + + struct list_head todo; + struct binder_stats stats; + struct list_head delivered_death; + int max_threads; + int requested_threads; + int requested_threads_started; + int tmp_ref; + long default_priority; + struct dentry *debugfs_entry; + struct binder_alloc alloc; + struct binder_context *context; + spinlock_t inner_lock; + spinlock_t outer_lock; + struct dentry *binderfs_entry; +}; + +/** + * struct binder_thread - binder thread bookkeeping + * @proc: binder process for this thread + * (invariant after initialization) + * @rb_node: element for proc->threads rbtree + * (protected by @proc->inner_lock) + * @waiting_thread_node: element for @proc->waiting_threads list + * (protected by @proc->inner_lock) + * @pid: PID for this thread + * (invariant after initialization) + * @looper: bitmap of looping state + * (only accessed by this thread) + * @looper_needs_return: looping thread needs to exit driver + * (no lock needed) + * @transaction_stack: stack of in-progress transactions for this thread + * (protected by @proc->inner_lock) + * @todo: list of work to do for this thread + * (protected by @proc->inner_lock) + * @process_todo: whether work in @todo should be processed + * (protected by @proc->inner_lock) + * @return_error: transaction errors reported by this thread + * (only accessed by this thread) + * @reply_error: transaction errors reported by target thread + * (protected by @proc->inner_lock) + * @wait: wait queue for thread work + * @stats: per-thread statistics + * (atomics, no lock needed) + * @tmp_ref: temporary reference to indicate thread is in use + * (atomic since @proc->inner_lock cannot + * always be acquired) + * @is_dead: thread is dead and awaiting free + * when outstanding transactions are cleaned up + * (protected by @proc->inner_lock) + * + * Bookkeeping structure for binder threads. 
+ */ +struct binder_thread { + struct binder_proc *proc; + struct rb_node rb_node; + struct list_head waiting_thread_node; + int pid; + int looper; /* only modified by this thread */ + bool looper_need_return; /* can be written by other thread */ + struct binder_transaction *transaction_stack; + struct list_head todo; + bool process_todo; + struct binder_error return_error; + struct binder_error reply_error; + wait_queue_head_t wait; + struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; +}; + +/** + * struct binder_txn_fd_fixup - transaction fd fixup list element + * @fixup_entry: list entry + * @file: struct file to be associated with new fd + * @offset: offset in buffer data to this fixup + * + * List element for fd fixups in a transaction. Since file + * descriptors need to be allocated in the context of the + * target process, we pass each fd to be processed in this + * struct. + */ +struct binder_txn_fd_fixup { + struct list_head fixup_entry; + struct file *file; + size_t offset; +}; + +struct binder_transaction { + int debug_id; + struct binder_work work; + struct binder_thread *from; + struct binder_transaction *from_parent; + struct binder_proc *to_proc; + struct binder_thread *to_thread; + struct binder_transaction *to_parent; + unsigned need_reply:1; + /* unsigned is_dead:1; */ /* not used at the moment */ + + struct binder_buffer *buffer; + unsigned int code; + unsigned int flags; + long priority; + long saved_priority; + kuid_t sender_euid; + struct list_head fd_fixups; + binder_uintptr_t security_ctx; + /** + * @lock: protects @from, @to_proc, and @to_thread + * + * @from, @to_proc, and @to_thread can be set to NULL + * during thread teardown + */ + spinlock_t lock; +}; + +/** + * struct binder_object - union of flat binder object types + * @hdr: generic object header + * @fbo: binder object (nodes and refs) + * @fdo: file descriptor object + * @bbo: binder buffer pointer + * @fdao: file descriptor array + * + * Used for type-independent object copies + */ +struct binder_object { + union { + struct binder_object_header hdr; + struct flat_binder_object fbo; + struct binder_fd_object fdo; + struct binder_buffer_object bbo; + struct binder_fd_array_object fdao; + }; +}; + extern struct binder_transaction_log binder_transaction_log; extern struct binder_transaction_log binder_transaction_log_failed; #endif /* _LINUX_BINDER_INTERNAL_H */ -- cgit v1.2.3 From 1987f112f1425cba2671d878f6952087e9456a0a Mon Sep 17 00:00:00 2001 From: "Frankie.Chang" Date: Wed, 11 Nov 2020 11:02:43 +0800 Subject: binder: add trace at free transaction. Since the original trace_binder_transaction_received cannot precisely present the real finished time of transaction, adding a trace_binder_txn_latency_free at the point of free transaction may be more close to it. 
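The guard relies on the trace_<event>_enabled() helper that every TRACE_EVENT() generates (backed by a static key), so the pid snapshot taken under t->lock is skipped entirely while the tracepoint is disabled. As a rough sketch of that idiom, using a hypothetical event foo_latency standing in for the real one added below:

  /*
   * Sketch only: assumes TRACE_EVENT(foo_latency, ...) is defined in a
   * trace header elsewhere; "foo" and its fields are illustrative.
   */
  struct foo {
          spinlock_t lock;
          int a, b;
  };

  static void foo_report_latency(struct foo *obj)
  {
          int a, b;

          if (!trace_foo_latency_enabled())       /* static-key test, ~free when off */
                  return;

          spin_lock(&obj->lock);                  /* snapshot fields only when tracing */
          a = obj->a;
          b = obj->b;
          spin_unlock(&obj->lock);

          trace_foo_latency(obj, a, b);
  }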
Signed-off-by: Frankie.Chang Acked-by: Todd Kjos Link: https://lore.kernel.org/r/1605063764-12930-3-git-send-email-Frankie.Chang@mediatek.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 18 ++++++++++++++++++ drivers/android/binder_trace.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index abf47734f185..20b08f52e788 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -1486,6 +1486,20 @@ static void binder_free_txn_fixups(struct binder_transaction *t) } } +static void binder_txn_latency_free(struct binder_transaction *t) +{ + int from_proc, from_thread, to_proc, to_thread; + + spin_lock(&t->lock); + from_proc = t->from ? t->from->proc->pid : 0; + from_thread = t->from ? t->from->pid : 0; + to_proc = t->to_proc ? t->to_proc->pid : 0; + to_thread = t->to_thread ? t->to_thread->pid : 0; + spin_unlock(&t->lock); + + trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread); +} + static void binder_free_transaction(struct binder_transaction *t) { struct binder_proc *target_proc = t->to_proc; @@ -1496,6 +1510,8 @@ static void binder_free_transaction(struct binder_transaction *t) t->buffer->transaction = NULL; binder_inner_proc_unlock(target_proc); } + if (trace_binder_txn_latency_free_enabled()) + binder_txn_latency_free(t); /* * If the transaction has no target_proc, then * t->buffer->transaction has already been cleared. @@ -3073,6 +3089,8 @@ err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: + if (trace_binder_txn_latency_free_enabled()) + binder_txn_latency_free(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 6731c3cd8145..8eeccdc64724 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -95,6 +95,35 @@ TRACE_EVENT(binder_wait_for_work, __entry->thread_todo) ); +TRACE_EVENT(binder_txn_latency_free, + TP_PROTO(struct binder_transaction *t, + int from_proc, int from_thread, + int to_proc, int to_thread), + TP_ARGS(t, from_proc, from_thread, to_proc, to_thread), + TP_STRUCT__entry( + __field(int, debug_id) + __field(int, from_proc) + __field(int, from_thread) + __field(int, to_proc) + __field(int, to_thread) + __field(unsigned int, code) + __field(unsigned int, flags) + ), + TP_fast_assign( + __entry->debug_id = t->debug_id; + __entry->from_proc = from_proc; + __entry->from_thread = from_thread; + __entry->to_proc = to_proc; + __entry->to_thread = to_thread; + __entry->code = t->code; + __entry->flags = t->flags; + ), + TP_printk("transaction=%d from %d:%d to %d:%d flags=0x%x code=0x%x", + __entry->debug_id, __entry->from_proc, __entry->from_thread, + __entry->to_proc, __entry->to_thread, __entry->code, + __entry->flags) +); + TRACE_EVENT(binder_transaction, TP_PROTO(bool reply, struct binder_transaction *t, struct binder_node *target_node), -- cgit v1.2.3 From 439e8f6f1e5d1ca973da278499078e213dad63bb Mon Sep 17 00:00:00 2001 From: Ivan Zaentsev Date: Thu, 12 Nov 2020 09:49:31 +0300 Subject: w1: w1_therm: Rename conflicting sysfs attribute 'eeprom' to 'eeprom_cmd' Duplicate attribute 'eeprom' is defined in: 1) Documentation/ABI/testing/sysfs-driver-w1_therm 2) Documentation/ABI/stable/sysfs-driver-w1_ds28e04 Both drivers define an attribute: /sys/bus/w1/devices/.../eeprom with conflicting behavior. 
Fix by renaming the newer one in w1_therm.c to 'eeprom_cmd'. Reported-by: Mauro Carvalho Chehab Suggested-by: Greg Kroah-Hartman Link: https://lore.kernel.org/lkml/20201029152845.6bbb39ce@coco.lan/ Signed-off-by: Ivan Zaentsev Link: https://lore.kernel.org/r/20201112064931.8471-1-ivan.zaentsev@wirenboard.ru Signed-off-by: Greg Kroah-Hartman --- Documentation/ABI/testing/sysfs-driver-w1_therm | 2 +- Documentation/w1/slaves/w1_therm.rst | 2 +- drivers/w1/slaves/w1_therm.c | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/Documentation/ABI/testing/sysfs-driver-w1_therm b/Documentation/ABI/testing/sysfs-driver-w1_therm index 6a37dc33ffdb..74642c73d29c 100644 --- a/Documentation/ABI/testing/sysfs-driver-w1_therm +++ b/Documentation/ABI/testing/sysfs-driver-w1_therm @@ -14,7 +14,7 @@ Users: any user space application which wants to communicate with w1_term device -What: /sys/bus/w1/devices/.../eeprom +What: /sys/bus/w1/devices/.../eeprom_cmd Date: May 2020 Contact: Akira Shimahara Description: diff --git a/Documentation/w1/slaves/w1_therm.rst b/Documentation/w1/slaves/w1_therm.rst index e39202e2b000..c3c9ed7a356c 100644 --- a/Documentation/w1/slaves/w1_therm.rst +++ b/Documentation/w1/slaves/w1_therm.rst @@ -82,7 +82,7 @@ resolution is read back from the chip and verified. Note: Changing the resolution reverts the conversion time to default. -The write-only sysfs entry ``eeprom`` is an alternative for EEPROM operations. +The write-only sysfs entry ``eeprom_cmd`` is an alternative for EEPROM operations. Write ``save`` to save device RAM to EEPROM. Write ``restore`` to restore EEPROM data in device RAM. diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index cddf60b7309c..3712b1e6dc71 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -315,7 +315,7 @@ static ssize_t resolution_show(struct device *device, static ssize_t resolution_store(struct device *device, struct device_attribute *attr, const char *buf, size_t size); -static ssize_t eeprom_store(struct device *device, +static ssize_t eeprom_cmd_store(struct device *device, struct device_attribute *attr, const char *buf, size_t size); static ssize_t alarms_store(struct device *device, @@ -350,7 +350,7 @@ static DEVICE_ATTR_RO(w1_seq); static DEVICE_ATTR_RO(temperature); static DEVICE_ATTR_RO(ext_power); static DEVICE_ATTR_RW(resolution); -static DEVICE_ATTR_WO(eeprom); +static DEVICE_ATTR_WO(eeprom_cmd); static DEVICE_ATTR_RW(alarms); static DEVICE_ATTR_RW(conv_time); static DEVICE_ATTR_RW(features); @@ -386,7 +386,7 @@ static struct attribute *w1_therm_attrs[] = { &dev_attr_temperature.attr, &dev_attr_ext_power.attr, &dev_attr_resolution.attr, - &dev_attr_eeprom.attr, + &dev_attr_eeprom_cmd.attr, &dev_attr_alarms.attr, &dev_attr_conv_time.attr, &dev_attr_features.attr, @@ -397,7 +397,7 @@ static struct attribute *w1_ds18s20_attrs[] = { &dev_attr_w1_slave.attr, &dev_attr_temperature.attr, &dev_attr_ext_power.attr, - &dev_attr_eeprom.attr, + &dev_attr_eeprom_cmd.attr, &dev_attr_alarms.attr, &dev_attr_conv_time.attr, &dev_attr_features.attr, @@ -410,7 +410,7 @@ static struct attribute *w1_ds28ea00_attrs[] = { &dev_attr_temperature.attr, &dev_attr_ext_power.attr, &dev_attr_resolution.attr, - &dev_attr_eeprom.attr, + &dev_attr_eeprom_cmd.attr, &dev_attr_alarms.attr, &dev_attr_conv_time.attr, &dev_attr_features.attr, @@ -1740,7 +1740,7 @@ static ssize_t resolution_store(struct device *device, return size; } -static ssize_t eeprom_store(struct 
device *device, +static ssize_t eeprom_cmd_store(struct device *device, struct device_attribute *attr, const char *buf, size_t size) { struct w1_slave *sl = dev_to_w1_slave(device); -- cgit v1.2.3 From cfd3443e2d32a655871a445b3509e5df96fa7581 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 11 Nov 2020 13:22:41 +0200 Subject: uio: pruss: move simple allocations to dem_ equivalents This change moves the simple allocations to their device-managed equivalents. This cleans up some error/exit paths. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201111112242.62116-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_pruss.c | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c index 1cc175d3c25c..41470c4dba02 100644 --- a/drivers/uio/uio_pruss.c +++ b/drivers/uio/uio_pruss.c @@ -99,7 +99,6 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev) for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) { uio_unregister_device(p); - kfree(p->name); } iounmap(gdev->prussio_vaddr); if (gdev->ddr_vaddr) { @@ -110,10 +109,8 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev) gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz); - kfree(gdev->info); clk_disable(gdev->pruss_clk); clk_put(gdev->pruss_clk); - kfree(gdev); } static int pruss_probe(struct platform_device *pdev) @@ -125,22 +122,19 @@ static int pruss_probe(struct platform_device *pdev) int ret, cnt, i, len; struct uio_pruss_pdata *pdata = dev_get_platdata(dev); - gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL); + gdev = devm_kzalloc(dev, sizeof(struct uio_pruss_dev), GFP_KERNEL); if (!gdev) return -ENOMEM; - gdev->info = kcalloc(MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL); - if (!gdev->info) { - ret = -ENOMEM; - goto err_free_gdev; - } + gdev->info = devm_kcalloc(dev, MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL); + if (!gdev->info) + return -ENOMEM; /* Power on PRU in case its not done as part of boot-loader */ gdev->pruss_clk = clk_get(dev, "pruss"); if (IS_ERR(gdev->pruss_clk)) { dev_err(dev, "Failed to get clock\n"); - ret = PTR_ERR(gdev->pruss_clk); - goto err_free_info; + return PTR_ERR(gdev->pruss_clk); } ret = clk_enable(gdev->pruss_clk); @@ -206,7 +200,7 @@ static int pruss_probe(struct platform_device *pdev) p->mem[2].size = extram_pool_sz; p->mem[2].memtype = UIO_MEM_PHYS; - p->name = kasprintf(GFP_KERNEL, "pruss_evt%d", cnt); + p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt); p->version = DRV_VERSION; /* Register PRUSS IRQ lines */ @@ -215,10 +209,8 @@ static int pruss_probe(struct platform_device *pdev) p->priv = gdev; ret = uio_register_device(dev, p); - if (ret < 0) { - kfree(p->name); + if (ret < 0) goto err_unloop; - } } platform_set_drvdata(pdev, gdev); @@ -227,7 +219,6 @@ static int pruss_probe(struct platform_device *pdev) err_unloop: for (i = 0, p = gdev->info; i < cnt; i++, p++) { uio_unregister_device(p); - kfree(p->name); } iounmap(gdev->prussio_vaddr); err_free_ddr_vaddr: @@ -240,10 +231,6 @@ err_clk_disable: clk_disable(gdev->pruss_clk); err_clk_put: clk_put(gdev->pruss_clk); -err_free_info: - kfree(gdev->info); -err_free_gdev: - kfree(gdev); return ret; } -- cgit v1.2.3 From 60aa8782d2bf93379beffc0525e0e7496d578851 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Sat, 7 Nov 2020 16:19:39 +0800 Subject: firmware: fix spelling typo of 'wtih' wtih -> with Acked-by: Nicolas Saenz Julienne 
Signed-off-by: Wang Qing Link: https://lore.kernel.org/r/1604737181-14464-1-git-send-email-wangqing@vivo.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/raspberrypi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 2371d08bdd17..30259dc9b805 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Defines interfaces for interacting wtih the Raspberry Pi firmware's + * Defines interfaces for interacting with the Raspberry Pi firmware's * property channel. * * Copyright © 2015 Broadcom -- cgit v1.2.3 From 667aef00f3f32eb10b959cb7f3bbd28e87049a4f Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Sat, 7 Nov 2020 14:33:35 +0100 Subject: eeprom: at25: Add example part numbers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To save the interested reader some time, add examples of AT25 part numbers that correspond to EEPROMs rather than flashes. Signed-off-by: Jonathan Neuschäfer Link: https://lore.kernel.org/r/20201107133337.1066271-1-j.neuschaefer@gmx.net Signed-off-by: Greg Kroah-Hartman --- drivers/misc/eeprom/at25.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 3b7d8b7584f4..b76e4901b4a4 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -22,6 +22,9 @@ * mean that some AT25 products are EEPROMs, and others are FLASH. * Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver, * not this one! + * + * EEPROMs that can be used with this driver include, for example: + * AT25M02, AT25128B */ struct at25_data { -- cgit v1.2.3 From ef84928cff589c6b42e16b3ef7d2d95469128c80 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Wed, 11 Nov 2020 13:38:28 +0200 Subject: uio/uio_pci_generic: use device-managed function equivalents This driver can be easily converted to use the device-managed allocator function and the PCI managed enable function. With these conversions the probe error paths are no longer needed and neither is the remove function. 
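Both replacements are managed resources: pcim_enable_device() arranges for the PCI device to be disabled when the driver is unbound, and devm_kzalloc()/devm_uio_register_device() tie the private data and the UIO registration to the same device lifetime, released in reverse order by the driver core. A minimal sketch of the resulting probe shape (names are placeholders; the actual conversion is in the diff below):

  /* assumes the usual <linux/pci.h>, <linux/device.h>, <linux/uio_driver.h> */
  struct foo_dev {
          struct uio_info info;           /* illustrative private state */
  };

  static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
          struct foo_dev *fd;
          int err;

          err = pcim_enable_device(pdev); /* undone automatically on unbind */
          if (err)
                  return err;

          fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
          if (!fd)
                  return -ENOMEM;         /* nothing to unwind by hand */

          pci_set_drvdata(pdev, fd);
          return 0;                       /* no matching remove() needed */
  }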
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201111113828.64992-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_pci_generic.c | 37 +++++++++---------------------------- 1 file changed, 9 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c index dde5cbb27178..1c6c09e1280d 100644 --- a/drivers/uio/uio_pci_generic.c +++ b/drivers/uio/uio_pci_generic.c @@ -74,23 +74,19 @@ static int probe(struct pci_dev *pdev, struct uio_pci_generic_dev *gdev; int err; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { dev_err(&pdev->dev, "%s: pci_enable_device failed: %d\n", __func__, err); return err; } - if (pdev->irq && !pci_intx_mask_supported(pdev)) { - err = -ENODEV; - goto err_verify; - } + if (pdev->irq && !pci_intx_mask_supported(pdev)) + return -ENOMEM; - gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL); - if (!gdev) { - err = -ENOMEM; - goto err_alloc; - } + gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL); + if (!gdev) + return -ENOMEM; gdev->info.name = "uio_pci_generic"; gdev->info.version = DRIVER_VERSION; @@ -105,34 +101,19 @@ static int probe(struct pci_dev *pdev, "no support for interrupts?\n"); } - err = uio_register_device(&pdev->dev, &gdev->info); + err = devm_uio_register_device(&pdev->dev, &gdev->info); if (err) - goto err_register; + return err; + pci_set_drvdata(pdev, gdev); return 0; -err_register: - kfree(gdev); -err_alloc: -err_verify: - pci_disable_device(pdev); - return err; -} - -static void remove(struct pci_dev *pdev) -{ - struct uio_pci_generic_dev *gdev = pci_get_drvdata(pdev); - - uio_unregister_device(&gdev->info); - pci_disable_device(pdev); - kfree(gdev); } static struct pci_driver uio_pci_driver = { .name = "uio_pci_generic", .id_table = NULL, /* only dynamic id's */ .probe = probe, - .remove = remove, }; module_pci_driver(uio_pci_driver); -- cgit v1.2.3 From 6edf7700a9dde8d3e494d1b390b5284329642a70 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 27 Oct 2020 22:30:30 +0530 Subject: phy: qcom-qmp: Add SM8250 PCIe QMP PHYs SM8250 has multiple different PHY versions: QMP GEN3x1 PHY - 1 lane QMP GEN3x2 PHY - 2 lanes QMP Modem PHY - 2 lanes Add support for these with relevant init sequence. In order to abstract the init sequence, this commit introduces secondary tables which can be used to factor out the unique sequence for each PHY while the former tables can have the common sequence. 
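Each block is programmed in two passes: the shared table is written first, and the optional *_sec table then layers the variant-specific registers on top. For the serdes block this takes the following shape (the same pattern is applied to the tx, rx, pcs and pcs_misc tables in the diff below):

  qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
                         cfg->serdes_tbl_num);
  if (cfg->serdes_tbl_sec)
          qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl_sec,
                                 cfg->serdes_tbl_num_sec);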
Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20201027170033.8475-3-manivannan.sadhasivam@linaro.org Signed-off-by: Vinod Koul --- drivers/phy/qualcomm/phy-qcom-qmp.c | 281 +++++++++++++++++++++++++++++++++++- drivers/phy/qualcomm/phy-qcom-qmp.h | 18 +++ 2 files changed, 295 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 5d33ad4d06f2..5a941730377f 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -217,6 +217,13 @@ static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_PCS_READY_STATUS] = 0x160, }; +static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = { + [QPHY_SW_RESET] = 0x00, + [QPHY_START_CTRL] = 0x44, + [QPHY_PCS_STATUS] = 0x14, + [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40, +}; + static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = { [QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START, [QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS, @@ -1824,6 +1831,149 @@ static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21), }; +static const struct qmp_phy_init_tbl sm8250_qmp_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_SELECT, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_IVCO, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x42), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0xb4), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0x55), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x68), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xaa), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07), + 
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0x4c), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V4_COM_CLK_ENABLE1, 0x90), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x07), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x35), + QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x70), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_ENABLES, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_LOW, 0xd4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH, 0x54), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xdb), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x24), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xe4), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xec), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x3b), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x36), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RCLK_AUXDATA_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0x77), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_RATE_SLEW_CNTRL1, 0x0b), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x12), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + 
QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L, 0x01), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE, 0x33), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_PRE, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_PRESET_P10_POST, 0x58), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x1_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_EQ_CONFIG2, 0x0f), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x05), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG2, 0x0f), +}; + +static const struct qmp_phy_init_tbl sm8250_qmp_gen3x2_pcie_pcs_misc_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4, 0x07), +}; + /* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { /* phy-type - PCIE/UFS/USB */ @@ -1834,14 +1984,24 @@ struct qmp_phy_cfg { /* Init sequence for PHY blocks - serdes, tx, rx, pcs */ const struct qmp_phy_init_tbl *serdes_tbl; int serdes_tbl_num; + const struct qmp_phy_init_tbl *serdes_tbl_sec; + int serdes_tbl_num_sec; const struct qmp_phy_init_tbl *tx_tbl; int tx_tbl_num; + const struct qmp_phy_init_tbl *tx_tbl_sec; + int tx_tbl_num_sec; const struct qmp_phy_init_tbl *rx_tbl; int rx_tbl_num; + const struct qmp_phy_init_tbl *rx_tbl_sec; + int rx_tbl_num_sec; const struct qmp_phy_init_tbl *pcs_tbl; int pcs_tbl_num; + const struct qmp_phy_init_tbl *pcs_tbl_sec; + int pcs_tbl_num_sec; const struct qmp_phy_init_tbl *pcs_misc_tbl; int pcs_misc_tbl_num; + const struct qmp_phy_init_tbl *pcs_misc_tbl_sec; + int pcs_misc_tbl_num_sec; /* Init sequence for DP PHY block link rates */ const struct qmp_phy_init_tbl *serdes_tbl_rbr; @@ -2245,6 +2405,83 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = { .pwrdn_delay_max = 1005, /* us */ }; +static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 1, + + .serdes_tbl = sm8250_qmp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), + .serdes_tbl_sec = sm8250_qmp_gen3x1_pcie_serdes_tbl, + .serdes_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_serdes_tbl), + .tx_tbl = sm8250_qmp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), + .rx_tbl = sm8250_qmp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), + .rx_tbl_sec = sm8250_qmp_gen3x1_pcie_rx_tbl, + .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_rx_tbl), + .pcs_tbl = sm8250_qmp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), + .pcs_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_tbl, + .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_tbl), + .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), + .pcs_misc_tbl_sec = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl), + .clk_list 
= sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + +static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = { + .type = PHY_TYPE_PCIE, + .nlanes = 2, + + .serdes_tbl = sm8250_qmp_pcie_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl), + .tx_tbl = sm8250_qmp_pcie_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_tx_tbl), + .tx_tbl_sec = sm8250_qmp_gen3x2_pcie_tx_tbl, + .tx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_tx_tbl), + .rx_tbl = sm8250_qmp_pcie_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_rx_tbl), + .rx_tbl_sec = sm8250_qmp_gen3x2_pcie_rx_tbl, + .rx_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_rx_tbl), + .pcs_tbl = sm8250_qmp_pcie_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_tbl), + .pcs_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_tbl, + .pcs_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_tbl), + .pcs_misc_tbl = sm8250_qmp_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num = ARRAY_SIZE(sm8250_qmp_pcie_pcs_misc_tbl), + .pcs_misc_tbl_sec = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl, + .pcs_misc_tbl_num_sec = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl), + .clk_list = sdm845_pciephy_clk_l, + .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l), + .reset_list = sdm845_pciephy_reset_l, + .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = sm8250_pcie_regs_layout, + + .start_ctrl = PCS_START | SERDES_START, + .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, + + .is_dual_lane_phy = true, + .has_pwrdn_delay = true, + .pwrdn_delay_min = 995, /* us */ + .pwrdn_delay_max = 1005, /* us */ +}; + static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -2629,6 +2866,9 @@ static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy) int ret; qcom_qmp_phy_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + if (cfg->serdes_tbl_sec) + qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl_sec, + cfg->serdes_tbl_num_sec); if (cfg->type == PHY_TYPE_DP) { switch (dp_opts->link_rate) { @@ -3117,10 +3357,19 @@ static int qcom_qmp_phy_power_on(struct phy *phy) /* Tx, Rx, and PCS configurations */ qcom_qmp_phy_configure_lane(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 1); + if (cfg->tx_tbl_sec) + qcom_qmp_phy_configure_lane(tx, cfg->regs, cfg->tx_tbl_sec, + cfg->tx_tbl_num_sec, 1); + /* Configuration for other LANE for USB-DP combo PHY */ - if (cfg->is_dual_lane_phy) + if (cfg->is_dual_lane_phy) { qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 2); + if (cfg->tx_tbl_sec) + qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs, + cfg->tx_tbl_sec, + cfg->tx_tbl_num_sec, 2); + } /* Configure special DP tx tunings */ if (cfg->type == PHY_TYPE_DP) @@ -3128,16 +3377,28 @@ static int qcom_qmp_phy_power_on(struct phy *phy) qcom_qmp_phy_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); + if (cfg->rx_tbl_sec) + qcom_qmp_phy_configure_lane(rx, cfg->regs, + cfg->rx_tbl_sec, cfg->rx_tbl_num_sec, 1); - if (cfg->is_dual_lane_phy) + if (cfg->is_dual_lane_phy) { qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl, 
cfg->rx_tbl_num, 2); + if (cfg->rx_tbl_sec) + qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs, + cfg->rx_tbl_sec, + cfg->rx_tbl_num_sec, 2); + } /* Configure link rate, swing, etc. */ - if (cfg->type == PHY_TYPE_DP) + if (cfg->type == PHY_TYPE_DP) { qcom_qmp_phy_configure_dp_phy(qphy); - else + } else { qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + if (cfg->pcs_tbl_sec) + qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl_sec, + cfg->pcs_tbl_num_sec); + } ret = reset_control_deassert(qmp->ufs_reset); if (ret) @@ -3145,6 +3406,9 @@ static int qcom_qmp_phy_power_on(struct phy *phy) qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl, cfg->pcs_misc_tbl_num); + if (cfg->pcs_misc_tbl_sec) + qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl_sec, + cfg->pcs_misc_tbl_num_sec); /* * Pull out PHY from POWER DOWN state. @@ -3900,6 +4164,15 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { }, { .compatible = "qcom,sm8250-qmp-usb3-uni-phy", .data = &sm8250_usb3_uniphy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy", + .data = &sm8250_qmp_gen3x1_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, + }, { + .compatible = "qcom,sm8250-qmp-modem-pcie-phy", + .data = &sm8250_qmp_gen3x2_pciephy_cfg, }, { }, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index b7c530088a6c..db92a461dd2e 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -403,6 +403,7 @@ #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0 0x028 #define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1 0x030 #define QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1 0x034 +#define QSERDES_V4_COM_CLK_ENABLE1 0x048 #define QSERDES_V4_COM_SYSCLK_BUF_ENABLE 0x050 #define QSERDES_V4_COM_PLL_IVCO 0x058 #define QSERDES_V4_COM_CMN_IPTRIM 0x060 @@ -432,6 +433,7 @@ #define QSERDES_V4_COM_VCO_TUNE1_MODE1 0x118 #define QSERDES_V4_COM_VCO_TUNE2_MODE1 0x11c #define QSERDES_V4_COM_VCO_TUNE_INITVAL2 0x124 +#define QSERDES_V4_COM_CLK_SELECT 0x154 #define QSERDES_V4_COM_HSCLK_SEL 0x158 #define QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL 0x15c #define QSERDES_V4_COM_CORECLK_DIV_MODE1 0x16c @@ -471,12 +473,14 @@ #define QSERDES_V4_RX_UCDR_SB2_GAIN1 0x054 #define QSERDES_V4_RX_UCDR_SB2_GAIN2 0x058 #define QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE 0x060 +#define QSERDES_V4_RX_RCLK_AUXDATA_SEL 0x064 #define QSERDES_V4_RX_AC_JTAG_ENABLE 0x068 #define QSERDES_V4_RX_AC_JTAG_MODE 0x078 #define QSERDES_V4_RX_RX_TERM_BW 0x080 #define QSERDES_V4_RX_VGA_CAL_CNTRL1 0x0d4 #define QSERDES_V4_RX_VGA_CAL_CNTRL2 0x0d8 #define QSERDES_V4_RX_GM_CAL 0x0dc +#define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1 0x0e8 #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2 0x0ec #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3 0x0f0 #define QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4 0x0f4 @@ -485,6 +489,7 @@ #define QSERDES_V4_RX_RX_IDAC_MEASURE_TIME 0x100 #define QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110 #define QSERDES_V4_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x114 +#define QSERDES_V4_RX_SIGDET_ENABLES 0x118 #define QSERDES_V4_RX_SIGDET_CNTRL 0x11c #define QSERDES_V4_RX_SIGDET_LVL 0x120 #define QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL 0x124 @@ -806,4 +811,17 @@ #define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10 #define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14 +/* Only for QMP V4 PHY - PCS_PCIE registers (same as PCS_MISC?) 
*/ +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG2 0x0c +#define QPHY_V4_PCS_PCIE_POWER_STATE_CONFIG4 0x14 +#define QPHY_V4_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x1c +#define QPHY_V4_PCS_PCIE_L1P1_WAKEUP_DLY_TIME_AUXCLK_L 0x40 +#define QPHY_V4_PCS_PCIE_L1P2_WAKEUP_DLY_TIME_AUXCLK_L 0x48 +#define QPHY_V4_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x50 +#define QPHY_V4_PCS_PCIE_OSC_DTCT_ACTIONS 0x90 +#define QPHY_V4_PCS_PCIE_EQ_CONFIG2 0xa4 +#define QPHY_V4_PCS_PCIE_PRESET_P6_P7_PRE 0xb4 +#define QPHY_V4_PCS_PCIE_PRESET_P10_PRE 0xbc +#define QPHY_V4_PCS_PCIE_PRESET_P10_POST 0xe0 + #endif -- cgit v1.2.3 From 08d4deda6970a91313955934487a37077b1792cb Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:34 +0800 Subject: phy: allwinner: convert to devm_platform_ioremap_resource(_byname) Use devm_platform_ioremap_resource(_byname) to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-1-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/allwinner/phy-sun4i-usb.c | 8 ++------ drivers/phy/allwinner/phy-sun50i-usb3.c | 4 +--- drivers/phy/allwinner/phy-sun6i-mipi-dphy.c | 4 +--- drivers/phy/allwinner/phy-sun9i-usb.c | 4 +--- 4 files changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 651d5e2a25ce..406d5943f8a8 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -686,7 +686,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_provider *phy_provider; - struct resource *res; int i, ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); @@ -700,8 +699,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) if (!data->cfg) return -EINVAL; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl"); - data->base = devm_ioremap_resource(dev, res); + data->base = devm_platform_ioremap_resource_byname(pdev, "phy_ctrl"); if (IS_ERR(data->base)) return PTR_ERR(data->base); @@ -796,9 +794,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */ snprintf(name, sizeof(name), "pmu%d", i); - res = platform_get_resource_byname(pdev, - IORESOURCE_MEM, name); - phy->pmu = devm_ioremap_resource(dev, res); + phy->pmu = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(phy->pmu)) return PTR_ERR(phy->pmu); } diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c index b1c04f71a31d..84055b720016 100644 --- a/drivers/phy/allwinner/phy-sun50i-usb3.c +++ b/drivers/phy/allwinner/phy-sun50i-usb3.c @@ -134,7 +134,6 @@ static int sun50i_usb3_phy_probe(struct platform_device *pdev) struct sun50i_usb3_phy *phy; struct device *dev = &pdev->dev; struct phy_provider *phy_provider; - struct resource *res; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) @@ -153,8 +152,7 @@ static int sun50i_usb3_phy_probe(struct platform_device *pdev) return PTR_ERR(phy->reset); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->regs = devm_ioremap_resource(dev, res); + phy->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->regs)) return PTR_ERR(phy->regs); diff --git a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c index 1fa761ba6cbb..f0bc87d654d4 100644 --- a/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c +++ 
b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c @@ -253,15 +253,13 @@ static int sun6i_dphy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct sun6i_dphy *dphy; - struct resource *res; void __iomem *regs; dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); if (!dphy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(&pdev->dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) { dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n"); return PTR_ERR(regs); diff --git a/drivers/phy/allwinner/phy-sun9i-usb.c b/drivers/phy/allwinner/phy-sun9i-usb.c index fc6784dd7fa0..2f9e60c188b8 100644 --- a/drivers/phy/allwinner/phy-sun9i-usb.c +++ b/drivers/phy/allwinner/phy-sun9i-usb.c @@ -117,7 +117,6 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_provider *phy_provider; - struct resource *res; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) @@ -156,8 +155,7 @@ static int sun9i_usb_phy_probe(struct platform_device *pdev) } } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->pmu = devm_ioremap_resource(dev, res); + phy->pmu = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->pmu)) return PTR_ERR(phy->pmu); -- cgit v1.2.3 From 202de02556bb49ba35f5c7675934b9b14bbb2a56 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:35 +0800 Subject: phy: amlogic: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Remi Pommarel Reviewed-by: Martin Blumenstingl Link: https://lore.kernel.org/r/1604642930-29019-2-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-axg-pcie.c | 4 +--- drivers/phy/amlogic/phy-meson-g12a-usb2.c | 4 +--- drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c | 4 +--- drivers/phy/amlogic/phy-meson-gxl-usb2.c | 4 +--- 4 files changed, 4 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c index 377ed0dcd0d9..58a7507a8422 100644 --- a/drivers/phy/amlogic/phy-meson-axg-pcie.c +++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c @@ -129,7 +129,6 @@ static int phy_axg_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy_axg_pcie_priv *priv; struct device_node *np = dev->of_node; - struct resource *res; void __iomem *base; int ret; @@ -145,8 +144,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev) return ret; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c index b26e30e1afaf..9d1efa0d9394 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c @@ -292,7 +292,6 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct phy_provider *phy_provider; - struct resource *res; struct phy_meson_g12a_usb2_priv *priv; struct phy *phy; void __iomem *base; @@ -305,8 +304,7 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev) priv->dev = dev; platform_set_drvdata(pdev, priv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base 
= devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c index 08e322789e59..ebe3d0ddd304 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c @@ -386,7 +386,6 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct phy_g12a_usb3_pcie_priv *priv; - struct resource *res; struct phy_provider *phy_provider; void __iomem *base; int ret; @@ -395,8 +394,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c index 43ec9bf24abf..875afb2672c7 100644 --- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c +++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c @@ -230,7 +230,6 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct phy_provider *phy_provider; - struct resource *res; struct phy_meson_gxl_usb2_priv *priv; struct phy *phy; void __iomem *base; @@ -242,8 +241,7 @@ static int phy_meson_gxl_usb2_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); -- cgit v1.2.3 From f669bc8b9f7beb6b1936882a2c3670dc1bd985ba Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:36 +0800 Subject: phy: broadcom: convert to devm_platform_ioremap_resource(_byname) Use devm_platform_ioremap_resource(_byname) to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Florian Fainelli Cc: Al Cooper Link: https://lore.kernel.org/r/1604642930-29019-3-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/broadcom/phy-bcm-cygnus-pcie.c | 4 +--- drivers/phy/broadcom/phy-bcm-kona-usb2.c | 4 +--- drivers/phy/broadcom/phy-bcm-ns-usb2.c | 4 +--- drivers/phy/broadcom/phy-bcm-ns-usb3.c | 7 ++----- drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c | 13 ++++--------- drivers/phy/broadcom/phy-bcm-sr-pcie.c | 5 +---- drivers/phy/broadcom/phy-bcm-sr-usb.c | 4 +--- drivers/phy/broadcom/phy-brcm-sata.c | 8 ++------ 8 files changed, 13 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c index b074682d9dd8..548e46776100 100644 --- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c @@ -126,7 +126,6 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) struct device_node *node = dev->of_node, *child; struct cygnus_pcie_phy_core *core; struct phy_provider *provider; - struct resource *res; unsigned cnt = 0; int ret; @@ -141,8 +140,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) core->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - core->base = devm_ioremap_resource(dev, res); + core->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git 
a/drivers/phy/broadcom/phy-bcm-kona-usb2.c b/drivers/phy/broadcom/phy-bcm-kona-usb2.c index 6459296d9bf9..e9cc5f2cb89a 100644 --- a/drivers/phy/broadcom/phy-bcm-kona-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-kona-usb2.c @@ -94,7 +94,6 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct bcm_kona_usb *phy; - struct resource *res; struct phy *gphy; struct phy_provider *phy_provider; @@ -102,8 +101,7 @@ static int bcm_kona_usb2_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->regs = devm_ioremap_resource(&pdev->dev, res); + phy->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->regs)) return PTR_ERR(phy->regs); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c index 9f2f84d65dcd..4b015b8a71c3 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c @@ -83,7 +83,6 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct bcm_ns_usb2 *usb2; - struct resource *res; struct phy_provider *phy_provider; usb2 = devm_kzalloc(&pdev->dev, sizeof(*usb2), GFP_KERNEL); @@ -91,8 +90,7 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev) return -ENOMEM; usb2->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmu"); - usb2->dmu = devm_ioremap_resource(dev, res); + usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu"); if (IS_ERR(usb2->dmu)) { dev_err(dev, "Failed to map DMU regs\n"); return PTR_ERR(usb2->dmu); diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c index 47b029fbebbd..42baf4d6dd5d 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c @@ -305,7 +305,6 @@ static int bcm_ns_usb3_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct of_device_id *of_id; struct bcm_ns_usb3 *usb3; - struct resource *res; struct phy_provider *phy_provider; usb3 = devm_kzalloc(dev, sizeof(*usb3), GFP_KERNEL); @@ -319,15 +318,13 @@ static int bcm_ns_usb3_probe(struct platform_device *pdev) return -EINVAL; usb3->family = (enum bcm_ns_family)of_id->data; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmp"); - usb3->dmp = devm_ioremap_resource(dev, res); + usb3->dmp = devm_platform_ioremap_resource_byname(pdev, "dmp"); if (IS_ERR(usb3->dmp)) { dev_err(dev, "Failed to map DMP regs\n"); return PTR_ERR(usb3->dmp); } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ccb-mii"); - usb3->ccb_mii = devm_ioremap_resource(dev, res); + usb3->ccb_mii = devm_platform_ioremap_resource_byname(pdev, "ccb-mii"); if (IS_ERR(usb3->ccb_mii)) { dev_err(dev, "Failed to map ChipCommon B MII regs\n"); return PTR_ERR(usb3->ccb_mii); diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c index 9630ac127366..65a399acc845 100644 --- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c +++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c @@ -293,7 +293,6 @@ static int ns2_drd_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ns2_phy_driver *driver; struct ns2_phy_data *data; - struct resource *res; int ret; u32 val; @@ -307,23 +306,19 @@ static int ns2_drd_phy_probe(struct platform_device *pdev) if (!driver->data) return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "icfg"); - driver->icfgdrd_regs = devm_ioremap_resource(dev, res); + 
driver->icfgdrd_regs = devm_platform_ioremap_resource_byname(pdev, "icfg"); if (IS_ERR(driver->icfgdrd_regs)) return PTR_ERR(driver->icfgdrd_regs); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rst-ctrl"); - driver->idmdrd_rst_ctrl = devm_ioremap_resource(dev, res); + driver->idmdrd_rst_ctrl = devm_platform_ioremap_resource_byname(pdev, "rst-ctrl"); if (IS_ERR(driver->idmdrd_rst_ctrl)) return PTR_ERR(driver->idmdrd_rst_ctrl); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crmu-ctrl"); - driver->crmu_usb2_ctrl = devm_ioremap_resource(dev, res); + driver->crmu_usb2_ctrl = devm_platform_ioremap_resource_byname(pdev, "crmu-ctrl"); if (IS_ERR(driver->crmu_usb2_ctrl)) return PTR_ERR(driver->crmu_usb2_ctrl); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usb2-strap"); - driver->usb2h_strap_reg = devm_ioremap_resource(dev, res); + driver->usb2h_strap_reg = devm_platform_ioremap_resource_byname(pdev, "usb2-strap"); if (IS_ERR(driver->usb2h_strap_reg)) return PTR_ERR(driver->usb2h_strap_reg); diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c index 96a3af126a78..8a4aadf166cf 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c @@ -217,7 +217,6 @@ static int sr_pcie_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sr_pcie_phy_core *core; - struct resource *res; struct phy_provider *provider; unsigned int phy_idx = 0; @@ -226,9 +225,7 @@ static int sr_pcie_phy_probe(struct platform_device *pdev) return -ENOMEM; core->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - core->base = devm_ioremap_resource(core->dev, res); + core->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(core->base)) return PTR_ERR(core->base); diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c index c3e99ad17487..0002da3b5b5d 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-usb.c +++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c @@ -300,14 +300,12 @@ static int bcm_usb_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; const struct of_device_id *of_id; - struct resource *res; void __iomem *regs; int ret; enum bcm_usb_phy_version version; struct phy_provider *phy_provider; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c index 18251f232172..53942973f508 100644 --- a/drivers/phy/broadcom/phy-brcm-sata.c +++ b/drivers/phy/broadcom/phy-brcm-sata.c @@ -726,7 +726,6 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) struct device_node *dn = dev->of_node, *child; const struct of_device_id *of_id; struct brcm_sata_phy *priv; - struct resource *res; struct phy_provider *provider; int ret, count = 0; @@ -739,8 +738,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, priv); priv->dev = dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - priv->phy_base = devm_ioremap_resource(dev, res); + priv->phy_base = devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(priv->phy_base)) return PTR_ERR(priv->phy_base); @@ -751,9 +749,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) priv->version = BRCM_SATA_PHY_STB_28NM; if 
(priv->version == BRCM_SATA_PHY_IPROC_NS2) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "phy-ctrl"); - priv->ctrl_base = devm_ioremap_resource(dev, res); + priv->ctrl_base = devm_platform_ioremap_resource_byname(pdev, "phy-ctrl"); if (IS_ERR(priv->ctrl_base)) return PTR_ERR(priv->ctrl_base); } -- cgit v1.2.3 From fa62909400fa5abc53cc76cb4917bb92b8bf3a2d Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:37 +0800 Subject: phy: cadence: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Acked-by: Peter Chen Link: https://lore.kernel.org/r/1604642930-29019-4-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/cadence/cdns-dphy.c | 4 +--- drivers/phy/cadence/phy-cadence-salvo.c | 4 +--- drivers/phy/cadence/phy-cadence-sierra.c | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c index 90c4e9b5aac8..ba042e39cfaf 100644 --- a/drivers/phy/cadence/cdns-dphy.c +++ b/drivers/phy/cadence/cdns-dphy.c @@ -314,7 +314,6 @@ static int cdns_dphy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct cdns_dphy *dphy; - struct resource *res; int ret; dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); @@ -326,8 +325,7 @@ static int cdns_dphy_probe(struct platform_device *pdev) if (!dphy->ops) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dphy->regs = devm_ioremap_resource(&pdev->dev, res); + dphy->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dphy->regs)) return PTR_ERR(dphy->regs); diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c index 88e239adc3b8..51c0b98f5fd7 100644 --- a/drivers/phy/cadence/phy-cadence-salvo.c +++ b/drivers/phy/cadence/phy-cadence-salvo.c @@ -263,7 +263,6 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct cdns_salvo_phy *salvo_phy; - struct resource *res; const struct of_device_id *match; struct cdns_salvo_data *data; @@ -281,8 +280,7 @@ static int cdns_salvo_phy_probe(struct platform_device *pdev) if (IS_ERR(salvo_phy->clk)) return PTR_ERR(salvo_phy->clk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - salvo_phy->base = devm_ioremap_resource(dev, res); + salvo_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(salvo_phy->base)) return PTR_ERR(salvo_phy->base); diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index 453ef26fa1c7..26a0badabe38 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -479,7 +479,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) const struct of_device_id *match; struct cdns_sierra_data *data; unsigned int id_value; - struct resource *res; int i, ret, node = 0; void __iomem *base; struct clk *clk; @@ -502,8 +501,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) sp->dev = dev; sp->init_data = data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(base); -- cgit v1.2.3 From 0b7c4c88b83f415d4c75ba4774e0bb7ed266c7c0 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:38 +0800 
Subject: phy: freescale: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Peter Chen Link: https://lore.kernel.org/r/1604642930-29019-5-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c | 4 +--- drivers/phy/freescale/phy-fsl-imx8mq-usb.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c index 9f2c1da14f5a..a95572b397ca 100644 --- a/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c +++ b/drivers/phy/freescale/phy-fsl-imx8-mipi-dphy.c @@ -434,7 +434,6 @@ static int mixel_dphy_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct phy_provider *phy_provider; struct mixel_dphy_priv *priv; - struct resource *res; struct phy *phy; void __iomem *base; @@ -449,8 +448,7 @@ static int mixel_dphy_probe(struct platform_device *pdev) if (!priv->devdata) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c index 47a0c09bc9fa..a29b4a6f7c24 100644 --- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c @@ -152,7 +152,6 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev) struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct imx8mq_usb_phy *imx_phy; - struct resource *res; const struct phy_ops *phy_ops; imx_phy = devm_kzalloc(dev, sizeof(*imx_phy), GFP_KERNEL); @@ -165,8 +164,7 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev) return PTR_ERR(imx_phy->clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - imx_phy->base = devm_ioremap_resource(dev, res); + imx_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(imx_phy->base)) return PTR_ERR(imx_phy->base); -- cgit v1.2.3 From 6c9111bc9eef0ff0302a0963ff802d31e21bfe05 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:39 +0800 Subject: phy: lantiq: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-6-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c index 22c5698123cf..ef93bf2cba10 100644 --- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c +++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c @@ -402,7 +402,6 @@ static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev) struct ltq_vrx200_pcie_phy_priv *priv; struct device *dev = &pdev->dev; struct phy_provider *provider; - struct resource *res; void __iomem *base; int ret; @@ -410,8 +409,7 @@ static int ltq_vrx200_pcie_phy_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); -- cgit v1.2.3 From ee55b501bd8ceb12c30ff0c0240498a301e126db Mon Sep 17 00:00:00 2001 
From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:40 +0800 Subject: phy: marvell: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Jisheng Zhang Link: https://lore.kernel.org/r/1604642930-29019-7-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/marvell/phy-armada375-usb2.c | 4 +--- drivers/phy/marvell/phy-berlin-usb.c | 4 +--- drivers/phy/marvell/phy-mmp3-usb.c | 4 +--- drivers/phy/marvell/phy-mvebu-sata.c | 4 +--- drivers/phy/marvell/phy-pxa-28nm-hsic.c | 4 +--- drivers/phy/marvell/phy-pxa-28nm-usb2.c | 4 +--- drivers/phy/marvell/phy-pxa-usb.c | 4 +--- 7 files changed, 7 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/marvell/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c index fa5dc9462d09..b141e3cd8a94 100644 --- a/drivers/phy/marvell/phy-armada375-usb2.c +++ b/drivers/phy/marvell/phy-armada375-usb2.c @@ -105,15 +105,13 @@ static int armada375_usb_phy_probe(struct platform_device *pdev) struct phy *phy; struct phy_provider *phy_provider; void __iomem *usb_cluster_base; - struct resource *res; struct armada375_cluster_phy *cluster_phy; cluster_phy = devm_kzalloc(dev, sizeof(*cluster_phy), GFP_KERNEL); if (!cluster_phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - usb_cluster_base = devm_ioremap_resource(&pdev->dev, res); + usb_cluster_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(usb_cluster_base)) return PTR_ERR(usb_cluster_base); diff --git a/drivers/phy/marvell/phy-berlin-usb.c b/drivers/phy/marvell/phy-berlin-usb.c index a43df63007c5..78ef6ae72a9a 100644 --- a/drivers/phy/marvell/phy-berlin-usb.c +++ b/drivers/phy/marvell/phy-berlin-usb.c @@ -165,7 +165,6 @@ static int phy_berlin_usb_probe(struct platform_device *pdev) const struct of_device_id *match = of_match_device(phy_berlin_usb_of_match, &pdev->dev); struct phy_berlin_usb_priv *priv; - struct resource *res; struct phy *phy; struct phy_provider *phy_provider; @@ -173,8 +172,7 @@ static int phy_berlin_usb_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/phy/marvell/phy-mmp3-usb.c b/drivers/phy/marvell/phy-mmp3-usb.c index 499869595a58..04c0bada3519 100644 --- a/drivers/phy/marvell/phy-mmp3-usb.c +++ b/drivers/phy/marvell/phy-mmp3-usb.c @@ -246,7 +246,6 @@ MODULE_DEVICE_TABLE(of, mmp3_usb_phy_of_match); static int mmp3_usb_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *resource; struct mmp3_usb_phy *mmp3_usb_phy; struct phy_provider *provider; @@ -254,8 +253,7 @@ static int mmp3_usb_phy_probe(struct platform_device *pdev) if (!mmp3_usb_phy) return -ENOMEM; - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmp3_usb_phy->base = devm_ioremap_resource(dev, resource); + mmp3_usb_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mmp3_usb_phy->base)) { dev_err(dev, "failed to remap PHY regs\n"); return PTR_ERR(mmp3_usb_phy->base); diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c index 3c01b5dceaae..51a4646e2933 100644 --- a/drivers/phy/marvell/phy-mvebu-sata.c +++ b/drivers/phy/marvell/phy-mvebu-sata.c @@ -80,7 +80,6 @@ static const struct phy_ops phy_mvebu_sata_ops = { 
static int phy_mvebu_sata_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; - struct resource *res; struct priv *priv; struct phy *phy; @@ -88,8 +87,7 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, res); + priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/phy/marvell/phy-pxa-28nm-hsic.c b/drivers/phy/marvell/phy-pxa-28nm-hsic.c index 31b43d2ee39a..c5c100563f55 100644 --- a/drivers/phy/marvell/phy-pxa-28nm-hsic.c +++ b/drivers/phy/marvell/phy-pxa-28nm-hsic.c @@ -162,7 +162,6 @@ static int mv_hsic_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct mv_hsic_phy *mv_phy; - struct resource *r; mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL); if (!mv_phy) @@ -176,8 +175,7 @@ static int mv_hsic_phy_probe(struct platform_device *pdev) return PTR_ERR(mv_phy->clk); } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mv_phy->base = devm_ioremap_resource(&pdev->dev, r); + mv_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mv_phy->base)) return PTR_ERR(mv_phy->base); diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c index a175ae915f02..0b390b9d2ae1 100644 --- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c +++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c @@ -294,7 +294,6 @@ static int mv_usb2_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct mv_usb2_phy *mv_phy; - struct resource *r; mv_phy = devm_kzalloc(&pdev->dev, sizeof(*mv_phy), GFP_KERNEL); if (!mv_phy) @@ -308,8 +307,7 @@ static int mv_usb2_phy_probe(struct platform_device *pdev) return PTR_ERR(mv_phy->clk); } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mv_phy->base = devm_ioremap_resource(&pdev->dev, r); + mv_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mv_phy->base)) return PTR_ERR(mv_phy->base); diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c index 87ff7550b912..ffe889893ff4 100644 --- a/drivers/phy/marvell/phy-pxa-usb.c +++ b/drivers/phy/marvell/phy-pxa-usb.c @@ -286,7 +286,6 @@ MODULE_DEVICE_TABLE(of, pxa_usb_phy_of_match); static int pxa_usb_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *resource; struct pxa_usb_phy *pxa_usb_phy; struct phy_provider *provider; const struct of_device_id *of_id; @@ -301,8 +300,7 @@ static int pxa_usb_phy_probe(struct platform_device *pdev) else pxa_usb_phy->version = PXA_USB_PHY_MMP2; - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pxa_usb_phy->base = devm_ioremap_resource(dev, resource); + pxa_usb_phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pxa_usb_phy->base)) { dev_err(dev, "failed to remap PHY regs\n"); return PTR_ERR(pxa_usb_phy->base); -- cgit v1.2.3 From 5d797059ff57b2213153523f4cf031a811e53dbf Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:41 +0800 Subject: phy: phy-xgene: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-8-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/phy-xgene.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/phy-xgene.c 
b/drivers/phy/phy-xgene.c index b88922e7de1d..f4cd590fbde7 100644 --- a/drivers/phy/phy-xgene.c +++ b/drivers/phy/phy-xgene.c @@ -1644,7 +1644,6 @@ static int xgene_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct xgene_phy_ctx *ctx; - struct resource *res; u32 default_spd[] = DEFAULT_SATA_SPD_SEL; u32 default_txboost_gain[] = DEFAULT_SATA_TXBOOST_GAIN; u32 default_txeye_direction[] = DEFAULT_SATA_TXEYEDIRECTION; @@ -1661,8 +1660,7 @@ static int xgene_phy_probe(struct platform_device *pdev) ctx->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->sds_base = devm_ioremap_resource(&pdev->dev, res); + ctx->sds_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->sds_base)) return PTR_ERR(ctx->sds_base); -- cgit v1.2.3 From 7458d650e256b2b418efa4c974585846da4358a6 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:42 +0800 Subject: phy: phy-mtk-ufs: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-9-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/mediatek/phy-mtk-ufs.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c index cf94f5c35dc5..769b00b038d8 100644 --- a/drivers/phy/mediatek/phy-mtk-ufs.c +++ b/drivers/phy/mediatek/phy-mtk-ufs.c @@ -195,7 +195,6 @@ static int ufs_mtk_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy *generic_phy; struct phy_provider *phy_provider; - struct resource *res; struct ufs_mtk_phy *phy; int ret; @@ -203,8 +202,7 @@ static int ufs_mtk_phy_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->mmio = devm_ioremap_resource(dev, res); + phy->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->mmio)) return PTR_ERR(phy->mmio); -- cgit v1.2.3 From 8a7772cdd91de4eea6dcd9df9b6fce7f4c012208 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:43 +0800 Subject: phy: qualcomm: convert to devm_platform_ioremap_resource(_byname) Use devm_platform_ioremap_resource(_byname) to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-10-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/qualcomm/phy-qcom-apq8064-sata.c | 4 +--- drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c | 4 +--- drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c | 4 +--- drivers/phy/qualcomm/phy-qcom-pcie2.c | 5 +---- drivers/phy/qualcomm/phy-qcom-qusb2.c | 4 +--- 5 files changed, 5 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c index ce91ae7f8dbd..d437a249cd73 100644 --- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c @@ -201,7 +201,6 @@ static int qcom_apq8064_sata_phy_probe(struct platform_device *pdev) { struct qcom_apq8064_sata_phy *phy; struct device *dev = &pdev->dev; - struct resource *res; struct phy_provider *phy_provider; struct phy *generic_phy; int ret; @@ -210,8 +209,7 @@ static int qcom_apq8064_sata_phy_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->mmio = devm_ioremap_resource(dev, res); + phy->mmio = 
devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->mmio)) return PTR_ERR(phy->mmio); diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c index fc7f9df80a7b..d3e7d5e1d1b6 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c @@ -95,7 +95,6 @@ MODULE_DEVICE_TABLE(of, ipq4019_usb_phy_of_match); static int ipq4019_usb_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct phy_provider *phy_provider; struct ipq4019_usb_phy *phy; @@ -104,8 +103,7 @@ static int ipq4019_usb_phy_probe(struct platform_device *pdev) return -ENOMEM; phy->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->base = devm_ioremap_resource(&pdev->dev, res); + phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->base)) { dev_err(dev, "failed to remap register memory\n"); return PTR_ERR(phy->base); diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c index 41a69f56b346..0fc2a1ed39b3 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-sata.c @@ -128,7 +128,6 @@ static int qcom_ipq806x_sata_phy_probe(struct platform_device *pdev) { struct qcom_ipq806x_sata_phy *phy; struct device *dev = &pdev->dev; - struct resource *res; struct phy_provider *phy_provider; struct phy *generic_phy; int ret; @@ -137,8 +136,7 @@ static int qcom_ipq806x_sata_phy_probe(struct platform_device *pdev) if (!phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->mmio = devm_ioremap_resource(dev, res); + phy->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->mmio)) return PTR_ERR(phy->mmio); diff --git a/drivers/phy/qualcomm/phy-qcom-pcie2.c b/drivers/phy/qualcomm/phy-qcom-pcie2.c index 9dba3594e6d9..5407e59bb185 100644 --- a/drivers/phy/qualcomm/phy-qcom-pcie2.c +++ b/drivers/phy/qualcomm/phy-qcom-pcie2.c @@ -250,7 +250,6 @@ static int qcom_pcie2_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct qcom_phy *qphy; - struct resource *res; struct device *dev = &pdev->dev; struct phy *phy; int ret; @@ -260,9 +259,7 @@ static int qcom_pcie2_phy_probe(struct platform_device *pdev) return -ENOMEM; qphy->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - qphy->base = devm_ioremap_resource(dev, res); + qphy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qphy->base)) return PTR_ERR(qphy->base); diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 557547dabfd5..109792203baf 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -844,7 +844,6 @@ static int qusb2_phy_probe(struct platform_device *pdev) struct qusb2_phy *qphy; struct phy_provider *phy_provider; struct phy *generic_phy; - struct resource *res; int ret, i; int num; u32 value; @@ -855,8 +854,7 @@ static int qusb2_phy_probe(struct platform_device *pdev) return -ENOMEM; or = &qphy->overrides; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - qphy->base = devm_ioremap_resource(dev, res); + qphy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(qphy->base)) return PTR_ERR(qphy->base); -- cgit v1.2.3 From fc5662127a257eafcb37133fa272b1ab0813ae07 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:44 +0800 Subject: phy: phy-ralink-usb: convert to devm_platform_ioremap_resource 
Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-11-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/ralink/phy-ralink-usb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c index 95dfa9fd284d..2bd8ad2e76ed 100644 --- a/drivers/phy/ralink/phy-ralink-usb.c +++ b/drivers/phy/ralink/phy-ralink-usb.c @@ -170,7 +170,6 @@ MODULE_DEVICE_TABLE(of, ralink_usb_phy_of_match); static int ralink_usb_phy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct phy_provider *phy_provider; const struct of_device_id *match; struct ralink_usb_phy *phy; @@ -194,8 +193,7 @@ static int ralink_usb_phy_probe(struct platform_device *pdev) /* The MT7628 and MT7688 require extra setup of PHY registers. */ if (of_device_is_compatible(dev->of_node, "mediatek,mt7628-usbphy")) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->base = devm_ioremap_resource(&pdev->dev, res); + phy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->base)) { dev_err(dev, "failed to remap register memory\n"); return PTR_ERR(phy->base); -- cgit v1.2.3 From 0b5604affbec0241d72996924b2f5a33d96b860a Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:45 +0800 Subject: phy: renesas: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-12-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/renesas/phy-rcar-gen2.c | 4 +--- drivers/phy/renesas/phy-rcar-gen3-pcie.c | 4 +--- drivers/phy/renesas/phy-rcar-gen3-usb2.c | 4 +--- drivers/phy/renesas/phy-rcar-gen3-usb3.c | 4 +--- 4 files changed, 4 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c index 2e279ac0fa4d..c375a4676a3d 100644 --- a/drivers/phy/renesas/phy-rcar-gen2.c +++ b/drivers/phy/renesas/phy-rcar-gen2.c @@ -339,7 +339,6 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev) struct rcar_gen2_phy_driver *drv; struct phy_provider *provider; struct device_node *np; - struct resource *res; void __iomem *base; struct clk *clk; const struct rcar_gen2_phy_data *data; @@ -357,8 +356,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev) return PTR_ERR(clk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c index c4e4aa216936..4dc721eb9577 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c +++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c @@ -76,7 +76,6 @@ static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy_provider *provider; struct rcar_gen3_phy *phy; - struct resource *res; void __iomem *base; int error; @@ -86,8 +85,7 @@ static int rcar_gen3_phy_pcie_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); diff --git 
a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index e34e4475027c..1898bbe7c8e5 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -611,7 +611,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - struct resource *res; const struct phy_ops *phy_usb2_ops; int ret = 0, i; @@ -624,8 +623,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) if (!channel) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - channel->base = devm_ioremap_resource(dev, res); + channel->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(channel->base)) return PTR_ERR(channel->base); diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c index 566b4cf4ff38..f27d6f471629 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c @@ -133,7 +133,6 @@ static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rcar_gen3_usb3 *r; struct phy_provider *provider; - struct resource *res; int ret = 0; struct clk *clk; @@ -146,8 +145,7 @@ static int rcar_gen3_phy_usb3_probe(struct platform_device *pdev) if (!r) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - r->base = devm_ioremap_resource(dev, res); + r->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(r->base)) return PTR_ERR(r->base); -- cgit v1.2.3 From 6824ebc047b9936bfe7b5885d85c1fe88dca014e Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:46 +0800 Subject: phy: rockchip: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-13-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/rockchip/phy-rockchip-inno-hdmi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c index 9ca20c947283..a37f3f342642 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c @@ -1144,7 +1144,6 @@ static int inno_hdmi_phy_probe(struct platform_device *pdev) { struct inno_hdmi_phy *inno; struct phy_provider *phy_provider; - struct resource *res; void __iomem *regs; int ret; @@ -1158,8 +1157,7 @@ static int inno_hdmi_phy_probe(struct platform_device *pdev) if (!inno->plat_data || !inno->plat_data->ops) return -EINVAL; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - regs = devm_ioremap_resource(inno->dev, res); + regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); -- cgit v1.2.3 From 2f0c9fac3be6c834427f9d2af2107ebd6602d4b1 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:47 +0800 Subject: phy: samsung: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/1604642930-29019-14-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/samsung/phy-exynos-pcie.c | 7 ++----- drivers/phy/samsung/phy-exynos5-usbdrd.c | 4 +--- drivers/phy/samsung/phy-exynos5250-sata.c | 5 +---- drivers/phy/samsung/phy-samsung-usb2.c | 4 
+--- 4 files changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c index 7e28b1aea0d1..c98fff5c1ac8 100644 --- a/drivers/phy/samsung/phy-exynos-pcie.c +++ b/drivers/phy/samsung/phy-exynos-pcie.c @@ -232,7 +232,6 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev) struct exynos_pcie_phy *exynos_phy; struct phy *generic_phy; struct phy_provider *phy_provider; - struct resource *res; const struct exynos_pcie_phy_data *drv_data; drv_data = of_device_get_match_data(dev); @@ -243,13 +242,11 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev) if (!exynos_phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - exynos_phy->phy_base = devm_ioremap_resource(dev, res); + exynos_phy->phy_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(exynos_phy->phy_base)) return PTR_ERR(exynos_phy->phy_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - exynos_phy->blk_base = devm_ioremap_resource(dev, res); + exynos_phy->blk_base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(exynos_phy->blk_base)) return PTR_ERR(exynos_phy->blk_base); diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index cfa9b8b7e5ac..ee0848fe8432 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -829,7 +829,6 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct exynos5_usbdrd_phy *phy_drd; struct phy_provider *phy_provider; - struct resource *res; const struct exynos5_usbdrd_phy_drvdata *drv_data; struct regmap *reg_pmu; u32 pmu_offset; @@ -843,8 +842,7 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, phy_drd); phy_drd->dev = dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy_drd->reg_phy = devm_ioremap_resource(dev, res); + phy_drd->reg_phy = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy_drd->reg_phy)) return PTR_ERR(phy_drd->reg_phy); diff --git a/drivers/phy/samsung/phy-exynos5250-sata.c b/drivers/phy/samsung/phy-exynos5250-sata.c index 4dd7324d91b2..9ec234243f7c 100644 --- a/drivers/phy/samsung/phy-exynos5250-sata.c +++ b/drivers/phy/samsung/phy-exynos5250-sata.c @@ -162,7 +162,6 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) { struct exynos_sata_phy *sata_phy; struct device *dev = &pdev->dev; - struct resource *res; struct phy_provider *phy_provider; struct device_node *node; int ret = 0; @@ -171,9 +170,7 @@ static int exynos_sata_phy_probe(struct platform_device *pdev) if (!sata_phy) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - sata_phy->regs = devm_ioremap_resource(dev, res); + sata_phy->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sata_phy->regs)) return PTR_ERR(sata_phy->regs); diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c index a3ed3ff04690..f79f605cff79 100644 --- a/drivers/phy/samsung/phy-samsung-usb2.c +++ b/drivers/phy/samsung/phy-samsung-usb2.c @@ -143,7 +143,6 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev) const struct samsung_usb2_phy_config *cfg; struct device *dev = &pdev->dev; struct phy_provider *phy_provider; - struct resource *mem; struct samsung_usb2_phy_driver *drv; int i, ret; @@ -167,8 +166,7 @@ static int samsung_usb2_phy_probe(struct platform_device *pdev) drv->cfg = cfg; drv->dev 
= dev; - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - drv->reg_phy = devm_ioremap_resource(dev, mem); + drv->reg_phy = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drv->reg_phy)) { dev_err(dev, "Failed to map register memory (phy)\n"); return PTR_ERR(drv->reg_phy); -- cgit v1.2.3 From 5a77b16c6b2b0eb5ab108d1933e99be769f5c90f Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:48 +0800 Subject: phy: phy-stm32-usbphyc: convert to devm_platform_ioremap_resource Use devm_platform_ioremap_resource to simplify code Signed-off-by: Chunfeng Yun Reviewed-by: Amelie Delaunay Link: https://lore.kernel.org/r/1604642930-29019-15-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/st/phy-stm32-usbphyc.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2b3639cba51a..0ab18f2078db 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -311,7 +311,6 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) struct stm32_usbphyc *usbphyc; struct device *dev = &pdev->dev; struct device_node *child, *np = dev->of_node; - struct resource *res; struct phy_provider *phy_provider; u32 version; int ret, port = 0; @@ -322,8 +321,7 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) usbphyc->dev = dev; dev_set_drvdata(dev, usbphyc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - usbphyc->base = devm_ioremap_resource(dev, res); + usbphyc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(usbphyc->base)) return PTR_ERR(usbphyc->base); -- cgit v1.2.3 From 9ab4212b0a363a1edc5a52f70843dc2a5cda200d Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:49 +0800 Subject: phy: tegra: convert to devm_platform_ioremap_resource(_byname) Use devm_platform_ioremap_resource(_byname) to simplify code Signed-off-by: Chunfeng Yun Cc: JC Kuo Link: https://lore.kernel.org/r/1604642930-29019-16-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/tegra/phy-tegra194-p2u.c | 4 +--- drivers/phy/tegra/xusb.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c index 7042bed9feaa..3ee02b9eb04f 100644 --- a/drivers/phy/tegra/phy-tegra194-p2u.c +++ b/drivers/phy/tegra/phy-tegra194-p2u.c @@ -72,14 +72,12 @@ static int tegra_p2u_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct phy *generic_phy; struct tegra_p2u *phy; - struct resource *res; phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctl"); - phy->base = devm_ioremap_resource(dev, res); + phy->base = devm_platform_ioremap_resource_byname(pdev, "ctl"); if (IS_ERR(phy->base)) return PTR_ERR(phy->base); diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index de4a46fe1763..067e71326826 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1148,7 +1148,6 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) const struct tegra_xusb_padctl_soc *soc; struct tegra_xusb_padctl *padctl; const struct of_device_id *match; - struct resource *res; int err; /* for backwards compatibility with old device trees */ @@ -1173,8 +1172,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) 
INIT_LIST_HEAD(&padctl->pads); mutex_init(&padctl->lock); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - padctl->regs = devm_ioremap_resource(&pdev->dev, res); + padctl->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(padctl->regs)) { err = PTR_ERR(padctl->regs); goto remove; -- cgit v1.2.3 From 79caf207d6699419e83ac150accc5c80c5719b47 Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Fri, 6 Nov 2020 14:08:50 +0800 Subject: phy: ti: convert to devm_platform_ioremap_resource(_byname) Use devm_platform_ioremap_resource(_byname) to simplify code Signed-off-by: Chunfeng Yun Link: https://lore.kernel.org/r/1604642930-29019-17-git-send-email-chunfeng.yun@mediatek.com Signed-off-by: Vinod Koul --- drivers/phy/ti/phy-omap-control.c | 17 ++++++----------- drivers/phy/ti/phy-omap-usb2.c | 4 +--- drivers/phy/ti/phy-ti-pipe3.c | 15 ++++----------- 3 files changed, 11 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c index ccd0e4e00451..47482f106fab 100644 --- a/drivers/phy/ti/phy-omap-control.c +++ b/drivers/phy/ti/phy-omap-control.c @@ -268,7 +268,6 @@ MODULE_DEVICE_TABLE(of, omap_control_phy_id_table); static int omap_control_phy_probe(struct platform_device *pdev) { - struct resource *res; const struct of_device_id *of_id; struct omap_control_phy *control_phy; @@ -285,16 +284,13 @@ static int omap_control_phy_probe(struct platform_device *pdev) control_phy->type = *(enum omap_control_phy_type *)of_id->data; if (control_phy->type == OMAP_CTRL_TYPE_OTGHS) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "otghs_control"); - control_phy->otghs_control = devm_ioremap_resource( - &pdev->dev, res); + control_phy->otghs_control = + devm_platform_ioremap_resource_byname(pdev, "otghs_control"); if (IS_ERR(control_phy->otghs_control)) return PTR_ERR(control_phy->otghs_control); } else { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "power"); - control_phy->power = devm_ioremap_resource(&pdev->dev, res); + control_phy->power = + devm_platform_ioremap_resource_byname(pdev, "power"); if (IS_ERR(control_phy->power)) { dev_err(&pdev->dev, "Couldn't get power register\n"); return PTR_ERR(control_phy->power); @@ -312,9 +308,8 @@ static int omap_control_phy_probe(struct platform_device *pdev) } if (control_phy->type == OMAP_CTRL_TYPE_PCIE) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pcie_pcs"); - control_phy->pcie_pcs = devm_ioremap_resource(&pdev->dev, res); + control_phy->pcie_pcs = + devm_platform_ioremap_resource_byname(pdev, "pcie_pcs"); if (IS_ERR(control_phy->pcie_pcs)) return PTR_ERR(control_phy->pcie_pcs); } diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 4fec90d2624f..ebceb1520ce8 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -366,7 +366,6 @@ static int omap_usb2_probe(struct platform_device *pdev) { struct omap_usb *phy; struct phy *generic_phy; - struct resource *res; struct phy_provider *phy_provider; struct usb_otg *otg; struct device_node *node = pdev->dev.of_node; @@ -403,8 +402,7 @@ static int omap_usb2_probe(struct platform_device *pdev) omap_usb2_init_errata(phy); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - phy->phy_base = devm_ioremap_resource(&pdev->dev, res); + phy->phy_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(phy->phy_base)) return PTR_ERR(phy->phy_base); diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c index 
e9332c90f75f..2cbc91e535d4 100644 --- a/drivers/phy/ti/phy-ti-pipe3.c +++ b/drivers/phy/ti/phy-ti-pipe3.c @@ -745,35 +745,28 @@ static int ti_pipe3_get_sysctrl(struct ti_pipe3 *phy) static int ti_pipe3_get_tx_rx_base(struct ti_pipe3 *phy) { - struct resource *res; struct device *dev = phy->dev; struct platform_device *pdev = to_platform_device(dev); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "phy_rx"); - phy->phy_rx = devm_ioremap_resource(dev, res); + phy->phy_rx = devm_platform_ioremap_resource_byname(pdev, "phy_rx"); if (IS_ERR(phy->phy_rx)) return PTR_ERR(phy->phy_rx); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "phy_tx"); - phy->phy_tx = devm_ioremap_resource(dev, res); + phy->phy_tx = devm_platform_ioremap_resource_byname(pdev, "phy_tx"); return PTR_ERR_OR_ZERO(phy->phy_tx); } static int ti_pipe3_get_pll_base(struct ti_pipe3 *phy) { - struct resource *res; struct device *dev = phy->dev; struct platform_device *pdev = to_platform_device(dev); if (phy->mode == PIPE3_MODE_PCIE) return 0; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pll_ctrl"); - phy->pll_ctrl_base = devm_ioremap_resource(dev, res); + phy->pll_ctrl_base = + devm_platform_ioremap_resource_byname(pdev, "pll_ctrl"); return PTR_ERR_OR_ZERO(phy->pll_ctrl_base); } -- cgit v1.2.3 From 3cc8e86721adbd24de7303bbb5a82464ba9e324a Mon Sep 17 00:00:00 2001 From: Yejune Deng Date: Tue, 3 Nov 2020 12:37:54 +0800 Subject: phy: amlogic: Replace devm_reset_control_array_get() devm_reset_control_array_get_exclusive() looks more readable Signed-off-by: Yejune Deng Reviewed-by: Martin Blumenstingl Link: https://lore.kernel.org/r/1604378274-6860-1-git-send-email-yejune.deng@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-axg-pcie.c | 2 +- drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c | 2 +- drivers/soc/amlogic/meson-ee-pwrc.c | 3 +-- drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c index 58a7507a8422..2299bab38e05 100644 --- a/drivers/phy/amlogic/phy-meson-axg-pcie.c +++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c @@ -153,7 +153,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); - priv->reset = devm_reset_control_array_get(dev, false, false); + priv->reset = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c index ebe3d0ddd304..5b471ab80fe2 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c @@ -416,7 +416,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) if (ret) goto err_disable_clk_ref; - priv->reset = devm_reset_control_array_get(dev, false, false); + priv->reset = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 5164a4dc2352..551edd934ba1 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -412,8 +412,7 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n", count, dom->desc.name); - dom->rstc = devm_reset_control_array_get(&pdev->dev, 
false, - false); + dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev) if (IS_ERR(dom->rstc)) return PTR_ERR(dom->rstc); } diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c index 21b4bc811c00..a4dba8a9cfbc 100644 --- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c +++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c @@ -303,7 +303,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev) return PTR_ERR(regmap_hhi); } - rstc = devm_reset_control_array_get(&pdev->dev, false, false); + rstc = devm_reset_control_array_get_exclusive(&pdev->dev); if (IS_ERR(rstc)) { if (PTR_ERR(rstc) != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get reset lines\n"); -- cgit v1.2.3 From 86f1a6e6c5f700f27bbee046ad330b6a50707a24 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Mon, 9 Nov 2020 22:58:44 +0100 Subject: phy: tegra: Constify static device_type structs The only usage of tegra_xusb_pad_type and tegra_xusb_port_type is to assign their address to the type field in the device struct, which is a const pointer. Make them const to allow the compiler to put them in read-only memory. Signed-off-by: Rikard Falkeborn Link: https://lore.kernel.org/r/20201109215844.167954-1-rikard.falkeborn@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/tegra/xusb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 067e71326826..ea9d93097ad8 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -146,7 +146,7 @@ static void tegra_xusb_pad_release(struct device *dev) pad->soc->ops->remove(pad); } -static struct device_type tegra_xusb_pad_type = { +static const struct device_type tegra_xusb_pad_type = { .release = tegra_xusb_pad_release, }; @@ -513,7 +513,7 @@ static void tegra_xusb_port_release(struct device *dev) port->ops->release(port); } -static struct device_type tegra_xusb_port_type = { +static const struct device_type tegra_xusb_port_type = { .release = tegra_xusb_port_release, }; -- cgit v1.2.3 From f98130b34515544d3004ea64ea1dc9db77e9c65a Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Tue, 10 Nov 2020 14:05:31 +0100 Subject: phy: stm32: don't print an error on probe deferral Change stm32-usbphyc driver to not print an error message when the device probe operation is deferred. 
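An editor's sketch of the idiom this change adopts (not part of the patch; the function and variable names below are invented for illustration): dev_err_probe() returns the error code it is given, logs at error level for real failures, and stays quiet for -EPROBE_DEFER while recording the deferral reason, so the usual check/print/return sequence collapses into one statement.

static int example_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	/* silent on -EPROBE_DEFER, prints and propagates any other error */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");

	*out = clk;
	return 0;
}
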
Signed-off-by: Etienne Carriere Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20201110130531.7610-1-amelie.delaunay@st.com Signed-off-by: Vinod Koul --- drivers/phy/st/phy-stm32-usbphyc.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 0ab18f2078db..8ebfd567d407 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -326,11 +326,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) return PTR_ERR(usbphyc->base); usbphyc->clk = devm_clk_get(dev, NULL); - if (IS_ERR(usbphyc->clk)) { - ret = PTR_ERR(usbphyc->clk); - dev_err(dev, "clk get failed: %d\n", ret); - return ret; - } + if (IS_ERR(usbphyc->clk)) + return dev_err_probe(dev, PTR_ERR(usbphyc->clk), "clk get_failed\n"); ret = clk_prepare_enable(usbphyc->clk); if (ret) { -- cgit v1.2.3 From 13ea8e0eee4580686a9d2993ac64938cb5141526 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Tue, 10 Nov 2020 11:23:05 +0100 Subject: phy: stm32: defer probe for reset controller Change stm32-usbphyc driver to defer its probe when the expected reset control has its probe operation deferred. Signed-off-by: Etienne Carriere Signed-off-by: Amelie Delaunay Link: https://lore.kernel.org/r/20201110102305.27205-2-amelie.delaunay@st.com Signed-off-by: Vinod Koul --- drivers/phy/st/phy-stm32-usbphyc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 8ebfd567d407..a54317e96c41 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -340,6 +340,10 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) reset_control_assert(usbphyc->rst); udelay(2); reset_control_deassert(usbphyc->rst); + } else { + ret = PTR_ERR(usbphyc->rst); + if (ret == -EPROBE_DEFER) + goto clk_disable; } usbphyc->switch_setup = -EINVAL; -- cgit v1.2.3 From 839034d8bd7f380b13b3493e057239f95bb8672a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 22 Oct 2020 13:50:56 -0700 Subject: phy: phy-brcm-sata: Allow configuration SATA AFE TX amplitude Read the 'brcm,tx-amplitude-millivolt' property from Device Tree and propagate its value into the appropriate test transmit register to change the TX amplitude. 
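A hedged sketch of the device-tree read this adds (editor's illustration only; the local variable name is invented, the per-port child node handling is simplified, and the value-to-register mapping itself is in the diff below): of_property_read_u32() leaves its output untouched and returns an error when the property is absent, so pre-initialising the value lets boards that omit the property keep the PHY's default amplitude.

	u32 tx_amplitude_mv = 0;	/* 0 here means "keep the hardware default" */

	/* optional property: ignore the return value, keep the default when absent */
	of_property_read_u32(child, "brcm,tx-amplitude-millivolt",
			     &tx_amplitude_mv);
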
Signed-off-by: Florian Fainelli Link: https://lore.kernel.org/r/20201022205056.233879-3-f.fainelli@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/broadcom/phy-brcm-sata.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c index 53942973f508..3ecf41359591 100644 --- a/drivers/phy/broadcom/phy-brcm-sata.c +++ b/drivers/phy/broadcom/phy-brcm-sata.c @@ -65,6 +65,7 @@ struct brcm_sata_port { bool ssc_en; enum brcm_sata_phy_rxaeq_mode rxaeq_mode; u32 rxaeq_val; + u32 tx_amplitude_val; }; struct brcm_sata_phy { @@ -84,6 +85,10 @@ enum sata_phy_regs { BLOCK0_SPARE_OOB_CLK_SEL_MASK = 0x3, BLOCK0_SPARE_OOB_CLK_SEL_REFBY2 = 0x1, + BLOCK1_REG_BANK = 0x10, + BLOCK1_TEST_TX = 0x83, + BLOCK1_TEST_TX_AMP_SHIFT = 12, + PLL_REG_BANK_0 = 0x050, PLL_REG_BANK_0_PLLCONTROL_0 = 0x81, PLLCONTROL_0_FREQ_DET_RESTART = BIT(13), @@ -379,6 +384,29 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port) brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1, ~tmp, RXPMD_MON_CORRECT_EN | value); + tmp = GENMASK(15, 12); + switch (port->tx_amplitude_val) { + case 400: + value = BIT(12) | BIT(13); + break; + case 500: + value = BIT(13); + break; + case 600: + value = BIT(12); + break; + case 800: + value = 0; + break; + default: + value = tmp; + break; + } + + if (value != tmp) + brcm_sata_phy_wr(port, BLOCK1_REG_BANK, BLOCK1_TEST_TX, ~tmp, + value); + /* Turn on/off SSC */ brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN, port->ssc_en ? TX_ACTRL5_SSC_EN : 0); @@ -787,6 +815,10 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) if (port->rxaeq_mode == RXAEQ_MODE_MANUAL) of_property_read_u32(child, "brcm,rxaeq-value", &port->rxaeq_val); + + of_property_read_u32(child, "brcm,tx-amplitude-millivolt", + &port->tx_amplitude_val); + port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); if (IS_ERR(port->phy)) { dev_err(dev, "failed to create PHY\n"); -- cgit v1.2.3 From 50c0133cd154090446bc19e466df57502f422644 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 11 Nov 2020 10:37:08 +0000 Subject: phy: tegra: Don't warn on probe deferral Deferred probe is an expected return value for devm_regulator_bulk_get(). Given that the driver deals with it properly, there's no need to output a warning that may potentially confuse users. 
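For context, a minimal sketch of the probe pattern being adjusted (editor's illustration; the supply names and array here are invented rather than taken from the Tegra driver): devm_regulator_bulk_get() legitimately returns -EPROBE_DEFER while a supplier is still unprobed, and dev_err_probe() keeps that expected case out of the log while still reporting genuine failures.

	struct regulator_bulk_data supplies[] = {
		{ .supply = "avdd" },
		{ .supply = "dvdd" },
	};
	int err;

	err = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
	if (err < 0)
		return dev_err_probe(dev, err, "failed to get regulators\n");
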
Signed-off-by: Jon Hunter Acked-by: Thierry Reding Acked-by: JC Kuo Link: https://lore.kernel.org/r/20201111103708.152566-1-jonathanh@nvidia.com Signed-off-by: Vinod Koul --- drivers/phy/tegra/xusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index ea9d93097ad8..40e03c931340 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1198,7 +1198,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) err = devm_regulator_bulk_get(&pdev->dev, padctl->soc->num_supplies, padctl->supplies); if (err < 0) { - dev_err(&pdev->dev, "failed to get regulators: %d\n", err); + dev_err_probe(&pdev->dev, err, "failed to get regulators\n"); goto remove; } -- cgit v1.2.3 From 53cde0fe020fd9594820307661e9b9c42821722d Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 17 Nov 2020 15:26:17 +0530 Subject: phy: amlogic: Revert "phy: amlogic: Replace devm_reset_control_array_get()" This reverts commit 3cc8e86721ad ("phy: amlogic: Replace devm_reset_control_array_get()") as it caused build failure drivers/soc/amlogic/meson-ee-pwrc.c: In function 'meson_ee_pwrc_init_domain': drivers/soc/amlogic/meson-ee-pwrc.c:416:65: error: expected ';' before 'if' Reported-by: Stephen Rothwell Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-axg-pcie.c | 2 +- drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c | 2 +- drivers/soc/amlogic/meson-ee-pwrc.c | 3 ++- drivers/soc/amlogic/meson-gx-pwrc-vpu.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c index 2299bab38e05..58a7507a8422 100644 --- a/drivers/phy/amlogic/phy-meson-axg-pcie.c +++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c @@ -153,7 +153,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); - priv->reset = devm_reset_control_array_get_exclusive(dev); + priv->reset = devm_reset_control_array_get(dev, false, false); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c index 5b471ab80fe2..ebe3d0ddd304 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c @@ -416,7 +416,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) if (ret) goto err_disable_clk_ref; - priv->reset = devm_reset_control_array_get_exclusive(dev); + priv->reset = devm_reset_control_array_get(dev, false, false); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c index 551edd934ba1..5164a4dc2352 100644 --- a/drivers/soc/amlogic/meson-ee-pwrc.c +++ b/drivers/soc/amlogic/meson-ee-pwrc.c @@ -412,7 +412,8 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev, dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n", count, dom->desc.name); - dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev) + dom->rstc = devm_reset_control_array_get(&pdev->dev, false, + false); if (IS_ERR(dom->rstc)) return PTR_ERR(dom->rstc); } diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c index a4dba8a9cfbc..21b4bc811c00 100644 --- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c +++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c @@ -303,7 +303,7 @@ static int 
meson_gx_pwrc_vpu_probe(struct platform_device *pdev) return PTR_ERR(regmap_hhi); } - rstc = devm_reset_control_array_get_exclusive(&pdev->dev); + rstc = devm_reset_control_array_get(&pdev->dev, false, false); if (IS_ERR(rstc)) { if (PTR_ERR(rstc) != -EPROBE_DEFER) dev_err(&pdev->dev, "failed to get reset lines\n"); -- cgit v1.2.3 From 9b627c25e70816a5e1dca940444b5029065b4d60 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Thu, 15 Oct 2020 11:47:51 -0700 Subject: bus: mhi: core: Remove double locking from mhi_driver_remove() There is double acquisition of the pm_lock from mhi_driver_remove() function. Remove the read_lock_bh/read_unlock_bh calls for pm_lock taken during a call to mhi_device_put() as the lock is acquired within the function already. This will help avoid a potential kernel panic. Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer") Reported-by: Shuah Khan Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 0ffdebde8265..0a09f8215057 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -1276,10 +1276,8 @@ static int mhi_driver_remove(struct device *dev) mutex_unlock(&mhi_chan->mutex); } - read_lock_bh(&mhi_cntrl->pm_lock); while (mhi_dev->dev_wake) mhi_device_put(mhi_dev); - read_unlock_bh(&mhi_cntrl->pm_lock); return 0; } -- cgit v1.2.3 From 8ff3f7bdde45b32f9294fc87e4bd76f369178664 Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Tue, 20 Oct 2020 14:29:45 -0600 Subject: bus: mhi: core: fix potential operator-precedence with BHI macros The BHI_MSMHWID and BHI_OEMPKHASH macros take a value 'n' which is a BHI register index. If 'n' is an expression rather than a simple value, there can be an operator precedence issue which can result in the incorrect calculation of the register offset. Adding parentheses around the macro parameter can prevent such issues. Signed-off-by: Jeffrey Hugo Reviewed-by: Manivannan Sadhasivam Reviewed-by: Hemant Kumar Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/internal.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 7989269ddd96..78e4e84d6743 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -153,8 +153,8 @@ extern struct bus_type mhi_bus_type; #define BHI_SERIALNU (0x40) #define BHI_SBLANTIROLLVER (0x44) #define BHI_NUMSEG (0x48) -#define BHI_MSMHWID(n) (0x4C + (0x4 * n)) -#define BHI_OEMPKHASH(n) (0x64 + (0x4 * n)) +#define BHI_MSMHWID(n) (0x4C + (0x4 * (n))) +#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n))) #define BHI_RSVD5 (0xC4) #define BHI_STATUS_MASK (0xC0000000) #define BHI_STATUS_SHIFT (30) -- cgit v1.2.3 From 855a70c12021bdc5df60512f1d3f6d492dc715be Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 21 Oct 2020 19:18:19 +0200 Subject: bus: mhi: Add MHI PCI support for WWAN modems This is a generic MHI-over-PCI controller driver for MHI only devices such as QCOM modems. For now it supports registering of Qualcomm SDX55 based PCIe modules. The MHI channels have been extracted from mhi downstream driver. This driver is for MHI-only devices which have all functionalities exposed through MHI channels and accessed by the corresponding MHI device drivers (no out-of-band communication). 
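For context (illustrative only, not part of this patch): client drivers bind to the channels exported by this controller by name through the MHI bus. A rough, hypothetical sketch of such a client for the "QMI" channel listed above could look like the following; all "example_*" names are placeholders, and the hooks shown are the standard struct mhi_driver callbacks from <linux/mhi.h>:

#include <linux/mhi.h>

static int example_qmi_probe(struct mhi_device *mhi_dev,
			     const struct mhi_device_id *id)
{
	/* start the UL/DL channels before exchanging data */
	return mhi_prepare_for_transfer(mhi_dev);
}

static void example_qmi_remove(struct mhi_device *mhi_dev)
{
	mhi_unprepare_from_transfer(mhi_dev);
}

static void example_qmi_dl_xfer_cb(struct mhi_device *mhi_dev,
				   struct mhi_result *result)
{
	/* result->buf_addr holds result->bytes_xferd bytes of RX data */
}

static void example_qmi_ul_xfer_cb(struct mhi_device *mhi_dev,
				   struct mhi_result *result)
{
	/* a TX buffer queued earlier has been consumed by the device */
}

static const struct mhi_device_id example_qmi_id_table[] = {
	{ .chan = "QMI" },
	{}
};

static struct mhi_driver example_qmi_driver = {
	.id_table = example_qmi_id_table,
	.probe = example_qmi_probe,
	.remove = example_qmi_remove,
	.dl_xfer_cb = example_qmi_dl_xfer_cb,
	.ul_xfer_cb = example_qmi_ul_xfer_cb,
	.driver = {
		.name = "example_qmi",
	},
};
/* registered from module init with mhi_driver_register(&example_qmi_driver) */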
Signed-off-by: Loic Poulain Reviewed-by: Bhaumik Bhatt Reviewed-by: Hemant Kumar Reviewed-by: Manivannan Sadhasivam [mani: fixed up the Makefile rule] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/Kconfig | 9 ++ drivers/bus/mhi/Makefile | 4 + drivers/bus/mhi/pci_generic.c | 345 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 358 insertions(+) create mode 100644 drivers/bus/mhi/pci_generic.c (limited to 'drivers') diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index e841c1097fb4..da5cd0c9fc62 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -20,3 +20,12 @@ config MHI_BUS_DEBUG Enable debugfs support for use with the MHI transport. Allows reading and/or modifying some values within the MHI controller for debug and test purposes. + +config MHI_BUS_PCI_GENERIC + tristate "MHI PCI controller driver" + depends on MHI_BUS + depends on PCI + help + This driver provides MHI PCI controller driver for devices such as + Qualcomm SDX55 based PCIe modems. + diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 19e6443b72df..0a2d778d6fb4 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,6 @@ # core layer obj-y += core/ + +obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o +mhi_pci_generic-y += pci_generic.o + diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c new file mode 100644 index 000000000000..e3df838c3c80 --- /dev/null +++ b/drivers/bus/mhi/pci_generic.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MHI PCI driver - MHI over PCI controller driver + * + * This module is a generic driver for registering MHI-over-PCI devices, + * such as PCIe QCOM modems. + * + * Copyright (C) 2020 Linaro Ltd + */ + +#include +#include +#include +#include + +#define MHI_PCI_DEFAULT_BAR_NUM 0 + +/** + * struct mhi_pci_dev_info - MHI PCI device specific information + * @config: MHI controller configuration + * @name: name of the PCI module + * @fw: firmware path (if any) + * @edl: emergency download mode firmware path (if any) + * @bar_num: PCI base address register to use for MHI MMIO register space + * @dma_data_width: DMA transfer word size (32 or 64 bits) + */ +struct mhi_pci_dev_info { + const struct mhi_controller_config *config; + const char *name; + const char *fw; + const char *edl; + unsigned int bar_num; + unsigned int dma_data_width; +}; + +#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_TO_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } \ + +#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .num_elements = el_count, \ + .event_ring = ev_ring, \ + .dir = DMA_FROM_DEVICE, \ + .ee_mask = BIT(MHI_EE_AMSS), \ + .pollcfg = 0, \ + .doorbell = MHI_DB_BRST_DISABLE, \ + .lpm_notify = false, \ + .offload_channel = false, \ + .doorbell_mode_switch = false, \ + } + +#define MHI_EVENT_CONFIG_CTRL(ev_ring) \ + { \ + .num_elements = 64, \ + .irq_moderation_ms = 0, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_CTRL, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_EVENT_CONFIG_DATA(ev_ring) \ + { \ + 
.num_elements = 128, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = false, \ + .client_managed = false, \ + .offload_channel = false, \ + } + +#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \ + { \ + .num_elements = 128, \ + .irq_moderation_ms = 5, \ + .irq = (ev_ring) + 1, \ + .priority = 1, \ + .mode = MHI_DB_BRST_DISABLE, \ + .data_type = MHI_ER_DATA, \ + .hardware_event = true, \ + .client_managed = false, \ + .offload_channel = false, \ + .channel = ch_num, \ + } + +static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = { + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), + MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0), + MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2), +}; + +static const struct mhi_event_config modem_qcom_v1_mhi_events[] = { + /* first ring is control+data ring */ + MHI_EVENT_CONFIG_CTRL(0), + /* Hardware channels request dedicated hardware event rings */ + MHI_EVENT_CONFIG_HW_DATA(1, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 101) +}; + +static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { + .max_channels = 128, + .timeout_ms = 5000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + +static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = { + .name = "qcom-sdx55m", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_qcom_v1_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32 +}; + +static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, + { } +}; +MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); + +static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 *out) +{ + *out = readl(addr); + return 0; +} + +static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl, + void __iomem *addr, u32 val) +{ + writel(val, addr); +} + +static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl, + enum mhi_callback cb) +{ + /* Nothing to do for now */ +} + +static int mhi_pci_claim(struct mhi_controller *mhi_cntrl, + unsigned int bar_num, u64 dma_mask) +{ + struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev); + int err; + + err = pci_assign_resource(pdev, bar_num); + if (err) + return err; + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "failed to enable pci device: %d\n", err); + return err; + } + + err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev)); + if (err) { + dev_err(&pdev->dev, "failed to map pci region: %d\n", err); + return err; + } + mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num]; + + err = pci_set_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, "Cannot set proper DMA mask\n"); + return err; + } + + err = pci_set_consistent_dma_mask(pdev, dma_mask); + if (err) { + dev_err(&pdev->dev, "set consistent dma mask failed\n"); + return err; + } + + pci_set_master(pdev); + + return 0; +} + +static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl, + const struct mhi_controller_config *mhi_cntrl_config) +{ + struct pci_dev *pdev = 
to_pci_dev(mhi_cntrl->cntrl_dev); + int nr_vectors, i; + int *irq; + + /* + * Alloc one MSI vector for BHI + one vector per event ring, ideally... + * No explicit pci_free_irq_vectors required, done by pcim_release. + */ + mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events; + + nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI); + if (nr_vectors < 0) { + dev_err(&pdev->dev, "Error allocating MSI vectors %d\n", + nr_vectors); + return nr_vectors; + } + + if (nr_vectors < mhi_cntrl->nr_irqs) { + dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n", + nr_vectors, mhi_cntrl_config->num_events); + } + + irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + for (i = 0; i < mhi_cntrl->nr_irqs; i++) { + int vector = i >= nr_vectors ? (nr_vectors - 1) : i; + + irq[i] = pci_irq_vector(pdev, vector); + } + + mhi_cntrl->irq = irq; + + return 0; +} + +static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl) +{ + /* no PM for now */ + return 0; +} + +static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl) +{ + /* no PM for now */ +} + +static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data; + const struct mhi_controller_config *mhi_cntrl_config; + struct mhi_controller *mhi_cntrl; + int err; + + dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name); + + mhi_cntrl = mhi_alloc_controller(); + if (!mhi_cntrl) + return -ENOMEM; + + mhi_cntrl_config = info->config; + mhi_cntrl->cntrl_dev = &pdev->dev; + mhi_cntrl->iova_start = 0; + mhi_cntrl->iova_stop = DMA_BIT_MASK(info->dma_data_width); + mhi_cntrl->fw_image = info->fw; + mhi_cntrl->edl_image = info->edl; + + mhi_cntrl->read_reg = mhi_pci_read_reg; + mhi_cntrl->write_reg = mhi_pci_write_reg; + mhi_cntrl->status_cb = mhi_pci_status_cb; + mhi_cntrl->runtime_get = mhi_pci_runtime_get; + mhi_cntrl->runtime_put = mhi_pci_runtime_put; + + err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width)); + if (err) + goto err_release; + + err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config); + if (err) + goto err_release; + + pci_set_drvdata(pdev, mhi_cntrl); + + err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config); + if (err) + goto err_release; + + /* MHI bus does not power up the controller by default */ + err = mhi_prepare_for_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to prepare MHI controller\n"); + goto err_unregister; + } + + err = mhi_sync_power_up(mhi_cntrl); + if (err) { + dev_err(&pdev->dev, "failed to power up MHI controller\n"); + goto err_unprepare; + } + + return 0; + +err_unprepare: + mhi_unprepare_after_power_down(mhi_cntrl); +err_unregister: + mhi_unregister_controller(mhi_cntrl); +err_release: + mhi_free_controller(mhi_cntrl); + + return err; +} + +static void mhi_pci_remove(struct pci_dev *pdev) +{ + struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev); + + mhi_power_down(mhi_cntrl, true); + mhi_unprepare_after_power_down(mhi_cntrl); + mhi_unregister_controller(mhi_cntrl); + mhi_free_controller(mhi_cntrl); +} + +static struct pci_driver mhi_pci_driver = { + .name = "mhi-pci-generic", + .id_table = mhi_pci_id_table, + .probe = mhi_pci_probe, + .remove = mhi_pci_remove +}; +module_pci_driver(mhi_pci_driver); + +MODULE_AUTHOR("Loic Poulain "); +MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 
f4d0b39c842585c74bce8f8a80553369181b72df Mon Sep 17 00:00:00 2001 From: Carl Yin Date: Mon, 2 Nov 2020 20:27:10 +0800 Subject: bus: mhi: core: Fix null pointer access when parsing MHI configuration Functions parse_ev_cfg() and parse_ch_cfg() access mhi_cntrl->mhi_dev before it is set in function mhi_register_controller(), use cntrl_dev instead of mhi_dev. Fixes: 0cbf260820fa ("bus: mhi: core: Add support for registering MHI controllers") Signed-off-by: Carl Yin Reviewed-by: Bhaumik Bhatt Reviewed-by: Hemant Kumar Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 0a09f8215057..8cefa359fccd 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -610,7 +610,7 @@ static int parse_ev_cfg(struct mhi_controller *mhi_cntrl, { struct mhi_event *mhi_event; const struct mhi_event_config *event_cfg; - struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct device *dev = mhi_cntrl->cntrl_dev; int i, num; num = config->num_events; @@ -692,7 +692,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl, const struct mhi_controller_config *config) { const struct mhi_channel_config *ch_cfg; - struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct device *dev = mhi_cntrl->cntrl_dev; int i; u32 chan; -- cgit v1.2.3 From a7f422f2f89e7d48aa66e6488444a4c7f01269d5 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Fri, 9 Oct 2020 11:07:14 +0200 Subject: bus: mhi: Fix channel close issue on driver remove Some MHI device drivers need to stop the channels in their driver remove callback (e.g. module unloading), but the unprepare function is aborted because MHI core moved the channels to suspended state prior calling driver remove callback. This prevents the driver to send a proper MHI RESET CHAN command to the device. Device is then unaware of the stopped state of these channels. This causes issue when driver tries to start the channels again (e.g. module is reloaded), since device considers channels as already started (inconsistent state). Fix this by allowing channel reset when channel is suspended. Signed-off-by: Loic Poulain Reviewed-by: Manivannan Sadhasivam Reviewed-by: Bhaumik Bhatt Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 2cff5ddff225..cea5eab83f48 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -1224,7 +1224,8 @@ static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, /* no more processing events for this channel */ mutex_lock(&mhi_chan->mutex); write_lock_irq(&mhi_chan->lock); - if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED && + mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { write_unlock_irq(&mhi_chan->lock); mutex_unlock(&mhi_chan->mutex); return; -- cgit v1.2.3 From 56c8ea864018a995c96810c9536d98a879b89218 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:45 -0800 Subject: bus: mhi: core: Remove unnecessary counter from mhi_firmware_copy() There is an extra 'i' counter in the mhi_firmware_copy() function which is unused. Remove it to clean-up code and reduce stack space as well as improve efficiency of the function. 
Fixes: cd457afb1667 ("bus: mhi: core: Add support for downloading firmware over BHIe") Reported-by: kernel test robot Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 24422f5c3d80..6b6fd9668c3b 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -365,7 +365,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, size_t remainder = firmware->size; size_t to_cpy; const u8 *buf = firmware->data; - int i = 0; struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; @@ -377,7 +376,6 @@ static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl, buf += to_cpy; remainder -= to_cpy; - i++; bhi_vec++; mhi_buf++; } -- cgit v1.2.3 From a8ca15a9c73f92703492e8afe25103a34504851b Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:46 -0800 Subject: bus: mhi: core: Add missing EXPORT_SYMBOL for mhi_get_mhi_state() Add missing EXPORT_SYMBOL_GPL() declaration for mhi_get_mhi_state() API. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index cea5eab83f48..6ecaacaa8b54 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -132,6 +132,7 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) MHISTATUS_MHISTATE_SHIFT, &state); return ret ? MHI_STATE_MAX : state; } +EXPORT_SYMBOL_GPL(mhi_get_mhi_state); int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf_info) -- cgit v1.2.3 From 78e1d22687ff1fd320abac12e8a607ea79782c48 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:47 -0800 Subject: bus: mhi: core: Expose mhi_get_exec_env() API for controllers The mhi_get_exec_env() APIs can be used by the controller drivers to query the execution environment of the MHI device. Expose it so it can be used in some scenarios to determine behavior of controllers. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/internal.h | 1 - drivers/bus/mhi/core/main.c | 1 + include/linux/mhi.h | 6 ++++++ 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index 78e4e84d6743..d8af8a702493 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -609,7 +609,6 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state( struct mhi_controller *mhi_cntrl, enum mhi_pm_state state); const char *to_mhi_pm_state_str(enum mhi_pm_state state); -enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, enum dev_st_transition state); void mhi_pm_st_worker(struct work_struct *work); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 6ecaacaa8b54..f953e2a6d58a 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -123,6 +123,7 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) return (ret) ? 
MHI_EE_MAX : exec; } +EXPORT_SYMBOL_GPL(mhi_get_exec_env); enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) { diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d4841e5a5f45..9225d5551d69 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -658,6 +658,12 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); */ int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl); +/** + * mhi_get_exec_env - Get BHI execution environment of the device + * @mhi_cntrl: MHI controller + */ +enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl); + /** * mhi_get_mhi_state - Get MHI state of the device * @mhi_cntrl: MHI controller -- cgit v1.2.3 From bca7218099e0796901a9c27366e145d9f485c81a Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:48 -0800 Subject: bus: mhi: core: Remove unused mhi_fw_load_worker() declaration The mhi_fw_load_worker() function no longer exists. Remove its declaration as part of code clean-up. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/internal.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h index d8af8a702493..2df8de5432f9 100644 --- a/drivers/bus/mhi/core/internal.h +++ b/drivers/bus/mhi/core/internal.h @@ -613,7 +613,6 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, enum dev_st_transition state); void mhi_pm_st_worker(struct work_struct *work); void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl); -void mhi_fw_load_worker(struct work_struct *work); int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl); int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl); void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl); -- cgit v1.2.3 From 9e1660e5c396ee081907386d0b95b8e0804a6c86 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:49 -0800 Subject: bus: mhi: core: Rename RDDM download function to use proper words mhi_download_rddm_img() uses a shorter version of the word image. Expand it and rename the function to mhi_download_rddm_image(). Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 4 ++-- include/linux/mhi.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 6b6fd9668c3b..16244cc8fbe7 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -147,7 +147,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) } /* Download RDDM image from device */ -int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) { void __iomem *base = mhi_cntrl->bhie; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -169,7 +169,7 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic) return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 
0 : -EIO; } -EXPORT_SYMBOL_GPL(mhi_download_rddm_img); +EXPORT_SYMBOL_GPL(mhi_download_rddm_image); static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 9225d5551d69..52b3c60bf9bb 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -645,12 +645,12 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); int mhi_pm_resume(struct mhi_controller *mhi_cntrl); /** - * mhi_download_rddm_img - Download ramdump image from device for - * debugging purpose. + * mhi_download_rddm_image - Download ramdump image from device for + * debugging purpose. * @mhi_cntrl: MHI controller * @in_panic: Download rddm image during kernel panic */ -int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic); +int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic); /** * mhi_force_rddm_mode - Force device into rddm mode -- cgit v1.2.3 From da7bdbf67db232234bb55e083c57b930c2010f95 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Fri, 6 Nov 2020 09:44:50 -0800 Subject: bus: mhi: core: Skip RDDM download for unknown execution environment If MHI is unable to determine the execution environment during the panic path, host must skip the RDDM download. This can happen if the BHI offset read or the BHI_EXECENV register read fails indicating that the underlying transport is unresponsive. Hence, there is no need to trigger an RDDM using SYSERR or request an SOC reset. Suggested-by: Hemant Kumar Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 16244cc8fbe7..6f0cfb9922b4 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -92,6 +92,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) * image download completion. */ ee = mhi_get_exec_env(mhi_cntrl); + if (ee == MHI_EE_MAX) + goto error_exit_rddm; + if (ee != MHI_EE_RDDM) { dev_dbg(dev, "Trigger device into RDDM mode using SYS ERR\n"); mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); @@ -139,10 +142,12 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl) ee = mhi_get_exec_env(mhi_cntrl); ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status); - dev_err(dev, "Did not complete RDDM transfer\n"); - dev_err(dev, "Current EE: %s\n", TO_MHI_EXEC_STR(ee)); dev_err(dev, "RXVEC_STATUS: 0x%x\n", rx_status); +error_exit_rddm: + dev_err(dev, "RDDM transfer failed. Current EE: %s\n", + TO_MHI_EXEC_STR(ee)); + return -EIO; } -- cgit v1.2.3 From 1b55c16a5e4718f58ed58cfdfe37fe4cd4d89f88 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:20 -0800 Subject: bus: mhi: core: Use appropriate names for firmware load functions mhi_fw_load_sbl() function is currently used to transfer SBL or EDL images over BHI (Boot Host Interface). Same goes with mhi_fw_load_amss() which uses BHIe. However, the contents of these functions do not indicate support for a specific set of images. Since these can be used for any image download over BHI or BHIe, rename them based on the protocol used. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 6f0cfb9922b4..2d7752cd9ccc 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -176,7 +176,7 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic) } EXPORT_SYMBOL_GPL(mhi_download_rddm_image); -static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, +static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, const struct mhi_buf *mhi_buf) { void __iomem *base = mhi_cntrl->bhie; @@ -192,7 +192,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, } sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_TXVECSTATUS_SEQNUM_BMSK); - dev_dbg(dev, "Starting AMSS download via BHIe. Sequence ID:%u\n", + dev_dbg(dev, "Starting image download via BHIe. Sequence ID: %u\n", sequence_id); mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS, upper_32_bits(mhi_buf->dma_addr)); @@ -223,7 +223,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl, return (!ret) ? -ETIMEDOUT : 0; } -static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, +static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl, dma_addr_t dma_addr, size_t size) { @@ -250,7 +250,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl, } session_id = MHI_RANDOM_U32_NONZERO(BHI_TXDB_SEQNUM_BMSK); - dev_dbg(dev, "Starting SBL download via BHI. Session ID:%u\n", + dev_dbg(dev, "Starting image download via BHI. Session ID: %u\n", session_id); mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0); mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH, @@ -449,9 +449,9 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) return; } - /* Download SBL image */ + /* Download image using BHI */ memcpy(buf, firmware->data, size); - ret = mhi_fw_load_sbl(mhi_cntrl, dma_addr, size); + ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL) @@ -459,7 +459,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) /* Error or in EDL mode, we're done */ if (ret) { - dev_err(dev, "MHI did not load SBL, ret:%d\n", ret); + dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); return; } @@ -509,11 +509,12 @@ fw_load_ee_pthru: /* Start full firmware image download */ image_info = mhi_cntrl->fbc_image; - ret = mhi_fw_load_amss(mhi_cntrl, + ret = mhi_fw_load_bhie(mhi_cntrl, /* Vector table is the last entry */ &image_info->mhi_buf[image_info->entries - 1]); if (ret) - dev_err(dev, "MHI did not load AMSS, ret:%d\n", ret); + dev_err(dev, "MHI did not load image over BHIe, ret: %d\n", + ret); release_firmware(firmware); -- cgit v1.2.3 From 8f70397876872789b2a5deba804eb6216fb5deb7 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:21 -0800 Subject: bus: mhi: core: Move to using high priority workqueue MHI work is currently scheduled on the global/system workqueue and can encounter delays on a stressed system. To avoid those unforeseen delays which can hamper bootup or shutdown times, use a dedicated high priority workqueue instead of the global/system workqueue. 
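For reference (a generic sketch, not the exact driver code): the pattern being adopted is a dedicated ordered, high-priority workqueue in place of schedule_work() on the system workqueue. The "example_*" names below are placeholders:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* state transition work runs here, isolated from system workqueue load */
}

static int example_setup(void)
{
	example_wq = alloc_ordered_workqueue("example_hiprio_wq",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_work_fn);
	/* previously: schedule_work(&example_work); */
	queue_work(example_wq, &example_work);

	return 0;
}

static void example_teardown(void)
{
	flush_work(&example_work);
	destroy_workqueue(example_wq);
}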
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 9 +++++++++ drivers/bus/mhi/core/pm.c | 2 +- include/linux/mhi.h | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 8cefa359fccd..877e40c86801 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -880,6 +880,13 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); init_waitqueue_head(&mhi_cntrl->state_event); + mhi_cntrl->hiprio_wq = alloc_ordered_workqueue + ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI); + if (!mhi_cntrl->hiprio_wq) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); + goto error_alloc_cmd; + } + mhi_cmd = mhi_cntrl->mhi_cmd; for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) spin_lock_init(&mhi_cmd->lock); @@ -969,6 +976,7 @@ error_alloc_dev: error_alloc_cmd: vfree(mhi_cntrl->mhi_chan); kfree(mhi_cntrl->mhi_event); + destroy_workqueue(mhi_cntrl->hiprio_wq); return ret; } @@ -982,6 +990,7 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) mhi_destroy_debugfs(mhi_cntrl); + destroy_workqueue(mhi_cntrl->hiprio_wq); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 3de7b1639ec6..805b6fa748f0 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -597,7 +597,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl, list_add_tail(&item->node, &mhi_cntrl->transition_list); spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); - schedule_work(&mhi_cntrl->st_worker); + queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker); return 0; } diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 52b3c60bf9bb..1ed5f2aa224b 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -337,6 +337,7 @@ struct mhi_controller_config { * @wlock: Lock for protecting device wakeup * @mhi_link_info: Device bandwidth info * @st_worker: State transition worker + * @hiprio_wq: High priority workqueue for MHI work such as state transitions * @state_event: State change event * @status_cb: CB function to notify power states of the device (required) * @wake_get: CB function to assert device wake (optional) @@ -419,6 +420,7 @@ struct mhi_controller { spinlock_t wlock; struct mhi_link_info mhi_link_info; struct work_struct st_worker; + struct workqueue_struct *hiprio_wq; wait_queue_head_t state_event; void (*status_cb)(struct mhi_controller *mhi_cntrl, -- cgit v1.2.3 From 8e0559921f9afc01fe0457c5e136ce4a7ae8f0d3 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:22 -0800 Subject: bus: mhi: core: Skip device wake in error or shutdown states MHI client drivers can request a device wake even if the device may be in an error state or undergoing a shutdown. To prevent unnecessary device wake processing, check for the device state and bail out early so that the clients are made aware of the device state sooner. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/pm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 805b6fa748f0..029919647002 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -827,6 +827,10 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl) /* Wake up the device */ read_lock_bh(&mhi_cntrl->pm_lock); + if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { + read_unlock_bh(&mhi_cntrl->pm_lock); + return -EIO; + } mhi_cntrl->wake_get(mhi_cntrl, true); if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) mhi_trigger_resume(mhi_cntrl); -- cgit v1.2.3 From 3fb81a4d5f2f55b1596cb5cffca98c97ca43f07a Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:23 -0800 Subject: bus: mhi: core: Move to SYS_ERROR regardless of RDDM capability In some cases, the entry of device to RDDM execution environment can occur after a significant amount of time has elapsed and a SYS_ERROR state change event has already arrived. This can result in scenarios where MHI controller and client drivers are unaware of the error state of the device. Remove the check for rddm_image when processing the SYS_ERROR state change as it is present in mhi_pm_sys_err_handler() already and prevent further activity until the expected RDDM execution environment change occurs or the controller driver decides further action. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/main.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index f953e2a6d58a..91f8b8d05a62 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -737,11 +737,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, { enum mhi_pm_state new_state; - /* skip SYS_ERROR handling if RDDM supported */ - if (mhi_cntrl->ee == MHI_EE_RDDM || - mhi_cntrl->rddm_image) - break; - dev_dbg(dev, "System error detected\n"); write_lock_irq(&mhi_cntrl->pm_lock); new_state = mhi_tryset_pm_state(mhi_cntrl, -- cgit v1.2.3 From 0c76b3fa580da8f65333b8774205f0a72ffe844c Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:24 -0800 Subject: bus: mhi: core: Prevent sending multiple RDDM entry callbacks If an mhi_power_down() is initiated after the device has entered RDDM and a status callback was provided for it, it is possible that another BHI interrupt fires while waiting for the MHI RESET to be cleared. If that happens, MHI host would have moved a "disabled" execution environment and the check to allow sending an RDDM status callback will pass when it is should not. Add a check to see if MHI is in an active state before proceeding. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 91f8b8d05a62..4eb93d8bea1d 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -401,6 +401,10 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) /* If device supports RDDM don't bother processing SYS error */ if (mhi_cntrl->rddm_image) { + /* host may be performing a device power down already */ + if (!mhi_is_active(mhi_cntrl)) + goto exit_intvec; + if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) { mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); wake_up_all(&mhi_cntrl->state_event); -- cgit v1.2.3 From 12e050c77be036377fee479dccd479202d8d6853 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:25 -0800 Subject: bus: mhi: core: Move to an error state on any firmware load failure Move MHI to a firmware download error state for a failure to find the firmware files or to load SBL or EBL image using BHI/BHIe. This helps detect an error state sooner and shortens the wait for a synchronous power up timeout. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 2d7752cd9ccc..7b57bb9a3080 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -428,13 +428,13 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) !mhi_cntrl->seg_len))) { dev_err(dev, "No firmware image defined or !sbl_size || !seg_len\n"); - return; + goto error_fw_load; } ret = request_firmware(&firmware, fw_name, dev); if (ret) { dev_err(dev, "Error loading firmware: %d\n", ret); - return; + goto error_fw_load; } size = (mhi_cntrl->fbc_download) ? 
mhi_cntrl->sbl_size : firmware->size; @@ -446,7 +446,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL); if (!buf) { release_firmware(firmware); - return; + goto error_fw_load; } /* Download image using BHI */ @@ -454,17 +454,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size); mhi_free_coherent(mhi_cntrl, size, buf, dma_addr); - if (!mhi_cntrl->fbc_download || ret || mhi_cntrl->ee == MHI_EE_EDL) - release_firmware(firmware); - /* Error or in EDL mode, we're done */ if (ret) { dev_err(dev, "MHI did not load image over BHI, ret: %d\n", ret); - return; + release_firmware(firmware); + goto error_fw_load; } - if (mhi_cntrl->ee == MHI_EE_EDL) + if (mhi_cntrl->ee == MHI_EE_EDL) { + release_firmware(firmware); return; + } write_lock_irq(&mhi_cntrl->pm_lock); mhi_cntrl->dev_state = MHI_STATE_RESET; @@ -477,13 +477,17 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl) if (mhi_cntrl->fbc_download) { ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, firmware->size); - if (ret) - goto error_alloc_fw_table; + if (ret) { + release_firmware(firmware); + goto error_fw_load; + } /* Load the firmware into BHIE vec table */ mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image); } + release_firmware(firmware); + fw_load_ee_pthru: /* Transitioning into MHI RESET->READY state */ ret = mhi_ready_state_transition(mhi_cntrl); @@ -512,11 +516,11 @@ fw_load_ee_pthru: ret = mhi_fw_load_bhie(mhi_cntrl, /* Vector table is the last entry */ &image_info->mhi_buf[image_info->entries - 1]); - if (ret) + if (ret) { dev_err(dev, "MHI did not load image over BHIe, ret: %d\n", ret); - - release_firmware(firmware); + goto error_fw_load; + } return; @@ -524,6 +528,7 @@ error_read: mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); mhi_cntrl->fbc_image = NULL; -error_alloc_fw_table: - release_firmware(firmware); +error_fw_load: + mhi_cntrl->pm_state = MHI_PM_FW_DL_ERR; + wake_up_all(&mhi_cntrl->state_event); } -- cgit v1.2.3 From faa257075bcc35e2c2ee68eb83e4e53d3bb02a90 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:26 -0800 Subject: bus: mhi: core: Use appropriate label in firmware load handler API Correct the "error_read" label to say "error_ready_state" as that is the appropriate usage of the label. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/boot.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c index 7b57bb9a3080..c2546bf229fb 100644 --- a/drivers/bus/mhi/core/boot.c +++ b/drivers/bus/mhi/core/boot.c @@ -497,7 +497,7 @@ fw_load_ee_pthru: if (ret) { dev_err(dev, "MHI did not enter READY state\n"); - goto error_read; + goto error_ready_state; } /* Wait for the SBL event */ @@ -508,7 +508,7 @@ fw_load_ee_pthru: if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) { dev_err(dev, "MHI did not enter SBL\n"); - goto error_read; + goto error_ready_state; } /* Start full firmware image download */ @@ -524,7 +524,7 @@ fw_load_ee_pthru: return; -error_read: +error_ready_state: mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); mhi_cntrl->fbc_image = NULL; -- cgit v1.2.3 From dc53d862eab89000ebc87240274943793f0af682 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:27 -0800 Subject: bus: mhi: core: Move to an error state on mission mode failure If the host receives a mission mode event and by the time it can get to processing it, the register accesses fail implying a connectivity error, MHI should move to an error state. This helps avoid longer wait times from a synchronous power up perspective and accurately reflects the MHI execution environment and power management states. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/pm.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 029919647002..06adea2580d2 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -383,10 +383,14 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl) write_lock_irq(&mhi_cntrl->pm_lock); if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) + if (!MHI_IN_MISSION_MODE(mhi_cntrl->ee)) { + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; + write_unlock_irq(&mhi_cntrl->pm_lock); + wake_up_all(&mhi_cntrl->state_event); return -EIO; + } + write_unlock_irq(&mhi_cntrl->pm_lock); wake_up_all(&mhi_cntrl->state_event); -- cgit v1.2.3 From 40c3127187cb38ed255ba4473e11cd70385c7431 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:28 -0800 Subject: bus: mhi: core: Check for IRQ availability during registration Current design allows a controller to register with MHI successfully without the need to have any IRQs available for use. If no IRQs are available, power up requests to MHI can fail after a successful registration with MHI. Improve the design by checking for the number of IRQs available sooner within the mhi_regsiter_controller() API as it is required to be specified by the controller. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 2 +- drivers/bus/mhi/core/pm.c | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 877e40c86801..2534f1c9c153 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -858,7 +858,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || - !mhi_cntrl->write_reg) + !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs) return -EINVAL; ret = parse_config(mhi_cntrl, config); diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 06adea2580d2..1d04e401b67f 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -926,9 +926,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) dev_info(dev, "Requested to power ON\n"); - if (mhi_cntrl->nr_irqs < 1) - return -EINVAL; - /* Supply default wake routines if not provided by controller driver */ if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put || !mhi_cntrl->wake_toggle) { -- cgit v1.2.3 From 556bbb442bbb44f429dbaa9f8b48e0b4cda6e088 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:29 -0800 Subject: bus: mhi: core: Separate system error and power down handling Currently, there exist a set of if...else statements in the mhi_pm_disable_transition() function which make handling system error and disable transitions differently complex. To make that cleaner and facilitate differences in behavior, separate these two transitions for MHI host. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/pm.c | 159 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 137 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 1d04e401b67f..347ae7dd6cf8 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -444,7 +444,7 @@ error_mission_mode: return ret; } -/* Handle SYS_ERR and Shutdown transitions */ +/* Handle shutdown transitions */ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, enum mhi_pm_state transition_state) { @@ -460,10 +460,6 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, to_mhi_pm_state_str(mhi_cntrl->pm_state), to_mhi_pm_state_str(transition_state)); - /* We must notify MHI control driver so it can clean up first */ - if (transition_state == MHI_PM_SYS_ERR_PROCESS) - mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); - mutex_lock(&mhi_cntrl->pm_mutex); write_lock_irq(&mhi_cntrl->pm_lock); prev_state = mhi_cntrl->pm_state; @@ -502,11 +498,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, MHICTRL_RESET_SHIFT, &in_reset) || !in_reset, timeout); - if ((!ret || in_reset) && cur_state == MHI_PM_SYS_ERR_PROCESS) { + if (!ret || in_reset) dev_err(dev, "Device failed to exit MHI Reset state\n"); - mutex_unlock(&mhi_cntrl->pm_mutex); - return; - } /* * Device will clear BHI_INTVEC as a part of RESET processing, @@ -566,19 +559,142 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, er_ctxt->wp = er_ctxt->rbase; } - if (cur_state == MHI_PM_SYS_ERR_PROCESS) { - mhi_ready_state_transition(mhi_cntrl); - } else { - /* Move to disable state */ - 
write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); - write_unlock_irq(&mhi_cntrl->pm_lock); - if (unlikely(cur_state != MHI_PM_DISABLE)) - dev_err(dev, "Error moving from PM state: %s to: %s\n", - to_mhi_pm_state_str(cur_state), - to_mhi_pm_state_str(MHI_PM_DISABLE)); + /* Move to disable state */ + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE); + write_unlock_irq(&mhi_cntrl->pm_lock); + if (unlikely(cur_state != MHI_PM_DISABLE)) + dev_err(dev, "Error moving from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_DISABLE)); + + dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + TO_MHI_STATE_STR(mhi_cntrl->dev_state)); + + mutex_unlock(&mhi_cntrl->pm_mutex); +} + +/* Handle system error transitions */ +static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) +{ + enum mhi_pm_state cur_state, prev_state; + struct mhi_event *mhi_event; + struct mhi_cmd_ctxt *cmd_ctxt; + struct mhi_cmd *mhi_cmd; + struct mhi_event_ctxt *er_ctxt; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + + /* We must notify MHI control driver so it can clean up first */ + mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR); + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + prev_state = mhi_cntrl->pm_state; + cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + write_unlock_irq(&mhi_cntrl->pm_lock); + + if (cur_state != MHI_PM_SYS_ERR_PROCESS) { + dev_err(dev, "Failed to transition from PM state: %s to: %s\n", + to_mhi_pm_state_str(cur_state), + to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS)); + goto exit_sys_error_transition; + } + + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + /* Wake up threads waiting for state transition */ + wake_up_all(&mhi_cntrl->state_event); + + /* Trigger MHI RESET so that the device will not access host memory */ + if (MHI_REG_ACCESS_VALID(prev_state)) { + u32 in_reset = -1; + unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); + + dev_dbg(dev, "Triggering MHI Reset in device\n"); + mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); + + /* Wait for the reset bit to be cleared by the device */ + ret = wait_event_timeout(mhi_cntrl->state_event, + mhi_read_reg_field(mhi_cntrl, + mhi_cntrl->regs, + MHICTRL, + MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, + &in_reset) || + !in_reset, timeout); + if (!ret || in_reset) { + dev_err(dev, "Device failed to exit MHI Reset state\n"); + goto exit_sys_error_transition; + } + + /* + * Device will clear BHI_INTVEC as a part of RESET processing, + * hence re-program it + */ + mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + } + + dev_dbg(dev, + "Waiting for all pending event ring processing to complete\n"); + mhi_event = mhi_cntrl->mhi_event; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { + if (mhi_event->offload_ev) + continue; + tasklet_kill(&mhi_event->task); } + /* Release lock and wait for all pending threads to complete */ + mutex_unlock(&mhi_cntrl->pm_mutex); + dev_dbg(dev, "Waiting for all pending threads to complete\n"); + wake_up_all(&mhi_cntrl->state_event); + + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + 
device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device); + + mutex_lock(&mhi_cntrl->pm_mutex); + + WARN_ON(atomic_read(&mhi_cntrl->dev_wake)); + WARN_ON(atomic_read(&mhi_cntrl->pending_pkts)); + + /* Reset the ev rings and cmd rings */ + dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n"); + mhi_cmd = mhi_cntrl->mhi_cmd; + cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt; + for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) { + struct mhi_ring *ring = &mhi_cmd->ring; + + ring->rp = ring->base; + ring->wp = ring->base; + cmd_ctxt->rp = cmd_ctxt->rbase; + cmd_ctxt->wp = cmd_ctxt->rbase; + } + + mhi_event = mhi_cntrl->mhi_event; + er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt; + for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, + mhi_event++) { + struct mhi_ring *ring = &mhi_event->ring; + + /* Skip offload events */ + if (mhi_event->offload_ev) + continue; + + ring->rp = ring->base; + ring->wp = ring->base; + er_ctxt->rp = er_ctxt->rbase; + er_ctxt->wp = er_ctxt->rbase; + } + + mhi_ready_state_transition(mhi_cntrl); + +exit_sys_error_transition: dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n", to_mhi_pm_state_str(mhi_cntrl->pm_state), TO_MHI_STATE_STR(mhi_cntrl->dev_state)); @@ -666,8 +782,7 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_ready_state_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_SYS_ERR: - mhi_pm_disable_transition - (mhi_cntrl, MHI_PM_SYS_ERR_PROCESS); + mhi_pm_sys_error_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_DISABLE: mhi_pm_disable_transition -- cgit v1.2.3 From a03c7a86e12721da9f6bb509dddda19fd9ae8c6c Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:30 -0800 Subject: bus: mhi: core: Mark and maintain device states early on after power down mhi_power_down() does not ensure that the PM state is moved to an inaccessible state soon enough as the system can encounter scheduling delays till mhi_pm_disable_transition() gets called. Additionally, if an MHI controller decides that the device is now inaccessible and issues a power down, the register inaccessible state is not maintained by moving from MHI_PM_LD_ERR_FATAL_DETECT to MHI_PM_SHUTDOWN_PROCESS. This can result in bus errors if a client driver attempted to read registers when powering down. Close these gaps and avoid any race conditions to prevent such activity. 
Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/pm.c | 77 ++++++++++++++++++++--------------------------- 1 file changed, 33 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index 347ae7dd6cf8..ffbf6f539510 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -37,9 +37,10 @@ * M0 -> FW_DL_ERR * M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR - * L2: SHUTDOWN_PROCESS -> DISABLE + * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT + * SHUTDOWN_PROCESS -> DISABLE * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT - * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS + * LD_ERR_FATAL_DETECT -> DISABLE */ static struct mhi_pm_transitions const dev_state_transitions[] = { /* L0 States */ @@ -72,7 +73,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = { { MHI_PM_M3, MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT | - MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT + MHI_PM_LD_ERR_FATAL_DETECT }, { MHI_PM_M3_EXIT, @@ -103,7 +104,7 @@ static struct mhi_pm_transitions const dev_state_transitions[] = { /* L3 States */ { MHI_PM_LD_ERR_FATAL_DETECT, - MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS + MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE }, }; @@ -445,10 +446,9 @@ error_mission_mode: } /* Handle shutdown transitions */ -static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, - enum mhi_pm_state transition_state) +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) { - enum mhi_pm_state cur_state, prev_state; + enum mhi_pm_state cur_state; struct mhi_event *mhi_event; struct mhi_cmd_ctxt *cmd_ctxt; struct mhi_cmd *mhi_cmd; @@ -456,33 +456,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; int ret, i; - dev_dbg(dev, "Transitioning from PM state: %s to: %s\n", - to_mhi_pm_state_str(mhi_cntrl->pm_state), - to_mhi_pm_state_str(transition_state)); + dev_dbg(dev, "Processing disable transition with PM state: %s\n", + to_mhi_pm_state_str(mhi_cntrl->pm_state)); mutex_lock(&mhi_cntrl->pm_mutex); - write_lock_irq(&mhi_cntrl->pm_lock); - prev_state = mhi_cntrl->pm_state; - cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); - if (cur_state == transition_state) { - mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; - mhi_cntrl->dev_state = MHI_STATE_RESET; - } - write_unlock_irq(&mhi_cntrl->pm_lock); - - /* Wake up threads waiting for state transition */ - wake_up_all(&mhi_cntrl->state_event); - - if (cur_state != transition_state) { - dev_err(dev, "Failed to transition to state: %s from: %s\n", - to_mhi_pm_state_str(transition_state), - to_mhi_pm_state_str(cur_state)); - mutex_unlock(&mhi_cntrl->pm_mutex); - return; - } /* Trigger MHI RESET so that the device will not access host memory */ - if (MHI_REG_ACCESS_VALID(prev_state)) { + if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) { u32 in_reset = -1; unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms); @@ -785,8 +765,7 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_pm_sys_error_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_DISABLE: - mhi_pm_disable_transition - (mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS); + mhi_pm_disable_transition(mhi_cntrl); break; default: break; @@ -1153,23 +1132,33 @@ EXPORT_SYMBOL_GPL(mhi_async_power_up); void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) { - enum 
mhi_pm_state cur_state; + enum mhi_pm_state cur_state, transition_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; /* If it's not a graceful shutdown, force MHI to linkdown state */ - if (!graceful) { - mutex_lock(&mhi_cntrl->pm_mutex); - write_lock_irq(&mhi_cntrl->pm_lock); - cur_state = mhi_tryset_pm_state(mhi_cntrl, - MHI_PM_LD_ERR_FATAL_DETECT); - write_unlock_irq(&mhi_cntrl->pm_lock); - mutex_unlock(&mhi_cntrl->pm_mutex); - if (cur_state != MHI_PM_LD_ERR_FATAL_DETECT) - dev_dbg(dev, "Failed to move to state: %s from: %s\n", - to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT), - to_mhi_pm_state_str(mhi_cntrl->pm_state)); + transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS : + MHI_PM_LD_ERR_FATAL_DETECT; + + mutex_lock(&mhi_cntrl->pm_mutex); + write_lock_irq(&mhi_cntrl->pm_lock); + cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state); + if (cur_state != transition_state) { + dev_err(dev, "Failed to move to state: %s from: %s\n", + to_mhi_pm_state_str(transition_state), + to_mhi_pm_state_str(mhi_cntrl->pm_state)); + /* Force link down or error fatal detected state */ + mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT; } + /* mark device inactive to avoid any further host processing */ + mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION; + mhi_cntrl->dev_state = MHI_STATE_RESET; + + wake_up_all(&mhi_cntrl->state_event); + + write_unlock_irq(&mhi_cntrl->pm_lock); + mutex_unlock(&mhi_cntrl->pm_mutex); + mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); /* Wait for shutdown to complete */ -- cgit v1.2.3 From 6cc1716102b55497dd557e8295a3177315332f9a Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 9 Nov 2020 12:47:31 -0800 Subject: bus: mhi: core: Remove MHI event ring IRQ handlers when powering down While powering down, the device may or may not acknowledge an MHI RESET issued by host for a graceful shutdown scenario and end up sending an incoming data packet after tasklets have been killed. If a rogue device sends this interrupt for a data transfer event ring update, it can result in a tasklet getting scheduled while a clean up is ongoing or has completed and cause access to freed memory leading to a NULL pointer exception. Remove the interrupt handlers for MHI event rings early on to avoid this scenario. Signed-off-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/pm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index ffbf6f539510..a671f585ce35 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -494,6 +494,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { if (mhi_event->offload_ev) continue; + free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); tasklet_kill(&mhi_event->task); } @@ -1164,7 +1165,7 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) /* Wait for shutdown to complete */ flush_work(&mhi_cntrl->st_worker); - mhi_deinit_free_irq(mhi_cntrl); + free_irq(mhi_cntrl->irq[0], mhi_cntrl); if (!mhi_cntrl->pre_init) { /* Free all allocated resources */ -- cgit v1.2.3 From 7af8109efad54ee015626e5dcac133a6f4e473da Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Tue, 10 Nov 2020 14:32:21 +0800 Subject: phy: sun4i-usb: remove enable_pmu_unk1 from sun50i_h6_cfg For the current code, enable_pmu_unk1 only works in non-a83t and non-h6 types. 
So let's delete it from the sun50i_h6_cfg. Signed-off-by: Yangtao Li Link: https://lore.kernel.org/r/dc8cbb7b3cd59902a6719f207d18a232903fac8a.1604988979.git.frank@allwinnertech.com Signed-off-by: Vinod Koul --- drivers/phy/allwinner/phy-sun4i-usb.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c index 406d5943f8a8..788dd5cdbb7d 100644 --- a/drivers/phy/allwinner/phy-sun4i-usb.c +++ b/drivers/phy/allwinner/phy-sun4i-usb.c @@ -965,7 +965,6 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = { .disc_thresh = 3, .phyctl_offset = REG_PHYCTL_A33, .dedicated_clocks = true, - .enable_pmu_unk1 = true, .phy0_dual_route = true, .missing_phys = BIT(1) | BIT(2), }; -- cgit v1.2.3 From 36a94760c98954e50ea621f7a9603fee3621deb7 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 13 Nov 2020 12:34:23 +0100 Subject: phy: phy-bcm-ns-usb3: drop support for deprecated DT binding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initially this PHY driver was implementing MDIO access on its own. It was caused by lack of proper hardware design understanding. It has been changed back in 2017. DT bindings were changed and driver was updated to use MDIO layer. It should be really safe now to drop the old deprecated code. All Linux stored DT files don't use it for 3,5 year. There is close to 0 chance there is any bootloader with its own DTB using old the binding. Signed-off-by: Rafał Miłecki Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20201113113423.9466-1-zajec5@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/broadcom/phy-bcm-ns-usb3.c | 156 ++------------------------------- 1 file changed, 5 insertions(+), 151 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c index 42baf4d6dd5d..eb10ffa13a62 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c @@ -22,8 +22,6 @@ #include #include -#define BCM_NS_USB3_MII_MNG_TIMEOUT_US 1000 /* usecs */ - #define BCM_NS_USB3_PHY_BASE_ADDR_REG 0x1f #define BCM_NS_USB3_PHY_PLL30_BLOCK 0x8000 #define BCM_NS_USB3_PHY_TX_PMD_BLOCK 0x8040 @@ -51,11 +49,8 @@ struct bcm_ns_usb3 { struct device *dev; enum bcm_ns_family family; void __iomem *dmp; - void __iomem *ccb_mii; struct mdio_device *mdiodev; struct phy *phy; - - int (*phy_write)(struct bcm_ns_usb3 *usb3, u16 reg, u16 value); }; static const struct of_device_id bcm_ns_usb3_id_table[] = { @@ -69,13 +64,9 @@ static const struct of_device_id bcm_ns_usb3_id_table[] = { }, {}, }; -MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table); static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, - u16 value) -{ - return usb3->phy_write(usb3, reg, value); -} + u16 value); static int bcm_ns_usb3_phy_init_ns_bx(struct bcm_ns_usb3 *usb3) { @@ -187,8 +178,8 @@ static const struct phy_ops ops = { * MDIO driver code **************************************************/ -static int bcm_ns_usb3_mdiodev_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, - u16 value) +static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, + u16 value) { struct mdio_device *mdiodev = usb3->mdiodev; @@ -229,8 +220,6 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev) return PTR_ERR(usb3->dmp); } - usb3->phy_write = bcm_ns_usb3_mdiodev_phy_write; - usb3->phy = devm_phy_create(dev, NULL, &ops); if (IS_ERR(usb3->phy)) { dev_err(dev, "Failed to create PHY\n"); 
@@ -254,142 +243,7 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = { .probe = bcm_ns_usb3_mdio_probe, }; -/************************************************** - * Platform driver code - **************************************************/ - -static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr, - u32 mask, u32 value, int usec) -{ - u32 val; - int ret; - - ret = readl_poll_timeout_atomic(addr, val, ((val & mask) == value), - 10, usec); - if (ret) - dev_err(usb3->dev, "Timeout waiting for register %p\n", addr); - - return ret; -} - -static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3) -{ - return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL, - 0x0100, 0x0000, - BCM_NS_USB3_MII_MNG_TIMEOUT_US); -} - -static int bcm_ns_usb3_platform_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, - u16 value) -{ - u32 tmp = 0; - int err; - - err = bcm_ns_usb3_mii_mng_wait_idle(usb3); - if (err < 0) { - dev_err(usb3->dev, "Couldn't write 0x%08x value\n", value); - return err; - } - - /* TODO: Use a proper MDIO bus layer */ - tmp |= 0x58020000; /* Magic value for MDIO PHY write */ - tmp |= reg << 18; - tmp |= value; - writel(tmp, usb3->ccb_mii + BCMA_CCB_MII_MNG_CMD_DATA); - - return bcm_ns_usb3_mii_mng_wait_idle(usb3); -} - -static int bcm_ns_usb3_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - const struct of_device_id *of_id; - struct bcm_ns_usb3 *usb3; - struct phy_provider *phy_provider; - - usb3 = devm_kzalloc(dev, sizeof(*usb3), GFP_KERNEL); - if (!usb3) - return -ENOMEM; - - usb3->dev = dev; - - of_id = of_match_device(bcm_ns_usb3_id_table, dev); - if (!of_id) - return -EINVAL; - usb3->family = (enum bcm_ns_family)of_id->data; - - usb3->dmp = devm_platform_ioremap_resource_byname(pdev, "dmp"); - if (IS_ERR(usb3->dmp)) { - dev_err(dev, "Failed to map DMP regs\n"); - return PTR_ERR(usb3->dmp); - } - - usb3->ccb_mii = devm_platform_ioremap_resource_byname(pdev, "ccb-mii"); - if (IS_ERR(usb3->ccb_mii)) { - dev_err(dev, "Failed to map ChipCommon B MII regs\n"); - return PTR_ERR(usb3->ccb_mii); - } - - /* Enable MDIO. Setting MDCDIV as 26 */ - writel(0x0000009a, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL); - - /* Wait for MDIO? */ - udelay(2); - - usb3->phy_write = bcm_ns_usb3_platform_phy_write; - - usb3->phy = devm_phy_create(dev, NULL, &ops); - if (IS_ERR(usb3->phy)) { - dev_err(dev, "Failed to create PHY\n"); - return PTR_ERR(usb3->phy); - } - - phy_set_drvdata(usb3->phy, usb3); - platform_set_drvdata(pdev, usb3); - - phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (!IS_ERR(phy_provider)) - dev_info(dev, "Registered Broadcom Northstar USB 3.0 PHY driver\n"); - - return PTR_ERR_OR_ZERO(phy_provider); -} - -static struct platform_driver bcm_ns_usb3_driver = { - .probe = bcm_ns_usb3_probe, - .driver = { - .name = "bcm_ns_usb3", - .of_match_table = bcm_ns_usb3_id_table, - }, -}; - -static int __init bcm_ns_usb3_module_init(void) -{ - int err; - - /* - * For backward compatibility we register as MDIO and platform driver. - * After getting MDIO binding commonly used (e.g. switching all DT files - * to use it) we should deprecate the old binding and eventually drop - * support for it. 
- */ - - err = mdio_driver_register(&bcm_ns_usb3_mdio_driver); - if (err) - return err; - - err = platform_driver_register(&bcm_ns_usb3_driver); - if (err) - mdio_driver_unregister(&bcm_ns_usb3_mdio_driver); - - return err; -} -module_init(bcm_ns_usb3_module_init); - -static void __exit bcm_ns_usb3_module_exit(void) -{ - platform_driver_unregister(&bcm_ns_usb3_driver); - mdio_driver_unregister(&bcm_ns_usb3_mdio_driver); -} -module_exit(bcm_ns_usb3_module_exit) +mdio_module_driver(bcm_ns_usb3_mdio_driver); MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, bcm_ns_usb3_id_table); -- cgit v1.2.3 From 76aefb221146dbe0de124f566329c76d5dcf118a Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 16 Nov 2020 11:13:15 +0100 Subject: phy: amlogic: Add AXG MIPI D-PHY driver The Amlogic AXG SoCs embeds a MIPI D-PHY used to communicate with DSI panels. Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20201116101315.71720-3-narmstrong@baylibre.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/Kconfig | 12 + drivers/phy/amlogic/Makefile | 1 + drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c | 413 ++++++++++++++++++++++++++ 3 files changed, 426 insertions(+) create mode 100644 drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c (limited to 'drivers') diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig index 617cf073e9aa..99e8a4c7f1f3 100644 --- a/drivers/phy/amlogic/Kconfig +++ b/drivers/phy/amlogic/Kconfig @@ -70,3 +70,15 @@ config PHY_MESON_AXG_MIPI_PCIE_ANALOG Enable this to support the Meson MIPI + PCIE analog PHY found in Meson AXG SoCs. If unsure, say N. + +config PHY_MESON_AXG_MIPI_DPHY + tristate "Meson AXG MIPI DPHY driver" + default ARCH_MESON + depends on OF && (ARCH_MESON || COMPILE_TEST) + select GENERIC_PHY + select REGMAP_MMIO + select GENERIC_PHY_MIPI_DPHY + help + Enable this to support the Meson MIPI DPHY found in Meson AXG + SoCs. + If unsure, say N. diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile index 99702a45e9be..8fa07fbd0d92 100644 --- a/drivers/phy/amlogic/Makefile +++ b/drivers/phy/amlogic/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o obj-$(CONFIG_PHY_MESON_G12A_USB3_PCIE) += phy-meson-g12a-usb3-pcie.o obj-$(CONFIG_PHY_MESON_AXG_PCIE) += phy-meson-axg-pcie.o obj-$(CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG) += phy-meson-axg-mipi-pcie-analog.o +obj-$(CONFIG_PHY_MESON_AXG_MIPI_DPHY) += phy-meson-axg-mipi-dphy.o diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c new file mode 100644 index 000000000000..cd2332bf0e31 --- /dev/null +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Meson AXG MIPI DPHY driver + * + * Copyright (C) 2018 Amlogic, Inc. All rights reserved + * Copyright (C) 2020 BayLibre, SAS + * Author: Neil Armstrong + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* [31] soft reset for the phy. + * 1: reset. 0: dessert the reset. + * [30] clock lane soft reset. + * [29] data byte lane 3 soft reset. + * [28] data byte lane 2 soft reset. + * [27] data byte lane 1 soft reset. + * [26] data byte lane 0 soft reset. + * [25] mipi dsi pll clock selection. + * 1: clock from fixed 850Mhz clock source. 0: from VID2 PLL. + * [12] mipi HSbyteclk enable. + * [11] mipi divider clk selection. + * 1: select the mipi DDRCLKHS from clock divider. + * 0: from PLL clock. + * [10] mipi clock divider control. 
+ * 1: /4. 0: /2. + * [9] mipi divider output enable. + * [8] mipi divider counter enable. + * [7] PLL clock enable. + * [5] LPDT data endian. + * 1 = transfer the high bit first. 0 : transfer the low bit first. + * [4] HS data endian. + * [3] force data byte lane in stop mode. + * [2] force data byte lane 0 in receiver mode. + * [1] write 1 to sync the txclkesc input. the internal logic have to + * use txclkesc to decide Txvalid and Txready. + * [0] enalbe the MIPI DPHY TxDDRClk. + */ +#define MIPI_DSI_PHY_CTRL 0x0 + +/* [31] clk lane tx_hs_en control selection. + * 1: from register. 0: use clk lane state machine. + * [30] register bit for clock lane tx_hs_en. + * [29] clk lane tx_lp_en contrl selection. + * 1: from register. 0: from clk lane state machine. + * [28] register bit for clock lane tx_lp_en. + * [27] chan0 tx_hs_en control selection. + * 1: from register. 0: from chan0 state machine. + * [26] register bit for chan0 tx_hs_en. + * [25] chan0 tx_lp_en control selection. + * 1: from register. 0: from chan0 state machine. + * [24] register bit from chan0 tx_lp_en. + * [23] chan0 rx_lp_en control selection. + * 1: from register. 0: from chan0 state machine. + * [22] register bit from chan0 rx_lp_en. + * [21] chan0 contention detection enable control selection. + * 1: from register. 0: from chan0 state machine. + * [20] register bit from chan0 contention dectection enable. + * [19] chan1 tx_hs_en control selection. + * 1: from register. 0: from chan0 state machine. + * [18] register bit for chan1 tx_hs_en. + * [17] chan1 tx_lp_en control selection. + * 1: from register. 0: from chan0 state machine. + * [16] register bit from chan1 tx_lp_en. + * [15] chan2 tx_hs_en control selection. + * 1: from register. 0: from chan0 state machine. + * [14] register bit for chan2 tx_hs_en. + * [13] chan2 tx_lp_en control selection. + * 1: from register. 0: from chan0 state machine. + * [12] register bit from chan2 tx_lp_en. + * [11] chan3 tx_hs_en control selection. + * 1: from register. 0: from chan0 state machine. + * [10] register bit for chan3 tx_hs_en. + * [9] chan3 tx_lp_en control selection. + * 1: from register. 0: from chan0 state machine. + * [8] register bit from chan3 tx_lp_en. + * [4] clk chan power down. this bit is also used as the power down + * of the whole MIPI_DSI_PHY. + * [3] chan3 power down. + * [2] chan2 power down. + * [1] chan1 power down. + * [0] chan0 power down. + */ +#define MIPI_DSI_CHAN_CTRL 0x4 + +/* [24] rx turn watch dog triggered. + * [23] rx esc watchdog triggered. + * [22] mbias ready. + * [21] txclkesc synced and ready. + * [20:17] clk lane state. {mbias_ready, tx_stop, tx_ulps, tx_hs_active} + * [16:13] chan3 state{0, tx_stop, tx_ulps, tx_hs_active} + * [12:9] chan2 state.{0, tx_stop, tx_ulps, tx_hs_active} + * [8:5] chan1 state. {0, tx_stop, tx_ulps, tx_hs_active} + * [4:0] chan0 state. {TX_STOP, tx_ULPS, hs_active, direction, rxulpsesc} + */ +#define MIPI_DSI_CHAN_STS 0x8 + +/* [31:24] TCLK_PREPARE. + * [23:16] TCLK_ZERO. + * [15:8] TCLK_POST. + * [7:0] TCLK_TRAIL. + */ +#define MIPI_DSI_CLK_TIM 0xc + +/* [31:24] THS_PREPARE. + * [23:16] THS_ZERO. + * [15:8] THS_TRAIL. + * [7:0] THS_EXIT. + */ +#define MIPI_DSI_HS_TIM 0x10 + +/* [31:24] tTA_GET. + * [23:16] tTA_GO. + * [15:8] tTA_SURE. + * [7:0] tLPX. + */ +#define MIPI_DSI_LP_TIM 0x14 + +/* wait time to MIPI DIS analog ready. */ +#define MIPI_DSI_ANA_UP_TIM 0x18 + +/* TINIT. */ +#define MIPI_DSI_INIT_TIM 0x1c + +/* TWAKEUP. 
*/ +#define MIPI_DSI_WAKEUP_TIM 0x20 + +/* when in RxULPS check state, after the the logic enable the analog, + * how long we should wait to check the lP state . + */ +#define MIPI_DSI_LPOK_TIM 0x24 + +/* Watchdog for RX low power state no finished. */ +#define MIPI_DSI_LP_WCHDOG 0x28 + +/* tMBIAS, after send power up signals to analog, + * how long we should wait for analog powered up. + */ +#define MIPI_DSI_ANA_CTRL 0x2c + +/* [31:8] reserved for future. + * [7:0] tCLK_PRE. + */ +#define MIPI_DSI_CLK_TIM1 0x30 + +/* watchdog for turn around waiting time. */ +#define MIPI_DSI_TURN_WCHDOG 0x34 + +/* When in RxULPS state, how frequency we should to check + * if the TX side out of ULPS state. + */ +#define MIPI_DSI_ULPS_CHECK 0x38 +#define MIPI_DSI_TEST_CTRL0 0x3c +#define MIPI_DSI_TEST_CTRL1 0x40 + +struct phy_meson_axg_mipi_dphy_priv { + struct device *dev; + struct regmap *regmap; + struct clk *clk; + struct reset_control *reset; + struct phy *analog; + struct phy_configure_opts_mipi_dphy config; +}; + +static const struct regmap_config phy_meson_axg_mipi_dphy_regmap_conf = { + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = MIPI_DSI_TEST_CTRL1, +}; + +static int phy_meson_axg_mipi_dphy_init(struct phy *phy) +{ + struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = phy_init(priv->analog); + if (ret) + return ret; + + ret = reset_control_reset(priv->reset); + if (ret) + return ret; + + return 0; +} + +static int phy_meson_axg_mipi_dphy_configure(struct phy *phy, + union phy_configure_opts *opts) +{ + struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy); + if (ret) + return ret; + + ret = phy_configure(priv->analog, opts); + if (ret) + return ret; + + memcpy(&priv->config, opts, sizeof(priv->config)); + + return 0; +} + +static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy) +{ + struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + unsigned long temp; + + ret = phy_power_on(priv->analog); + if (ret) + return ret; + + /* enable phy clock */ + regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL, 0x1); + regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL, + BIT(0) | /* enable the DSI PLL clock . */ + BIT(7) | /* enable pll clock which connected to DDR clock path */ + BIT(8)); /* enable the clock divider counter */ + + /* enable the divider clock out */ + regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(9), BIT(9)); + + /* enable the byte clock generation. 
*/ + regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(12), BIT(12)); + regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31), BIT(31)); + regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31), 0); + + /* Calculate lanebyteclk period in ps */ + temp = (1000000 * 100) / (priv->config.hs_clk_rate / 1000); + temp = temp * 8 * 10; + + regmap_write(priv->regmap, MIPI_DSI_CLK_TIM, + DIV_ROUND_UP(priv->config.clk_trail, temp) | + (DIV_ROUND_UP(priv->config.clk_post + + priv->config.hs_trail, temp) << 8) | + (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) | + (DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24)); + regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1, + DIV_ROUND_UP(priv->config.clk_pre, temp)); + + regmap_write(priv->regmap, MIPI_DSI_HS_TIM, + DIV_ROUND_UP(priv->config.hs_exit, temp) | + (DIV_ROUND_UP(priv->config.hs_trail, temp) << 8) | + (DIV_ROUND_UP(priv->config.hs_zero, temp) << 16) | + (DIV_ROUND_UP(priv->config.hs_prepare, temp) << 24)); + + regmap_write(priv->regmap, MIPI_DSI_LP_TIM, + DIV_ROUND_UP(priv->config.lpx, temp) | + (DIV_ROUND_UP(priv->config.ta_sure, temp) << 8) | + (DIV_ROUND_UP(priv->config.ta_go, temp) << 16) | + (DIV_ROUND_UP(priv->config.ta_get, temp) << 24)); + + regmap_write(priv->regmap, MIPI_DSI_ANA_UP_TIM, 0x0100); + regmap_write(priv->regmap, MIPI_DSI_INIT_TIM, + DIV_ROUND_UP(priv->config.init * NSEC_PER_MSEC, temp)); + regmap_write(priv->regmap, MIPI_DSI_WAKEUP_TIM, + DIV_ROUND_UP(priv->config.wakeup * NSEC_PER_MSEC, temp)); + regmap_write(priv->regmap, MIPI_DSI_LPOK_TIM, 0x7C); + regmap_write(priv->regmap, MIPI_DSI_ULPS_CHECK, 0x927C); + regmap_write(priv->regmap, MIPI_DSI_LP_WCHDOG, 0x1000); + regmap_write(priv->regmap, MIPI_DSI_TURN_WCHDOG, 0x1000); + + /* Powerup the analog circuit */ + switch (priv->config.lanes) { + case 1: + regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xe); + break; + case 2: + regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xc); + break; + case 3: + regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0x8); + break; + case 4: + default: + regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0); + break; + } + + /* Trigger a sync active for esc_clk */ + regmap_update_bits(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(1), BIT(1)); + + return 0; +} + +static int phy_meson_axg_mipi_dphy_power_off(struct phy *phy) +{ + struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy); + + regmap_write(priv->regmap, MIPI_DSI_CHAN_CTRL, 0xf); + regmap_write(priv->regmap, MIPI_DSI_PHY_CTRL, BIT(31)); + + phy_power_off(priv->analog); + + return 0; +} + +static int phy_meson_axg_mipi_dphy_exit(struct phy *phy) +{ + struct phy_meson_axg_mipi_dphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = phy_exit(priv->analog); + if (ret) + return ret; + + return reset_control_reset(priv->reset); +} + +static const struct phy_ops phy_meson_axg_mipi_dphy_ops = { + .configure = phy_meson_axg_mipi_dphy_configure, + .init = phy_meson_axg_mipi_dphy_init, + .exit = phy_meson_axg_mipi_dphy_exit, + .power_on = phy_meson_axg_mipi_dphy_power_on, + .power_off = phy_meson_axg_mipi_dphy_power_off, + .owner = THIS_MODULE, +}; + +static int phy_meson_axg_mipi_dphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct phy_provider *phy_provider; + struct resource *res; + struct phy_meson_axg_mipi_dphy_priv *priv; + struct phy *phy; + void __iomem *base; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + platform_set_drvdata(pdev, priv); + + res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->regmap = devm_regmap_init_mmio(dev, base, + &phy_meson_axg_mipi_dphy_regmap_conf); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + + priv->clk = devm_clk_get(dev, "pclk"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->reset = devm_reset_control_get(dev, "phy"); + if (IS_ERR(priv->reset)) + return PTR_ERR(priv->reset); + + priv->analog = devm_phy_get(dev, "analog"); + if (IS_ERR(priv->analog)) + return PTR_ERR(priv->analog); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_deassert(priv->reset); + if (ret) + return ret; + + phy = devm_phy_create(dev, NULL, &phy_meson_axg_mipi_dphy_ops); + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + if (ret != -EPROBE_DEFER) + dev_err(dev, "failed to create PHY\n"); + + return ret; + } + + phy_set_drvdata(phy, priv); + + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id phy_meson_axg_mipi_dphy_of_match[] = { + { .compatible = "amlogic,axg-mipi-dphy", }, + { }, +}; +MODULE_DEVICE_TABLE(of, phy_meson_axg_mipi_dphy_of_match); + +static struct platform_driver phy_meson_axg_mipi_dphy_driver = { + .probe = phy_meson_axg_mipi_dphy_probe, + .driver = { + .name = "phy-meson-axg-mipi-dphy", + .of_match_table = phy_meson_axg_mipi_dphy_of_match, + }, +}; +module_platform_driver(phy_meson_axg_mipi_dphy_driver); + +MODULE_AUTHOR("Neil Armstrong "); +MODULE_DESCRIPTION("Meson AXG MIPI DPHY driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 8eff8b4e22d9885f1509a68bf9a7cc1961c5dee4 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Mon, 16 Nov 2020 11:16:47 +0100 Subject: phy: amlogic: phy-meson-axg-mipi-pcie-analog: add support for MIPI DSI analog The AXG Analog MIPI-DSI PHY also provides functions to the PCIe PHY, thus we need to have inclusive support for both interfaces at runtime. This fixes the regmap get from parent node, removes cell param to select a mode and implement runtime configuration & power on/off for both functions since they are not exclusive. Signed-off-by: Neil Armstrong Reviewed-by: Remi Pommarel Link: https://lore.kernel.org/r/20201116101647.73448-4-narmstrong@baylibre.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/Kconfig | 1 + .../phy/amlogic/phy-meson-axg-mipi-pcie-analog.c | 204 ++++++++++++++------- 2 files changed, 136 insertions(+), 69 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig index 99e8a4c7f1f3..db5d0cd757e3 100644 --- a/drivers/phy/amlogic/Kconfig +++ b/drivers/phy/amlogic/Kconfig @@ -66,6 +66,7 @@ config PHY_MESON_AXG_MIPI_PCIE_ANALOG depends on OF && (ARCH_MESON || COMPILE_TEST) select GENERIC_PHY select REGMAP_MMIO + select GENERIC_PHY_MIPI_DPHY help Enable this to support the Meson MIPI + PCIE analog PHY found in Meson AXG SoCs. 
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c index 1431cbf885e1..6eb21551bdd9 100644 --- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c @@ -4,9 +4,13 @@ * * Copyright (C) 2019 Remi Pommarel */ +#include +#include #include #include #include +#include +#include #include #include @@ -14,10 +18,10 @@ #define HHI_MIPI_CNTL0_COMMON_BLOCK GENMASK(31, 28) #define HHI_MIPI_CNTL0_ENABLE BIT(29) #define HHI_MIPI_CNTL0_BANDGAP BIT(26) -#define HHI_MIPI_CNTL0_DECODE_TO_RTERM GENMASK(15, 12) -#define HHI_MIPI_CNTL0_OUTPUT_EN BIT(3) +#define HHI_MIPI_CNTL0_DIF_REF_CTL1 GENMASK(25, 16) +#define HHI_MIPI_CNTL0_DIF_REF_CTL0 GENMASK(15, 0) -#define HHI_MIPI_CNTL1 0x01 +#define HHI_MIPI_CNTL1 0x04 #define HHI_MIPI_CNTL1_CH0_CML_PDR_EN BIT(12) #define HHI_MIPI_CNTL1_LP_ABILITY GENMASK(5, 4) #define HHI_MIPI_CNTL1_LP_RESISTER BIT(3) @@ -25,100 +29,170 @@ #define HHI_MIPI_CNTL1_INPUT_SEL BIT(1) #define HHI_MIPI_CNTL1_PRBS7_EN BIT(0) -#define HHI_MIPI_CNTL2 0x02 +#define HHI_MIPI_CNTL2 0x08 #define HHI_MIPI_CNTL2_CH_PU GENMASK(31, 25) #define HHI_MIPI_CNTL2_CH_CTL GENMASK(24, 19) #define HHI_MIPI_CNTL2_CH0_DIGDR_EN BIT(18) #define HHI_MIPI_CNTL2_CH_DIGDR_EN BIT(17) #define HHI_MIPI_CNTL2_LPULPS_EN BIT(16) -#define HHI_MIPI_CNTL2_CH_EN(n) BIT(15 - (n)) +#define HHI_MIPI_CNTL2_CH_EN GENMASK(15, 11) #define HHI_MIPI_CNTL2_CH0_LP_CTL GENMASK(10, 1) +#define DSI_LANE_0 (1 << 4) +#define DSI_LANE_1 (1 << 3) +#define DSI_LANE_CLK (1 << 2) +#define DSI_LANE_2 (1 << 1) +#define DSI_LANE_3 (1 << 0) +#define DSI_LANE_MASK (0x1F) + struct phy_axg_mipi_pcie_analog_priv { struct phy *phy; - unsigned int mode; struct regmap *regmap; + bool dsi_configured; + bool dsi_enabled; + bool powered; + struct phy_configure_opts_mipi_dphy config; }; -static const struct regmap_config phy_axg_mipi_pcie_analog_regmap_conf = { - .reg_bits = 8, - .val_bits = 32, - .reg_stride = 4, - .max_register = HHI_MIPI_CNTL2, -}; +static void phy_bandgap_enable(struct phy_axg_mipi_pcie_analog_priv *priv) +{ + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP); -static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy) + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE); +} + +static void phy_bandgap_disable(struct phy_axg_mipi_pcie_analog_priv *priv) { - struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_BANDGAP, 0); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_ENABLE, 0); +} - /* MIPI not supported yet */ - if (priv->mode != PHY_TYPE_PCIE) - return -EINVAL; +static void phy_dsi_analog_enable(struct phy_axg_mipi_pcie_analog_priv *priv) +{ + u32 reg; regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, - HHI_MIPI_CNTL0_BANDGAP, HHI_MIPI_CNTL0_BANDGAP); + HHI_MIPI_CNTL0_DIF_REF_CTL1, + FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0x1b8)); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + BIT(31), BIT(31)); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_DIF_REF_CTL0, + FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL0, 0x8)); + + regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0x001e); + + regmap_write(priv->regmap, HHI_MIPI_CNTL2, + (0x26e0 << 16) | (0x459 << 0)); + + reg = DSI_LANE_CLK; + switch (priv->config.lanes) { + case 4: + reg |= DSI_LANE_3; + fallthrough; + case 3: + reg |= DSI_LANE_2; + fallthrough; + 
case 2: + reg |= DSI_LANE_1; + fallthrough; + case 1: + reg |= DSI_LANE_0; + break; + default: + reg = 0; + } + + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL2, + HHI_MIPI_CNTL2_CH_EN, + FIELD_PREP(HHI_MIPI_CNTL2_CH_EN, reg)); + + priv->dsi_enabled = true; +} +static void phy_dsi_analog_disable(struct phy_axg_mipi_pcie_analog_priv *priv) +{ regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, - HHI_MIPI_CNTL0_ENABLE, HHI_MIPI_CNTL0_ENABLE); - return 0; + HHI_MIPI_CNTL0_DIF_REF_CTL1, + FIELD_PREP(HHI_MIPI_CNTL0_DIF_REF_CTL1, 0)); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, BIT(31), 0); + regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, + HHI_MIPI_CNTL0_DIF_REF_CTL1, 0); + + regmap_write(priv->regmap, HHI_MIPI_CNTL1, 0x6); + + regmap_write(priv->regmap, HHI_MIPI_CNTL2, 0x00200000); + + priv->dsi_enabled = false; } -static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy) +static int phy_axg_mipi_pcie_analog_configure(struct phy *phy, + union phy_configure_opts *opts) { struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy); + int ret; - /* MIPI not supported yet */ - if (priv->mode != PHY_TYPE_PCIE) - return -EINVAL; + ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy); + if (ret) + return ret; + + memcpy(&priv->config, opts, sizeof(priv->config)); + + priv->dsi_configured = true; + + /* If PHY was already powered on, setup the DSI analog part */ + if (priv->powered) { + /* If reconfiguring, disable & reconfigure */ + if (priv->dsi_enabled) + phy_dsi_analog_disable(priv); + + usleep_range(100, 200); + + phy_dsi_analog_enable(priv); + } - regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, - HHI_MIPI_CNTL0_BANDGAP, 0); - regmap_update_bits(priv->regmap, HHI_MIPI_CNTL0, - HHI_MIPI_CNTL0_ENABLE, 0); return 0; } -static int phy_axg_mipi_pcie_analog_init(struct phy *phy) +static int phy_axg_mipi_pcie_analog_power_on(struct phy *phy) { + struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy); + + phy_bandgap_enable(priv); + + if (priv->dsi_configured) + phy_dsi_analog_enable(priv); + + priv->powered = true; + return 0; } -static int phy_axg_mipi_pcie_analog_exit(struct phy *phy) +static int phy_axg_mipi_pcie_analog_power_off(struct phy *phy) { + struct phy_axg_mipi_pcie_analog_priv *priv = phy_get_drvdata(phy); + + phy_bandgap_disable(priv); + + if (priv->dsi_enabled) + phy_dsi_analog_disable(priv); + + priv->powered = false; + return 0; } static const struct phy_ops phy_axg_mipi_pcie_analog_ops = { - .init = phy_axg_mipi_pcie_analog_init, - .exit = phy_axg_mipi_pcie_analog_exit, + .configure = phy_axg_mipi_pcie_analog_configure, .power_on = phy_axg_mipi_pcie_analog_power_on, .power_off = phy_axg_mipi_pcie_analog_power_off, .owner = THIS_MODULE, }; -static struct phy *phy_axg_mipi_pcie_analog_xlate(struct device *dev, - struct of_phandle_args *args) -{ - struct phy_axg_mipi_pcie_analog_priv *priv = dev_get_drvdata(dev); - unsigned int mode; - - if (args->args_count != 1) { - dev_err(dev, "invalid number of arguments\n"); - return ERR_PTR(-EINVAL); - } - - mode = args->args[0]; - - /* MIPI mode is not supported yet */ - if (mode != PHY_TYPE_PCIE) { - dev_err(dev, "invalid phy mode select argument\n"); - return ERR_PTR(-EINVAL); - } - - priv->mode = mode; - return priv->phy; -} - static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev) { struct phy_provider *phy; @@ -126,27 +200,20 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev) struct phy_axg_mipi_pcie_analog_priv *priv; struct device_node *np = dev->of_node; struct regmap 
*map; - struct resource *res; - void __iomem *base; int ret; priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) { - dev_err(dev, "failed to get regmap base\n"); - return PTR_ERR(base); - } - - map = devm_regmap_init_mmio(dev, base, - &phy_axg_mipi_pcie_analog_regmap_conf); + /* Get the hhi system controller node */ + map = syscon_node_to_regmap(of_get_parent(dev->of_node)); if (IS_ERR(map)) { - dev_err(dev, "failed to get HHI regmap\n"); + dev_err(dev, + "failed to get HHI regmap\n"); return PTR_ERR(map); } + priv->regmap = map; priv->phy = devm_phy_create(dev, np, &phy_axg_mipi_pcie_analog_ops); @@ -160,8 +227,7 @@ static int phy_axg_mipi_pcie_analog_probe(struct platform_device *pdev) phy_set_drvdata(priv->phy, priv); dev_set_drvdata(dev, priv); - phy = devm_of_phy_provider_register(dev, - phy_axg_mipi_pcie_analog_xlate); + phy = devm_of_phy_provider_register(dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy); } -- cgit v1.2.3 From 496db029142f1392b8eebc19bcc49796feb7c8ba Mon Sep 17 00:00:00 2001 From: Jaehoon Chung Date: Fri, 20 Nov 2020 11:26:27 +0100 Subject: phy: samsung: phy-exynos-pcie: rework driver to support Exynos5433 PCIe PHY Exynos5440 SoC support has been dropped since commit 8c83315da1cf ("ARM: dts: exynos: Remove Exynos5440"). Rework this driver to support PCIe PHY variant found in the Exynos5433 SoCs. Signed-off-by: Jaehoon Chung [mszyprow: reworked the driver to support only Exynos5433 variant, rebased onto current kernel code, rewrote commit message] Signed-off-by: Marek Szyprowski Acked-by: Krzysztof Kozlowski Reviewed-by: Jingoo Han Link: https://lore.kernel.org/r/20201120102627.14450-1-m.szyprowski@samsung.com Signed-off-by: Vinod Koul --- drivers/phy/samsung/phy-exynos-pcie.c | 301 +++++++++++++--------------------- 1 file changed, 112 insertions(+), 189 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c index c98fff5c1ac8..d91de323dd0e 100644 --- a/drivers/phy/samsung/phy-exynos-pcie.c +++ b/drivers/phy/samsung/phy-exynos-pcie.c @@ -4,70 +4,41 @@ * * Phy provider for PCIe controller on Exynos SoC series * - * Copyright (C) 2017 Samsung Electronics Co., Ltd. + * Copyright (C) 2017-2020 Samsung Electronics Co., Ltd. 
* Jaehoon Chung */ -#include #include -#include -#include #include -#include -#include #include #include #include #include -/* PCIe Purple registers */ -#define PCIE_PHY_GLOBAL_RESET 0x000 -#define PCIE_PHY_COMMON_RESET 0x004 -#define PCIE_PHY_CMN_REG 0x008 -#define PCIE_PHY_MAC_RESET 0x00c -#define PCIE_PHY_PLL_LOCKED 0x010 -#define PCIE_PHY_TRSVREG_RESET 0x020 -#define PCIE_PHY_TRSV_RESET 0x024 - -/* PCIe PHY registers */ -#define PCIE_PHY_IMPEDANCE 0x004 -#define PCIE_PHY_PLL_DIV_0 0x008 -#define PCIE_PHY_PLL_BIAS 0x00c -#define PCIE_PHY_DCC_FEEDBACK 0x014 -#define PCIE_PHY_PLL_DIV_1 0x05c -#define PCIE_PHY_COMMON_POWER 0x064 -#define PCIE_PHY_COMMON_PD_CMN BIT(3) -#define PCIE_PHY_TRSV0_EMP_LVL 0x084 -#define PCIE_PHY_TRSV0_DRV_LVL 0x088 -#define PCIE_PHY_TRSV0_RXCDR 0x0ac -#define PCIE_PHY_TRSV0_POWER 0x0c4 -#define PCIE_PHY_TRSV0_PD_TSV BIT(7) -#define PCIE_PHY_TRSV0_LVCC 0x0dc -#define PCIE_PHY_TRSV1_EMP_LVL 0x144 -#define PCIE_PHY_TRSV1_RXCDR 0x16c -#define PCIE_PHY_TRSV1_POWER 0x184 -#define PCIE_PHY_TRSV1_PD_TSV BIT(7) -#define PCIE_PHY_TRSV1_LVCC 0x19c -#define PCIE_PHY_TRSV2_EMP_LVL 0x204 -#define PCIE_PHY_TRSV2_RXCDR 0x22c -#define PCIE_PHY_TRSV2_POWER 0x244 -#define PCIE_PHY_TRSV2_PD_TSV BIT(7) -#define PCIE_PHY_TRSV2_LVCC 0x25c -#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4 -#define PCIE_PHY_TRSV3_RXCDR 0x2ec -#define PCIE_PHY_TRSV3_POWER 0x304 -#define PCIE_PHY_TRSV3_PD_TSV BIT(7) -#define PCIE_PHY_TRSV3_LVCC 0x31c - -struct exynos_pcie_phy_data { - const struct phy_ops *ops; -}; +#define PCIE_PHY_OFFSET(x) ((x) * 0x4) + +/* Sysreg FSYS register offsets and bits for Exynos5433 */ +#define PCIE_EXYNOS5433_PHY_MAC_RESET 0x0208 +#define PCIE_MAC_RESET_MASK 0xFF +#define PCIE_MAC_RESET BIT(4) +#define PCIE_EXYNOS5433_PHY_L1SUB_CM_CON 0x1010 +#define PCIE_REFCLK_GATING_EN BIT(0) +#define PCIE_EXYNOS5433_PHY_COMMON_RESET 0x1020 +#define PCIE_PHY_RESET BIT(0) +#define PCIE_EXYNOS5433_PHY_GLOBAL_RESET 0x1040 +#define PCIE_GLOBAL_RESET BIT(0) +#define PCIE_REFCLK BIT(1) +#define PCIE_REFCLK_MASK 0x16 +#define PCIE_APP_REQ_EXIT_L1_MODE BIT(5) + +/* PMU PCIE PHY isolation control */ +#define EXYNOS5433_PMU_PCIE_PHY_OFFSET 0x730 /* For Exynos pcie phy */ struct exynos_pcie_phy { - const struct exynos_pcie_phy_data *drv_data; - void __iomem *phy_base; - void __iomem *blk_base; /* For exynos5440 */ + void __iomem *base; + struct regmap *pmureg; + struct regmap *fsysreg; }; static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset) @@ -75,153 +46,103 @@ static void exynos_pcie_phy_writel(void __iomem *base, u32 val, u32 offset) writel(val, base + offset); } -static u32 exynos_pcie_phy_readl(void __iomem *base, u32 offset) -{ - return readl(base + offset); -} - -/* For Exynos5440 specific functions */ -static int exynos5440_pcie_phy_init(struct phy *phy) +/* Exynos5433 specific functions */ +static int exynos5433_pcie_phy_init(struct phy *phy) { struct exynos_pcie_phy *ep = phy_get_drvdata(phy); - /* DCC feedback control off */ - exynos_pcie_phy_writel(ep->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK); - - /* set TX/RX impedance */ - exynos_pcie_phy_writel(ep->phy_base, 0xd5, PCIE_PHY_IMPEDANCE); - - /* set 50Mhz PHY clock */ - exynos_pcie_phy_writel(ep->phy_base, 0x14, PCIE_PHY_PLL_DIV_0); - exynos_pcie_phy_writel(ep->phy_base, 0x12, PCIE_PHY_PLL_DIV_1); - - /* set TX Differential output for lane 0 */ - exynos_pcie_phy_writel(ep->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL); - - /* set TX Pre-emphasis Level Control for lane 0 to minimum */ - exynos_pcie_phy_writel(ep->phy_base, 0x0, 
PCIE_PHY_TRSV0_EMP_LVL); - - /* set RX clock and data recovery bandwidth */ - exynos_pcie_phy_writel(ep->phy_base, 0xe7, PCIE_PHY_PLL_BIAS); - exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR); - exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR); - exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR); - exynos_pcie_phy_writel(ep->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR); - - /* change TX Pre-emphasis Level Control for lanes */ - exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL); - exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL); - exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL); - exynos_pcie_phy_writel(ep->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL); - - /* set LVCC */ - exynos_pcie_phy_writel(ep->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC); - exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC); - exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC); - exynos_pcie_phy_writel(ep->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC); - - /* pulse for common reset */ - exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_COMMON_RESET); - udelay(500); - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET); - + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET, + PCIE_PHY_RESET, 1); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET, + PCIE_MAC_RESET, 0); + + /* PHY refclk 24MHz */ + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET, + PCIE_REFCLK_MASK, PCIE_REFCLK); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET, + PCIE_GLOBAL_RESET, 0); + + + exynos_pcie_phy_writel(ep->base, 0x11, PCIE_PHY_OFFSET(0x3)); + + /* band gap reference on */ + exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x20)); + exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x4b)); + + /* jitter tunning */ + exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x4)); + exynos_pcie_phy_writel(ep->base, 0x02, PCIE_PHY_OFFSET(0x7)); + exynos_pcie_phy_writel(ep->base, 0x41, PCIE_PHY_OFFSET(0x21)); + exynos_pcie_phy_writel(ep->base, 0x7F, PCIE_PHY_OFFSET(0x14)); + exynos_pcie_phy_writel(ep->base, 0xC0, PCIE_PHY_OFFSET(0x15)); + exynos_pcie_phy_writel(ep->base, 0x61, PCIE_PHY_OFFSET(0x36)); + + /* D0 uninit.. 
*/ + exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x3D)); + + /* 24MHz */ + exynos_pcie_phy_writel(ep->base, 0x94, PCIE_PHY_OFFSET(0x8)); + exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x9)); + exynos_pcie_phy_writel(ep->base, 0x93, PCIE_PHY_OFFSET(0xA)); + exynos_pcie_phy_writel(ep->base, 0x6B, PCIE_PHY_OFFSET(0xC)); + exynos_pcie_phy_writel(ep->base, 0xA5, PCIE_PHY_OFFSET(0xF)); + exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x16)); + exynos_pcie_phy_writel(ep->base, 0xA3, PCIE_PHY_OFFSET(0x17)); + exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x1A)); + exynos_pcie_phy_writel(ep->base, 0x71, PCIE_PHY_OFFSET(0x23)); + exynos_pcie_phy_writel(ep->base, 0x4C, PCIE_PHY_OFFSET(0x24)); + + exynos_pcie_phy_writel(ep->base, 0x0E, PCIE_PHY_OFFSET(0x26)); + exynos_pcie_phy_writel(ep->base, 0x14, PCIE_PHY_OFFSET(0x7)); + exynos_pcie_phy_writel(ep->base, 0x48, PCIE_PHY_OFFSET(0x43)); + exynos_pcie_phy_writel(ep->base, 0x44, PCIE_PHY_OFFSET(0x44)); + exynos_pcie_phy_writel(ep->base, 0x03, PCIE_PHY_OFFSET(0x45)); + exynos_pcie_phy_writel(ep->base, 0xA7, PCIE_PHY_OFFSET(0x48)); + exynos_pcie_phy_writel(ep->base, 0x13, PCIE_PHY_OFFSET(0x54)); + exynos_pcie_phy_writel(ep->base, 0x04, PCIE_PHY_OFFSET(0x31)); + exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x32)); + + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_COMMON_RESET, + PCIE_PHY_RESET, 0); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_MAC_RESET, + PCIE_MAC_RESET_MASK, PCIE_MAC_RESET); return 0; } -static int exynos5440_pcie_phy_power_on(struct phy *phy) +static int exynos5433_pcie_phy_power_on(struct phy *phy) { struct exynos_pcie_phy *ep = phy_get_drvdata(phy); - u32 val; - - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_COMMON_RESET); - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_CMN_REG); - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSVREG_RESET); - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_TRSV_RESET); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER); - val &= ~PCIE_PHY_COMMON_PD_CMN; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER); - val &= ~PCIE_PHY_TRSV0_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER); - val &= ~PCIE_PHY_TRSV1_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER); - val &= ~PCIE_PHY_TRSV2_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER); - val &= ~PCIE_PHY_TRSV3_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER); + regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET, + BIT(0), 1); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_GLOBAL_RESET, + PCIE_APP_REQ_EXIT_L1_MODE, 0); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON, + PCIE_REFCLK_GATING_EN, 0); return 0; } -static int exynos5440_pcie_phy_power_off(struct phy *phy) +static int exynos5433_pcie_phy_power_off(struct phy *phy) { struct exynos_pcie_phy *ep = phy_get_drvdata(phy); - u32 val; - - if (readl_poll_timeout(ep->phy_base + PCIE_PHY_PLL_LOCKED, val, - (val != 0), 1, 500)) { - dev_err(&phy->dev, "PLL Locked: 0x%x\n", val); - return -ETIMEDOUT; - } - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_COMMON_POWER); - val |= PCIE_PHY_COMMON_PD_CMN; - 
exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_COMMON_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV0_POWER); - val |= PCIE_PHY_TRSV0_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV0_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV1_POWER); - val |= PCIE_PHY_TRSV1_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV1_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV2_POWER); - val |= PCIE_PHY_TRSV2_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV2_POWER); - - val = exynos_pcie_phy_readl(ep->phy_base, PCIE_PHY_TRSV3_POWER); - val |= PCIE_PHY_TRSV3_PD_TSV; - exynos_pcie_phy_writel(ep->phy_base, val, PCIE_PHY_TRSV3_POWER); + regmap_update_bits(ep->fsysreg, PCIE_EXYNOS5433_PHY_L1SUB_CM_CON, + PCIE_REFCLK_GATING_EN, PCIE_REFCLK_GATING_EN); + regmap_update_bits(ep->pmureg, EXYNOS5433_PMU_PCIE_PHY_OFFSET, + BIT(0), 0); return 0; } -static int exynos5440_pcie_phy_reset(struct phy *phy) -{ - struct exynos_pcie_phy *ep = phy_get_drvdata(phy); - - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_MAC_RESET); - exynos_pcie_phy_writel(ep->blk_base, 1, PCIE_PHY_GLOBAL_RESET); - exynos_pcie_phy_writel(ep->blk_base, 0, PCIE_PHY_GLOBAL_RESET); - - return 0; -} - -static const struct phy_ops exynos5440_phy_ops = { - .init = exynos5440_pcie_phy_init, - .power_on = exynos5440_pcie_phy_power_on, - .power_off = exynos5440_pcie_phy_power_off, - .reset = exynos5440_pcie_phy_reset, +static const struct phy_ops exynos5433_phy_ops = { + .init = exynos5433_pcie_phy_init, + .power_on = exynos5433_pcie_phy_power_on, + .power_off = exynos5433_pcie_phy_power_off, .owner = THIS_MODULE, }; -static const struct exynos_pcie_phy_data exynos5440_pcie_phy_data = { - .ops = &exynos5440_phy_ops, -}; - static const struct of_device_id exynos_pcie_phy_match[] = { { - .compatible = "samsung,exynos5440-pcie-phy", - .data = &exynos5440_pcie_phy_data, + .compatible = "samsung,exynos5433-pcie-phy", }, {}, }; @@ -232,27 +153,30 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev) struct exynos_pcie_phy *exynos_phy; struct phy *generic_phy; struct phy_provider *phy_provider; - const struct exynos_pcie_phy_data *drv_data; - - drv_data = of_device_get_match_data(dev); - if (!drv_data) - return -ENODEV; exynos_phy = devm_kzalloc(dev, sizeof(*exynos_phy), GFP_KERNEL); if (!exynos_phy) return -ENOMEM; - exynos_phy->phy_base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(exynos_phy->phy_base)) - return PTR_ERR(exynos_phy->phy_base); + exynos_phy->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(exynos_phy->base)) + return PTR_ERR(exynos_phy->base); - exynos_phy->blk_base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(exynos_phy->blk_base)) - return PTR_ERR(exynos_phy->blk_base); + exynos_phy->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,pmu-syscon"); + if (IS_ERR(exynos_phy->pmureg)) { + dev_err(&pdev->dev, "PMU regmap lookup failed.\n"); + return PTR_ERR(exynos_phy->pmureg); + } - exynos_phy->drv_data = drv_data; + exynos_phy->fsysreg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,fsys-sysreg"); + if (IS_ERR(exynos_phy->fsysreg)) { + dev_err(&pdev->dev, "FSYS sysreg regmap lookup failed.\n"); + return PTR_ERR(exynos_phy->fsysreg); + } - generic_phy = devm_phy_create(dev, dev->of_node, drv_data->ops); + generic_phy = devm_phy_create(dev, dev->of_node, &exynos5433_phy_ops); if (IS_ERR(generic_phy)) { dev_err(dev, "failed to create PHY\n"); return 
PTR_ERR(generic_phy); @@ -272,5 +196,4 @@ static struct platform_driver exynos_pcie_phy_driver = { .suppress_bind_attrs = true, } }; - builtin_platform_driver(exynos_pcie_phy_driver); -- cgit v1.2.3 From 768a711e2d4b5e348962254a1c4f9fbfb6f13904 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 20 Nov 2020 16:04:01 +0530 Subject: phy: samsung: phy-exynos-pcie: fix typo 'tunning' Fix the typo s/tunning/tuning Fixes: 496db029142f ("phy: samsung: phy-exynos-pcie: rework driver to support Exynos5433 PCIe PHY") Signed-off-by: Vinod Koul --- drivers/phy/samsung/phy-exynos-pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/samsung/phy-exynos-pcie.c b/drivers/phy/samsung/phy-exynos-pcie.c index d91de323dd0e..578cfe07d07a 100644 --- a/drivers/phy/samsung/phy-exynos-pcie.c +++ b/drivers/phy/samsung/phy-exynos-pcie.c @@ -69,7 +69,7 @@ static int exynos5433_pcie_phy_init(struct phy *phy) exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x20)); exynos_pcie_phy_writel(ep->base, 0, PCIE_PHY_OFFSET(0x4b)); - /* jitter tunning */ + /* jitter tuning */ exynos_pcie_phy_writel(ep->base, 0x34, PCIE_PHY_OFFSET(0x4)); exynos_pcie_phy_writel(ep->base, 0x02, PCIE_PHY_OFFSET(0x7)); exynos_pcie_phy_writel(ep->base, 0x41, PCIE_PHY_OFFSET(0x21)); -- cgit v1.2.3 From 6e06a85556f9725865af91c9fe502c9669518ed5 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 24 Nov 2020 09:33:14 +0800 Subject: soundwire: bus: add comments to explain interrupt loop filter The interrupt handling in SoundWire requires software to re-read the interrupt status after clearing an interrupt. In case the interrupt is still outstanding, the code in bus.c will loop a number of times, however that loop is limited to the interrupts detected in the first read. This strategy helps meet SoundWire requirements without remaining forever in an interrupt handler. Add a couple of comments to document this design. 
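The pattern being documented, reduced to its skeleton (simplified from
sdw_handle_dp0_interrupt() in the hunk below; the port and slave-alert
handlers follow the same shape):

	status = sdw_read(slave, SDW_DP0_INT);
	do {
		/* handle and ack the sources seen so far ... */

		/* re-read: new events may have been raised meanwhile */
		status2 = sdw_read(slave, SDW_DP0_INT);

		/* filter: only sources from the first read keep us looping */
		status &= status2;

		count++;
	} while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

If a source keeps firing, the loop still terminates after
SDW_READ_INTR_CLEAR_RETRY iterations instead of spinning forever in the
handler.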
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20201124013318.8963-2-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index ffe4600fd95b..45131b9f5080 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1334,6 +1334,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) "SDW_DP0_INT read failed:%d\n", status2); return status2; } + /* filter to limit loop to interrupts identified in the first status read */ status &= status2; count++; @@ -1404,6 +1405,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave, "SDW_DPN_INT read failed:%d\n", status2); return status2; } + /* filter to limit loop to interrupts identified in the first status read */ status &= status2; count++; @@ -1589,7 +1591,10 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) sdca_cascade = ret & SDW_DP0_SDCA_CASCADE; } - /* Make sure no interrupts are pending */ + /* + * Make sure no interrupts are pending, but filter to limit loop + * to interrupts identified in the first status read + */ buf &= _buf; buf2[0] &= _buf2[0]; buf2[1] &= _buf2[1]; -- cgit v1.2.3 From 7ffaba042e0e997832032b2d7b062dc27a7ebb2d Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 24 Nov 2020 09:33:15 +0800 Subject: soundwire: bus: reset slave_notify status at each loop The code loops multiple times to deal with pending interrupts, but we never reset the slave_notify status. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20201124013318.8963-3-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index 45131b9f5080..d6e646521819 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1425,7 +1425,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) u8 clear = 0, bit, port_status[15] = {0}; int port_num, stat, ret, count = 0; unsigned long port; - bool slave_notify = false; + bool slave_notify; u8 sdca_cascade = 0; u8 buf, buf2[2], _buf, _buf2[2]; bool parity_check; @@ -1467,6 +1467,8 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave) } do { + slave_notify = false; + /* * Check parity, bus clash and Slave (impl defined) * interrupt -- cgit v1.2.3 From b35991de7a59a06a4ea5aee90b4926f01a72f71b Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 24 Nov 2020 09:33:17 +0800 Subject: soundwire: bus: only clear valid DP0 interrupts We should only access the fields that are relevant for DP0, and never write to reserved or read-only SDCA_CASCADE fields. 
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20201124013318.8963-5-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index d6e646521819..f66a804f9b74 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1280,7 +1280,7 @@ static int sdw_initialize_slave(struct sdw_slave *slave) static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) { - u8 clear = 0, impl_int_mask; + u8 clear, impl_int_mask; int status, status2, ret, count = 0; status = sdw_read(slave, SDW_DP0_INT); @@ -1291,6 +1291,8 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) } do { + clear = status & ~SDW_DP0_INTERRUPTS; + if (status & SDW_DP0_INT_TEST_FAIL) { dev_err(&slave->dev, "Test fail for port 0\n"); clear |= SDW_DP0_INT_TEST_FAIL; @@ -1319,7 +1321,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) *slave_status = clear; } - /* clear the interrupt */ + /* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */ ret = sdw_write(slave, SDW_DP0_INT, clear); if (ret < 0) { dev_err(slave->bus->dev, @@ -1340,7 +1342,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) count++; /* we can get alerts while processing so keep retrying */ - } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY); + } while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY)); if (count == SDW_READ_INTR_CLEAR_RETRY) dev_warn(slave->bus->dev, "Reached MAX_RETRY on DP0 read\n"); -- cgit v1.2.3 From 47b8520997a8ec4a1d2b322c61b59ebe078764a9 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 24 Nov 2020 09:33:18 +0800 Subject: soundwire: bus: only clear valid DPN interrupts Mirror the changes made for DP0 and don't modify reserved fields. 
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Guennadi Liakhovetski Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20201124013318.8963-6-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/bus.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index f66a804f9b74..d1e8c3a54976 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1353,7 +1353,7 @@ static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status) static int sdw_handle_port_interrupt(struct sdw_slave *slave, int port, u8 *slave_status) { - u8 clear = 0, impl_int_mask; + u8 clear, impl_int_mask; int status, status2, ret, count = 0; u32 addr; @@ -1370,6 +1370,8 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave, } do { + clear = status & ~SDW_DPN_INTERRUPTS; + if (status & SDW_DPN_INT_TEST_FAIL) { dev_err(&slave->dev, "Test fail for port:%d\n", port); clear |= SDW_DPN_INT_TEST_FAIL; @@ -1392,7 +1394,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave, *slave_status = clear; } - /* clear the interrupt */ + /* clear the interrupt but don't touch reserved fields */ ret = sdw_write(slave, addr, clear); if (ret < 0) { dev_err(slave->bus->dev, @@ -1413,7 +1415,7 @@ static int sdw_handle_port_interrupt(struct sdw_slave *slave, count++; /* we can get alerts while processing so keep retrying */ - } while (status != 0 && count < SDW_READ_INTR_CLEAR_RETRY); + } while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY)); if (count == SDW_READ_INTR_CLEAR_RETRY) dev_warn(slave->bus->dev, "Reached MAX_RETRY on port read"); -- cgit v1.2.3 From 47edc0104c61d609b0898a302267b7269d87a6af Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Wed, 25 Nov 2020 11:21:55 +0530 Subject: soundwire: qcom: Fix build failure when slimbus is module Commit 5bd773242f75 ("soundwire: qcom: avoid dependency on CONFIG_SLIMBUS") removed hard dependency on Slimbus for qcom driver but it results in build failure when: CONFIG_SOUNDWIRE_QCOM=y CONFIG_SLIMBUS=m drivers/soundwire/qcom.o: In function `qcom_swrm_probe': qcom.c:(.text+0xf44): undefined reference to `slimbus_bus' Fix this by using IS_REACHABLE() in driver which is recommended to be used with imply. 
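For context, the relevant kconfig.h semantics (paraphrased) and the guard as
it reads after this change:

	/*
	 * IS_ENABLED(CONFIG_SLIMBUS)   - true for =y and =m
	 * IS_REACHABLE(CONFIG_SLIMBUS) - true for =y, or for =m only when
	 *                                the referencing code is itself
	 *                                built as a module
	 *
	 * With CONFIG_SOUNDWIRE_QCOM=y and CONFIG_SLIMBUS=m a built-in
	 * driver cannot reach the slimbus module, so the reference to
	 * slimbus_bus has to compile out:
	 */
#if IS_REACHABLE(CONFIG_SLIMBUS)
	if (dev->parent->bus == &slimbus_bus) {
#else
	if (false) {
#endif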
Fixes: 5bd773242f75 ("soundwire: qcom: avoid dependency on CONFIG_SLIMBUS") Reported-by: kernel test robot Tested-by: Srinivas Kandagatla Reviewed-by: Srinivas Kandagatla Acked-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/r/20201125055155.GD8403@vkoul-mobl Signed-off-by: Vinod Koul --- drivers/soundwire/qcom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index fbca4ebf63e9..6d22df01f354 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -799,7 +799,7 @@ static int qcom_swrm_probe(struct platform_device *pdev) data = of_device_get_match_data(dev); ctrl->rows_index = sdw_find_row_index(data->default_rows); ctrl->cols_index = sdw_find_col_index(data->default_cols); -#if IS_ENABLED(CONFIG_SLIMBUS) +#if IS_REACHABLE(CONFIG_SLIMBUS) if (dev->parent->bus == &slimbus_bus) { #else if (false) { -- cgit v1.2.3 From abe9af53c0dce47509afac18264efaaf4059a3c8 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Fri, 27 Nov 2020 10:24:45 +0000 Subject: slimbus: fix a kernel-doc markup Fix the name of the enum on its kernel-doc markup: enum slim_ch_aux_fmt -> enum slim_ch_aux_bit_fmt Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/slimbus.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h index c73035915f1d..00a7f112574b 100644 --- a/drivers/slimbus/slimbus.h +++ b/drivers/slimbus/slimbus.h @@ -244,7 +244,7 @@ enum slim_ch_data_fmt { }; /** - * enum slim_ch_aux_fmt: SLIMbus channel Aux Field format IDs according to + * enum slim_ch_aux_bit_fmt: SLIMbus channel Aux Field format IDs according to * Table 63 of SLIMbus Spec 2.0 * @SLIM_CH_AUX_FMT_NOT_APPLICABLE: Undefined * @SLIM_CH_AUX_FMT_ZCUV_TUNNEL_IEC60958: ZCUV for tunneling IEC60958 -- cgit v1.2.3 From a899d324863a3d15ce0eea513884e1b73a758c58 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Nov 2020 10:24:46 +0000 Subject: slimbus: qcom-ngd-ctrl: add Sub System Restart support This patch adds SSR(SubSystem Restart) support which includes, synchronisation between SSR and QMI server notifications. Also with this patch now NGD is taken down by SSR instead of QMI server down notification. NGD up path now relies on both SSR and QMI notifications and particularly sequence of SSR up followed by QMI server up notification. Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-3-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/Kconfig | 3 +- drivers/slimbus/qcom-ngd-ctrl.c | 97 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 95 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig index 8cd595148d17..02534ce86e46 100644 --- a/drivers/slimbus/Kconfig +++ b/drivers/slimbus/Kconfig @@ -22,9 +22,10 @@ config SLIM_QCOM_CTRL config SLIM_QCOM_NGD_CTRL tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" - depends on HAS_IOMEM && DMA_ENGINE && NET + depends on HAS_IOMEM && DMA_ENGINE && NET && REMOTEPROC depends on ARCH_QCOM || COMPILE_TEST select QCOM_QMI_HELPERS + select QCOM_RPROC_COMMON help Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device Component is programmed using Linux kernel. 
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 218aefc3531c..d32e6b37514d 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -13,6 +13,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -155,8 +158,14 @@ struct qcom_slim_ngd_ctrl { struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM]; struct completion reconf; struct work_struct m_work; + struct work_struct ngd_up_work; struct workqueue_struct *mwq; + struct completion qmi_up; spinlock_t tx_buf_lock; + struct mutex tx_lock; + struct mutex ssr_lock; + struct notifier_block nb; + void *notifier; enum qcom_slim_ngd_state state; dma_addr_t rx_phys_base; dma_addr_t tx_phys_base; @@ -868,14 +877,18 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, if (txn->msg && txn->msg->wbuf) memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes); + mutex_lock(&ctrl->tx_lock); ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl); - if (ret) + if (ret) { + mutex_unlock(&ctrl->tx_lock); return ret; + } timeout = wait_for_completion_timeout(&tx_sent, HZ); if (!timeout) { dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); + mutex_unlock(&ctrl->tx_lock); return -ETIMEDOUT; } @@ -884,10 +897,12 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, if (!timeout) { dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc, txn->mt); + mutex_unlock(&ctrl->tx_lock); return -ETIMEDOUT; } } + mutex_unlock(&ctrl->tx_lock); return 0; } @@ -1200,6 +1215,13 @@ capability_retry: } } +static int qcom_slim_ngd_update_device_status(struct device *dev, void *null) +{ + slim_report_absent(to_slim_device(dev)); + + return 0; +} + static int qcom_slim_ngd_runtime_resume(struct device *dev) { struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); @@ -1267,7 +1289,7 @@ static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl, qmi->svc_info.sq_node = service->node; qmi->svc_info.sq_port = service->port; - qcom_slim_ngd_enable(ctrl, true); + complete(&ctrl->qmi_up); return 0; } @@ -1280,10 +1302,9 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, struct qcom_slim_ngd_ctrl *ctrl = container_of(qmi, struct qcom_slim_ngd_ctrl, qmi); + reinit_completion(&ctrl->qmi_up); qmi->svc_info.sq_node = 0; qmi->svc_info.sq_port = 0; - - qcom_slim_ngd_enable(ctrl, false); } static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { @@ -1333,6 +1354,64 @@ static const struct of_device_id qcom_slim_ngd_dt_match[] = { MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match); +static void qcom_slim_ngd_down(struct qcom_slim_ngd_ctrl *ctrl) +{ + mutex_lock(&ctrl->ssr_lock); + device_for_each_child(ctrl->ctrl.dev, NULL, + qcom_slim_ngd_update_device_status); + qcom_slim_ngd_enable(ctrl, false); + mutex_unlock(&ctrl->ssr_lock); +} + +static void qcom_slim_ngd_up_worker(struct work_struct *work) +{ + struct qcom_slim_ngd_ctrl *ctrl; + + ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work); + + /* Make sure qmi service is up before continuing */ + wait_for_completion_interruptible(&ctrl->qmi_up); + + mutex_lock(&ctrl->ssr_lock); + qcom_slim_ngd_enable(ctrl, true); + mutex_unlock(&ctrl->ssr_lock); +} + +static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, + unsigned long action) +{ + switch (action) { + case QCOM_SSR_BEFORE_SHUTDOWN: + /* Make sure the last dma xfer is finished */ + mutex_lock(&ctrl->tx_lock); + if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) { + pm_runtime_get_noresume(ctrl->dev); + 
ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; + qcom_slim_ngd_down(ctrl); + qcom_slim_ngd_exit_dma(ctrl); + } + mutex_unlock(&ctrl->tx_lock); + break; + case QCOM_SSR_AFTER_POWERUP: + schedule_work(&ctrl->ngd_up_work); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static int qcom_slim_ngd_ssr_notify(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct qcom_slim_ngd_ctrl *ctrl = container_of(nb, + struct qcom_slim_ngd_ctrl, nb); + + return qcom_slim_ngd_ssr_pdr_notify(ctrl, action); +} + static int of_qcom_slim_ngd_register(struct device *parent, struct qcom_slim_ngd_ctrl *ctrl) { @@ -1397,6 +1476,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev) } INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker); + INIT_WORK(&ctrl->ngd_up_work, qcom_slim_ngd_up_worker); ctrl->mwq = create_singlethread_workqueue("ngd_master"); if (!ctrl->mwq) { dev_err(&pdev->dev, "Failed to start master worker\n"); @@ -1444,6 +1524,11 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) return ret; } + ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify; + ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb); + if (IS_ERR(ctrl->notifier)) + return PTR_ERR(ctrl->notifier); + ctrl->dev = dev; ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3; ctrl->framer.superfreq = @@ -1457,9 +1542,12 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) ctrl->ctrl.wakeup = NULL; ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN; + mutex_init(&ctrl->tx_lock); + mutex_init(&ctrl->ssr_lock); spin_lock_init(&ctrl->tx_buf_lock); init_completion(&ctrl->reconf); init_completion(&ctrl->qmi.qmi_comp); + init_completion(&ctrl->qmi_up); platform_driver_register(&qcom_slim_ngd_driver); return of_qcom_slim_ngd_register(dev, ctrl); @@ -1477,6 +1565,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev) struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); + qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb); qcom_slim_ngd_enable(ctrl, false); qcom_slim_ngd_exit_dma(ctrl); qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi); -- cgit v1.2.3 From e1ae85e1830e167a63f94007e50e088b86aa0a16 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Nov 2020 10:24:47 +0000 Subject: slimbus: qcom-ngd-ctrl: add Protection Domain Restart Support Add support to protection domain restart. Protection domain restart would also restart the service just like SSR. Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-4-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/Kconfig | 1 + drivers/slimbus/qcom-ngd-ctrl.c | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig index 02534ce86e46..7d59956b5dfb 100644 --- a/drivers/slimbus/Kconfig +++ b/drivers/slimbus/Kconfig @@ -26,6 +26,7 @@ config SLIM_QCOM_NGD_CTRL depends on ARCH_QCOM || COMPILE_TEST select QCOM_QMI_HELPERS select QCOM_RPROC_COMMON + select QCOM_PDR_HELPERS help Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device Component is programmed using Linux kernel. 
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index d32e6b37514d..943d55a0bc59 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "slimbus.h" @@ -166,6 +167,7 @@ struct qcom_slim_ngd_ctrl { struct mutex ssr_lock; struct notifier_block nb; void *notifier; + struct pdr_handle *pdr; enum qcom_slim_ngd_state state; dma_addr_t rx_phys_base; dma_addr_t tx_phys_base; @@ -1382,6 +1384,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, { switch (action) { case QCOM_SSR_BEFORE_SHUTDOWN: + case SERVREG_SERVICE_STATE_DOWN: /* Make sure the last dma xfer is finished */ mutex_lock(&ctrl->tx_lock); if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) { @@ -1393,6 +1396,7 @@ static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl, mutex_unlock(&ctrl->tx_lock); break; case QCOM_SSR_AFTER_POWERUP: + case SERVREG_SERVICE_STATE_UP: schedule_work(&ctrl->ngd_up_work); break; default: @@ -1412,6 +1416,12 @@ static int qcom_slim_ngd_ssr_notify(struct notifier_block *nb, return qcom_slim_ngd_ssr_pdr_notify(ctrl, action); } +static void slim_pd_status(int state, char *svc_path, void *priv) +{ + struct qcom_slim_ngd_ctrl *ctrl = (struct qcom_slim_ngd_ctrl *)priv; + + qcom_slim_ngd_ssr_pdr_notify(ctrl, state); +} static int of_qcom_slim_ngd_register(struct device *parent, struct qcom_slim_ngd_ctrl *ctrl) { @@ -1499,6 +1509,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) struct qcom_slim_ngd_ctrl *ctrl; struct resource *res; int ret; + struct pdr_service *pds; ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) @@ -1549,6 +1560,18 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev) init_completion(&ctrl->qmi.qmi_comp); init_completion(&ctrl->qmi_up); + ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl); + if (IS_ERR(ctrl->pdr)) { + dev_err(dev, "Failed to init PDR handle\n"); + return PTR_ERR(ctrl->pdr); + } + + pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd"); + if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) { + dev_err(dev, "pdr add lookup failed: %d\n", ret); + return PTR_ERR(pds); + } + platform_driver_register(&qcom_slim_ngd_driver); return of_qcom_slim_ngd_register(dev, ctrl); } @@ -1565,6 +1588,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev) struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); + pdr_handle_release(ctrl->pdr); qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb); qcom_slim_ngd_enable(ctrl, false); qcom_slim_ngd_exit_dma(ctrl); -- cgit v1.2.3 From 50df9842030f899d60e96f1c3ed0c9692338adf7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 27 Nov 2020 10:24:48 +0000 Subject: slimbus: messaging: Fix fall-through warnings for Clang In preparation to enable -Wimplicit-fallthrough for Clang, fix a warning by explicitly adding a break statement instead of letting the code fall through to the next case. Link: https://github.com/KSPP/linux/issues/115 Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-5-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/messaging.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index d5879142dbef..f2b5d347d227 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -258,6 +258,7 @@ int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg, case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION: case SLIM_MSG_MC_CLEAR_INFORMATION: txn->rl += msg->num_bytes; + break; default: break; } -- cgit v1.2.3 From a35c6e18abae71c1321a9d1d16d975bd7de65273 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Fri, 27 Nov 2020 10:24:49 +0000 Subject: slimbus: qcom-ngd-ctrl: Constify static structs qcom_slim_qmi_msg_handlers[] and qcom_slim_ngd_qmi_svc_event_ops are only used as input arguments to qmi_handle_init() which accepts const pointers to both qmi_ops and qmi_msg_handler. Make them const to allow the compiler to put them in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-6-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/qcom-ngd-ctrl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 943d55a0bc59..172ddcc2a241 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -434,7 +434,7 @@ static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl, return 0; } -static struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = { +static const struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = { { .type = QMI_RESPONSE, .msg_id = SLIMBUS_QMI_POWER_RESP_V01, @@ -1309,7 +1309,7 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl, qmi->svc_info.sq_port = 0; } -static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { +static const struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { .new_server = qcom_slim_ngd_qmi_new_server, .del_server = qcom_slim_ngd_qmi_del_server, }; -- cgit v1.2.3 From 39014ce6d6028614a46395923a2c92d058b6fa87 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Fri, 27 Nov 2020 10:24:50 +0000 Subject: slimbus: qcom-ngd-ctrl: Avoid sending power requests without QMI Attempting to send a power request during PM operations, when the QMI handle isn't initialized results in a NULL pointer dereference. So check if the QMI handle has been initialized before attempting to post the power requests. 
Fixes: 917809e2280b ("slimbus: ngd: Add qcom SLIMBus NGD driver") Signed-off-by: Bjorn Andersson Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-7-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/qcom-ngd-ctrl.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 172ddcc2a241..82dad7490588 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1229,6 +1229,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + if (!ctrl->qmi.handle) + return 0; + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP) ret = qcom_slim_ngd_power_up(ctrl); if (ret) { @@ -1616,6 +1619,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev) struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev); int ret = 0; + if (!ctrl->qmi.handle) + return 0; + ret = qcom_slim_qmi_power_request(ctrl, false); if (ret && ret != -EBUSY) dev_info(ctrl->dev, "slim resource not idle:%d\n", ret); -- cgit v1.2.3 From d3bb5fe9ca108c07c0db7e04cc490b97b1c235f1 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 27 Nov 2020 10:24:51 +0000 Subject: slimbus: qcom-ngd-ctrl: remove redundant out of memory messages Failure of dma_alloc_coherent will already throw a error message, so addition message is really redundant here. Remove it! Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102451.17114-8-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/qcom-ngd-ctrl.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 82dad7490588..c054e83ab636 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -689,7 +689,6 @@ static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl) ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base, GFP_KERNEL); if (!ctrl->rx_base) { - dev_err(dev, "dma_alloc_coherent failed\n"); ret = -ENOMEM; goto rel_rx; } @@ -728,7 +727,6 @@ static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl) ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base, GFP_KERNEL); if (!ctrl->tx_base) { - dev_err(dev, "dma_alloc_coherent failed\n"); ret = -EINVAL; goto rel_tx; } -- cgit v1.2.3 From fd3bb8f54a88107570334c156efb0c724a261003 Mon Sep 17 00:00:00 2001 From: Evan Green Date: Fri, 27 Nov 2020 10:28:34 +0000 Subject: nvmem: core: Add support for keepout regions Introduce support into the nvmem core for arrays of register ranges that should not result in actual device access. For these regions a constant byte (repeated) is returned instead on read, and writes are quietly ignored and returned as successful. This is useful for instance if certain efuse regions are protected from access by Linux because they contain secret info to another part of the system (like an integrated modem). 
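To make the new interface concrete, a hypothetical provider-side sketch (driver name and offsets invented here, not part of this patch) of how keepout regions are described:

    static const struct nvmem_keepout foo_keepout[] = {
        /* modem-owned fuse words: reads return 0x00, writes are silently dropped */
        { .start = 0x100, .end = 0x110, .value = 0x00 },
    };

    static struct nvmem_config foo_nvmem_config = {
        .name     = "foo-efuse",
        .keepout  = foo_keepout,
        .nkeepout = ARRAY_SIZE(foo_keepout),
        /* ... reg_read/reg_write, word_size, stride, etc. as usual ... */
    };

The ranges must be sorted, non-overlapping and compatible with the device's word_size and stride; nvmem_validate_keepouts() in the diff below rejects anything else at registration time.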
Signed-off-by: Evan Green Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102837.19366-3-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/core.c | 153 +++++++++++++++++++++++++++++++++++++++-- include/linux/nvmem-provider.h | 17 +++++ 2 files changed, 166 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index a09ff8409f60..177f5bf27c6d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -34,6 +34,8 @@ struct nvmem_device { struct bin_attribute eeprom; struct device *base_dev; struct list_head cells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; struct gpio_desc *wp_gpio; @@ -66,8 +68,8 @@ static LIST_HEAD(nvmem_lookup_list); static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); -static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, - void *val, size_t bytes) +static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) { if (nvmem->reg_read) return nvmem->reg_read(nvmem->priv, offset, val, bytes); @@ -75,8 +77,8 @@ static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, return -EINVAL; } -static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, - void *val, size_t bytes) +static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) { int ret; @@ -90,6 +92,88 @@ static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, return -EINVAL; } +static int nvmem_access_with_keepouts(struct nvmem_device *nvmem, + unsigned int offset, void *val, + size_t bytes, int write) +{ + + unsigned int end = offset + bytes; + unsigned int kend, ksize; + const struct nvmem_keepout *keepout = nvmem->keepout; + const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; + int rc; + + /* + * Skip all keepouts before the range being accessed. + * Keepouts are sorted. + */ + while ((keepout < keepoutend) && (keepout->end <= offset)) + keepout++; + + while ((offset < end) && (keepout < keepoutend)) { + /* Access the valid portion before the keepout. */ + if (offset < keepout->start) { + kend = min(end, keepout->start); + ksize = kend - offset; + if (write) + rc = __nvmem_reg_write(nvmem, offset, val, ksize); + else + rc = __nvmem_reg_read(nvmem, offset, val, ksize); + + if (rc) + return rc; + + offset += ksize; + val += ksize; + } + + /* + * Now we're aligned to the start of this keepout zone. Go + * through it. 
+ */ + kend = min(end, keepout->end); + ksize = kend - offset; + if (!write) + memset(val, keepout->value, ksize); + + val += ksize; + offset += ksize; + keepout++; + } + + /* + * If we ran out of keepouts but there's still stuff to do, send it + * down directly + */ + if (offset < end) { + ksize = end - offset; + if (write) + return __nvmem_reg_write(nvmem, offset, val, ksize); + else + return __nvmem_reg_read(nvmem, offset, val, ksize); + } + + return 0; +} + +static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (!nvmem->nkeepout) + return __nvmem_reg_read(nvmem, offset, val, bytes); + + return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false); +} + +static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, + void *val, size_t bytes) +{ + if (!nvmem->nkeepout) + return __nvmem_reg_write(nvmem, offset, val, bytes); + + return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true); +} + #ifdef CONFIG_NVMEM_SYSFS static const char * const nvmem_type_str[] = { [NVMEM_TYPE_UNKNOWN] = "Unknown", @@ -533,6 +617,59 @@ nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) return cell; } +static int nvmem_validate_keepouts(struct nvmem_device *nvmem) +{ + unsigned int cur = 0; + const struct nvmem_keepout *keepout = nvmem->keepout; + const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout; + + while (keepout < keepoutend) { + /* Ensure keepouts are sorted and don't overlap. */ + if (keepout->start < cur) { + dev_err(&nvmem->dev, + "Keepout regions aren't sorted or overlap.\n"); + + return -ERANGE; + } + + if (keepout->end < keepout->start) { + dev_err(&nvmem->dev, + "Invalid keepout region.\n"); + + return -EINVAL; + } + + /* + * Validate keepouts (and holes between) don't violate + * word_size constraints. + */ + if ((keepout->end - keepout->start < nvmem->word_size) || + ((keepout->start != cur) && + (keepout->start - cur < nvmem->word_size))) { + + dev_err(&nvmem->dev, + "Keepout regions violate word_size constraints.\n"); + + return -ERANGE; + } + + /* Validate keepouts don't violate stride (alignment). */ + if (!IS_ALIGNED(keepout->start, nvmem->stride) || + !IS_ALIGNED(keepout->end, nvmem->stride)) { + + dev_err(&nvmem->dev, + "Keepout regions violate stride.\n"); + + return -EINVAL; + } + + cur = keepout->end; + keepout++; + } + + return 0; +} + static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) { struct device_node *parent, *child; @@ -647,6 +784,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->type = config->type; nvmem->reg_read = config->reg_read; nvmem->reg_write = config->reg_write; + nvmem->keepout = config->keepout; + nvmem->nkeepout = config->nkeepout; if (!config->no_of_node) nvmem->dev.of_node = config->dev->of_node; @@ -671,6 +810,12 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->dev.groups = nvmem_dev_groups; #endif + if (nvmem->nkeepout) { + rval = nvmem_validate_keepouts(nvmem); + if (rval) + goto err_put_device; + } + dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); rval = device_register(&nvmem->dev); diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 06409a6c40bc..e162b757b6d5 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -30,6 +30,19 @@ enum nvmem_type { #define NVMEM_DEVID_NONE (-1) #define NVMEM_DEVID_AUTO (-2) +/** + * struct nvmem_keepout - NVMEM register keepout range. 
+ * + * @start: The first byte offset to avoid. + * @end: One beyond the last byte offset to avoid. + * @value: The byte to fill reads with for this region. + */ +struct nvmem_keepout { + unsigned int start; + unsigned int end; + unsigned char value; +}; + /** * struct nvmem_config - NVMEM device configuration * @@ -39,6 +52,8 @@ enum nvmem_type { * @owner: Pointer to exporter module. Used for refcounting. * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. + * @keepout: Optional array of keepout ranges (sorted ascending by start). + * @nkeepout: Number of elements in the keepout array. * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. @@ -66,6 +81,8 @@ struct nvmem_config { struct gpio_desc *wp_gpio; const struct nvmem_cell_info *cells; int ncells; + const struct nvmem_keepout *keepout; + unsigned int nkeepout; enum nvmem_type type; bool read_only; bool root_only; -- cgit v1.2.3 From 044ee8f85267599a9b0112911f5c16d4548b4289 Mon Sep 17 00:00:00 2001 From: Evan Green Date: Fri, 27 Nov 2020 10:28:36 +0000 Subject: nvmem: qfprom: Don't touch certain fuses Some fuse ranges are protected by the XPU such that the AP cannot access them. Attempting to do so causes an SError. Use the newly introduced per-soc compatible string, and the newly introduced nvmem keepout support to attach the set of regions we should not access. Reviewed-by: Douglas Anderson Signed-off-by: Evan Green Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102837.19366-5-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/qfprom.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'drivers') diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c index 5e9e60e2e591..6cace24dfbf7 100644 --- a/drivers/nvmem/qfprom.c +++ b/drivers/nvmem/qfprom.c @@ -12,6 +12,7 @@ #include #include #include +#include #include /* Blow timer clock frequency in Mhz */ @@ -88,6 +89,28 @@ struct qfprom_touched_values { u32 timer_val; }; +/** + * struct qfprom_soc_compatible_data - Data matched against the SoC + * compatible string. + * + * @keepout: Array of keepout regions for this SoC. + * @nkeepout: Number of elements in the keepout array. + */ +struct qfprom_soc_compatible_data { + const struct nvmem_keepout *keepout; + unsigned int nkeepout; +}; + +static const struct nvmem_keepout sc7180_qfprom_keepout[] = { + {.start = 0x128, .end = 0x148}, + {.start = 0x220, .end = 0x228} +}; + +static const struct qfprom_soc_compatible_data sc7180_qfprom = { + .keepout = sc7180_qfprom_keepout, + .nkeepout = ARRAY_SIZE(sc7180_qfprom_keepout) +}; + /** * qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing. * @priv: Our driver data. 
@@ -281,6 +304,7 @@ static int qfprom_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct nvmem_device *nvmem; + const struct qfprom_soc_compatible_data *soc_data; struct qfprom_priv *priv; int ret; @@ -299,6 +323,11 @@ static int qfprom_probe(struct platform_device *pdev) econfig.priv = priv; priv->dev = dev; + soc_data = device_get_match_data(dev); + if (soc_data) { + econfig.keepout = soc_data->keepout; + econfig.nkeepout = soc_data->nkeepout; + } /* * If more than one region is provided then the OS has the ability @@ -354,6 +383,7 @@ static int qfprom_probe(struct platform_device *pdev) static const struct of_device_id qfprom_of_match[] = { { .compatible = "qcom,qfprom",}, + { .compatible = "qcom,sc7180-qfprom", .data = &sc7180_qfprom}, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, qfprom_of_match); -- cgit v1.2.3 From 3311bf18467272388039922a5e29c4925b291f73 Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Fri, 27 Nov 2020 10:28:37 +0000 Subject: nvmem: imx-ocotp: add support for the unaliged word count When offset is not 4 bytes aligned, directly shift righty by 2 bits will cause reading out wrong data. Since imx ocotp only supports 4 bytes reading once, we need handle offset is not 4 bytes aligned and enlarge the bytes to 4 bytes aligned. After reading finished, copy the needed data from buffer to caller and free buffer. Signed-off-by: Peng Fan Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201127102837.19366-6-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/imx-ocotp.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index 7a1ebd6fd08b..08f41328cc71 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -4,6 +4,8 @@ * * Copyright (c) 2015 Pengutronix, Philipp Zabel * + * Copyright 2019 NXP + * * Based on the barebox ocotp driver, * Copyright (c) 2010 Baruch Siach , * Orex Computed Radiography @@ -158,22 +160,30 @@ static int imx_ocotp_read(void *context, unsigned int offset, { struct ocotp_priv *priv = context; unsigned int count; - u32 *buf = val; + u8 *buf, *p; int i, ret; - u32 index; + u32 index, num_bytes; index = offset >> 2; - count = bytes >> 2; + num_bytes = round_up((offset % 4) + bytes, 4); + count = num_bytes >> 2; if (count > (priv->params->nregs - index)) count = priv->params->nregs - index; + p = kzalloc(num_bytes, GFP_KERNEL); + if (!p) + return -ENOMEM; + mutex_lock(&ocotp_mutex); + buf = p; + ret = clk_prepare_enable(priv->clk); if (ret < 0) { mutex_unlock(&ocotp_mutex); dev_err(priv->dev, "failed to prepare/enable ocotp clk\n"); + kfree(p); return ret; } @@ -184,7 +194,7 @@ static int imx_ocotp_read(void *context, unsigned int offset, } for (i = index; i < (index + count); i++) { - *buf++ = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 + + *(u32 *)buf = readl(priv->base + IMX_OCOTP_OFFSET_B0W0 + i * IMX_OCOTP_OFFSET_PER_WORD); /* 47.3.1.2 @@ -193,13 +203,21 @@ static int imx_ocotp_read(void *context, unsigned int offset, * software before any new write, read or reload access can be * issued */ - if (*(buf - 1) == IMX_OCOTP_READ_LOCKED_VAL) + if (*((u32 *)buf) == IMX_OCOTP_READ_LOCKED_VAL) imx_ocotp_clr_err_if_set(priv); + + buf += 4; } + index = offset % 4; + memcpy(val, &p[index], bytes); + read_end: clk_disable_unprepare(priv->clk); mutex_unlock(&ocotp_mutex); + + kfree(p); + return ret; } @@ -447,7 +465,7 @@ static struct 
nvmem_config imx_ocotp_nvmem_config = { .name = "imx-ocotp", .read_only = false, .word_size = 4, - .stride = 4, + .stride = 1, .reg_read = imx_ocotp_read, .reg_write = imx_ocotp_write, }; -- cgit v1.2.3 From ac0f82b1b4956e348a6b2de8104308144ffb6ef7 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 27 Nov 2020 10:52:42 -0700 Subject: coresight: etm4x: Skip setting LPOVERRIDE bit for qcom, skip-power-up There is a bug on the systems supporting to skip power up (qcom,skip-power-up) where setting LPOVERRIDE bit(low-power state override behaviour) will result in CPU hangs/lockups even on the implementations which supports it. So skip setting the LPOVERRIDE bit for such platforms. Fixes: 02510a5aa78d ("coresight: etm4x: Add support to skip trace unit power up") Cc: stable@vger.kernel.org Reviewed-by: Suzuki K Poulose Signed-off-by: Sai Prakash Ranjan Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-2-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index abd706b216ac..6096d7abf80d 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -779,7 +779,7 @@ static void etm4_init_arch_data(void *info) * LPOVERRIDE, bit[23] implementation supports * low-power state override */ - if (BMVAL(etmidr5, 23, 23)) + if (BMVAL(etmidr5, 23, 23) && (!drvdata->skip_power_up)) drvdata->lpoverride = true; else drvdata->lpoverride = false; -- cgit v1.2.3 From 3ac1e0069b2c4fafce1317b389e7d1cfac69a039 Mon Sep 17 00:00:00 2001 From: Zou Wei Date: Fri, 27 Nov 2020 10:52:43 -0700 Subject: coresight: core: Remove unneeded semicolon Fixes coccicheck warning: ./drivers/hwtracing/coresight/coresight-core.c:421:4-5: Unneeded semicolon Reported-by: Hulk Robot Signed-off-by: Zou Wei Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-3-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index cc9e8025c533..29c83eac3106 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -418,7 +418,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode) if (ret) { coresight_control_assoc_ectdev(csdev, false); return ret; - }; + } } csdev->enable = true; } -- cgit v1.2.3 From 93dd64404cbe63b0afba371acd8db36e84b286c7 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 27 Nov 2020 10:52:44 -0700 Subject: coresight: etm4x: Fix accesses to TRCVMIDCTLR1 TRCVMIDCTRL1 is only implemented only if the TRCIDR4.NUMVMIDC > 4. We must not touch the register otherwise. 
Cc: stable@vger.kernel.org Cc: Mathieu Poirier Cc: Mike Leach Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-4-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 6096d7abf80d..e67365d1ce28 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -193,7 +193,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) writeq_relaxed(config->vmid_val[i], drvdata->base + TRCVMIDCVRn(i)); writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); - writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); + if (drvdata->numvmidc > 4) + writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); if (!drvdata->skip_power_up) { /* @@ -1243,7 +1244,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1); state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0); - state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1); + if (drvdata->numvmidc > 4) + state->trcvmidcctlr1 = readl(drvdata->base + TRCVMIDCCTLR1); state->trcclaimset = readl(drvdata->base + TRCCLAIMCLR); @@ -1353,7 +1355,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1); writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0); - writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1); + if (drvdata->numvmidc > 4) + writel_relaxed(state->trcvmidcctlr1, drvdata->base + TRCVMIDCCTLR1); writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET); -- cgit v1.2.3 From f2603b22e3d2dcffd8b0736e5c68df497af6bc84 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 27 Nov 2020 10:52:45 -0700 Subject: coresight: etm4x: Fix accesses to TRCCIDCTLR1 The TRCCIDCTLR1 is only implemented if TRCIDR4.NUMCIDC > 4. Don't touch the register if it is not implemented. 
Cc: stable@vger.kernel.org Cc: Mathieu Poirier Cc: Mike Leach Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-5-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index e67365d1ce28..af0ab2f44865 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -187,7 +187,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) writeq_relaxed(config->ctxid_pid[i], drvdata->base + TRCCIDCVRn(i)); writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); - writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); + if (drvdata->numcidc > 4) + writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); for (i = 0; i < drvdata->numvmidc; i++) writeq_relaxed(config->vmid_val[i], @@ -1241,7 +1242,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcvmidcvr[i] = readq(drvdata->base + TRCVMIDCVRn(i)); state->trccidcctlr0 = readl(drvdata->base + TRCCIDCCTLR0); - state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1); + if (drvdata->numcidc > 4) + state->trccidcctlr1 = readl(drvdata->base + TRCCIDCCTLR1); state->trcvmidcctlr0 = readl(drvdata->base + TRCVMIDCCTLR0); if (drvdata->numvmidc > 4) @@ -1352,7 +1354,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) drvdata->base + TRCVMIDCVRn(i)); writel_relaxed(state->trccidcctlr0, drvdata->base + TRCCIDCCTLR0); - writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1); + if (drvdata->numcidc > 4) + writel_relaxed(state->trccidcctlr1, drvdata->base + TRCCIDCCTLR1); writel_relaxed(state->trcvmidcctlr0, drvdata->base + TRCVMIDCCTLR0); if (drvdata->numvmidc > 4) -- cgit v1.2.3 From 4e2187274fe8f3ddf55870e7e6b1aa7d65ba12eb Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 27 Nov 2020 10:52:46 -0700 Subject: coresight: etm4x: Update TRCIDR3.NUMPROCS handling to match v4.2 Since ETMv4.2, TRCIDR3.NUMPROCS has been extended to a 5bit field by encoding the top 2 bits[4:3] in TRCIDR3.[13:12], which were RES0. 
Fix the driver to compute the field correctly for ETMv4.2+ Cc: Mike Leach Cc: Mathieu Poirier Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-6-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index af0ab2f44865..cbbe755d1d16 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -724,8 +724,13 @@ static void etm4_init_arch_data(void *info) else drvdata->sysstall = false; - /* NUMPROC, bits[30:28] the number of PEs available for tracing */ - drvdata->nr_pe = BMVAL(etmidr3, 28, 30); + /* + * NUMPROC - the number of PEs available for tracing, 5bits + * = TRCIDR3.bits[13:12]bits[30:28] + * bits[4:3] = TRCIDR3.bits[13:12] (since etm-v4.2, otherwise RES0) + * bits[3:0] = TRCIDR3.bits[30:28] + */ + drvdata->nr_pe = (BMVAL(etmidr3, 12, 13) << 3) | BMVAL(etmidr3, 28, 30); /* NOOVERFLOW, bit[31] is trace overflow prevention supported */ if (BMVAL(etmidr3, 31, 31)) -- cgit v1.2.3 From 6288b4ceca868eac4bf729532f8d845e3ecbed98 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 27 Nov 2020 10:52:47 -0700 Subject: coresight: etm4x: Fix accesses to TRCPROCSELR TRCPROCSELR is not implemented if the TRCIDR3.NUMPROC == 0. Skip accessing the register in such cases. Cc: stable@vger.kernel.org Cc: Mathieu Poirier Cc: Mike Leach Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-7-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index cbbe755d1d16..28dd278f6d47 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -124,8 +124,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) if (coresight_timeout(drvdata->base, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); - - writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR); + if (drvdata->nr_pe) + writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR); writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR); /* nothing specific implemented */ writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); @@ -1185,7 +1185,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) state = drvdata->save_state; state->trcprgctlr = readl(drvdata->base + TRCPRGCTLR); - state->trcprocselr = readl(drvdata->base + TRCPROCSELR); + if (drvdata->nr_pe) + state->trcprocselr = readl(drvdata->base + TRCPROCSELR); state->trcconfigr = readl(drvdata->base + TRCCONFIGR); state->trcauxctlr = readl(drvdata->base + TRCAUXCTLR); state->trceventctl0r = readl(drvdata->base + TRCEVENTCTL0R); @@ -1292,7 +1293,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) writel_relaxed(state->trcclaimset, drvdata->base + TRCCLAIMSET); writel_relaxed(state->trcprgctlr, drvdata->base + TRCPRGCTLR); - writel_relaxed(state->trcprocselr, drvdata->base + TRCPROCSELR); + if (drvdata->nr_pe) + writel_relaxed(state->trcprocselr, 
drvdata->base + TRCPROCSELR); writel_relaxed(state->trcconfigr, drvdata->base + TRCCONFIGR); writel_relaxed(state->trcauxctlr, drvdata->base + TRCAUXCTLR); writel_relaxed(state->trceventctl0r, drvdata->base + TRCEVENTCTL0R); -- cgit v1.2.3 From 60c519c5d3629c21ba356782434d5b612d312de4 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Fri, 27 Nov 2020 10:52:48 -0700 Subject: coresight: etm4x: Handle TRCVIPCSSCTLR accesses TRCVIPCSSCTLR is not present if the TRCIDR4.NUMPC > 0. Thus we should only access the register if it is present, preventing any undesired behavior. Cc: stable@vger.kernel.org Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-8-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm4x-core.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 28dd278f6d47..d78a37b6592c 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -141,8 +141,9 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR); writel_relaxed(config->vissctlr, drvdata->base + TRCVISSCTLR); - writel_relaxed(config->vipcssctlr, - drvdata->base + TRCVIPCSSCTLR); + if (drvdata->nr_pe_cmp) + writel_relaxed(config->vipcssctlr, + drvdata->base + TRCVIPCSSCTLR); for (i = 0; i < drvdata->nrseqstate - 1; i++) writel_relaxed(config->seq_ctrl[i], drvdata->base + TRCSEQEVRn(i)); @@ -1202,7 +1203,8 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcvictlr = readl(drvdata->base + TRCVICTLR); state->trcviiectlr = readl(drvdata->base + TRCVIIECTLR); state->trcvissctlr = readl(drvdata->base + TRCVISSCTLR); - state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR); + if (drvdata->nr_pe_cmp) + state->trcvipcssctlr = readl(drvdata->base + TRCVIPCSSCTLR); state->trcvdctlr = readl(drvdata->base + TRCVDCTLR); state->trcvdsacctlr = readl(drvdata->base + TRCVDSACCTLR); state->trcvdarcctlr = readl(drvdata->base + TRCVDARCCTLR); @@ -1310,7 +1312,8 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) writel_relaxed(state->trcvictlr, drvdata->base + TRCVICTLR); writel_relaxed(state->trcviiectlr, drvdata->base + TRCVIIECTLR); writel_relaxed(state->trcvissctlr, drvdata->base + TRCVISSCTLR); - writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR); + if (drvdata->nr_pe_cmp) + writel_relaxed(state->trcvipcssctlr, drvdata->base + TRCVIPCSSCTLR); writel_relaxed(state->trcvdctlr, drvdata->base + TRCVDCTLR); writel_relaxed(state->trcvdsacctlr, drvdata->base + TRCVDSACCTLR); writel_relaxed(state->trcvdarcctlr, drvdata->base + TRCVDARCCTLR); -- cgit v1.2.3 From a4ecf0e047c66382de1463d2e47d5683b934e008 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Fri, 27 Nov 2020 10:52:49 -0700 Subject: coresight: Remove unnecessary THIS_MODULE of funnel and replicator driver As THIS_MODULE has been set in platform_driver_register(), so remove it from static funnel driver and static replicator driver to avoid set it twice. 
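For reference, platform_driver_register() is itself a macro that already supplies THIS_MODULE, roughly as in include/linux/platform_device.h (modulo kernel version):

    #define platform_driver_register(drv) \
        __platform_driver_register(drv, THIS_MODULE)

so an explicit .owner = THIS_MODULE in a statically registered platform driver only sets the same value twice, which is what the replacement comments in the diff below point out.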
Signed-off-by: Qi Liu Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-9-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-funnel.c | 2 +- drivers/hwtracing/coresight/coresight-replicator.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index af40814ce560..39be46b74dfe 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -356,7 +356,7 @@ static struct platform_driver static_funnel_driver = { .remove = static_funnel_remove, .driver = { .name = "coresight-static-funnel", - .owner = THIS_MODULE, + /* THIS_MODULE is taken care of by platform_driver_register() */ .of_match_table = static_funnel_match, .acpi_match_table = ACPI_PTR(static_funnel_ids), .pm = &funnel_dev_pm_ops, diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 62afdde0e5ea..6772f23e5c4b 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -374,7 +374,7 @@ static struct platform_driver static_replicator_driver = { .remove = static_replicator_remove, .driver = { .name = "coresight-static-replicator", - .owner = THIS_MODULE, + /* THIS_MODULE is taken care of by platform_driver_register() */ .of_match_table = of_match_ptr(static_replicator_match), .acpi_match_table = ACPI_PTR(static_replicator_acpi_ids), .pm = &replicator_dev_pm_ops, -- cgit v1.2.3 From 868663dd5d69fef05bfb004f91da5c30e9b93461 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 27 Nov 2020 10:52:50 -0700 Subject: coresight: tmc-etf: Fix NULL ptr dereference in tmc_enable_etf_sink_perf() There was a report of NULL pointer dereference in ETF enable path for perf CS mode with PID monitoring. It is almost 100% reproducible when the process to monitor is something very active such as chrome and with ETF as the sink and not ETR. Currently in a bid to find the pid, the owner is dereferenced via task_pid_nr() call in tmc_enable_etf_sink_perf() and with owner being NULL, we get a NULL pointer dereference. Looking at the ETR and other places in the kernel, ETF and the ETB are the only places trying to dereference the task(owner) in tmc_enable_etf_sink_perf() which is also called from the sched_in path as in the call trace. Owner(task) is NULL even in the case of ETR in tmc_enable_etr_sink_perf(), but since we cache the PID in alloc_buffer() callback and it is done as part of etm_setup_aux() when allocating buffer for ETR sink, we never dereference this NULL pointer and we are safe. So lets do the same thing with ETF and cache the PID to which the cs_buffer belongs in tmc_alloc_etf_buffer() as done for ETR. This will also remove the unnecessary function calls(task_pid_nr()) since we are caching the PID. Easily reproducible running below: perf record -e cs_etm/@tmc_etf0/ -N -p Unable to handle kernel NULL pointer dereference at virtual address 0000000000000548 Mem abort info: ESR = 0x96000006 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000006 CM = 0, WnR = 0 ... 
Call trace: tmc_enable_etf_sink+0xe4/0x280 coresight_enable_path+0x168/0x1fc etm_event_start+0x8c/0xf8 etm_event_add+0x38/0x54 event_sched_in+0x194/0x2ac group_sched_in+0x54/0x12c flexible_sched_in+0xd8/0x120 visit_groups_merge+0x100/0x16c ctx_flexible_sched_in+0x50/0x74 ctx_sched_in+0xa4/0xa8 perf_event_sched_in+0x60/0x6c perf_event_context_sched_in+0x98/0xe0 __perf_event_task_sched_in+0x5c/0xd8 finish_task_switch+0x184/0x1cc schedule_tail+0x20/0xec ret_from_fork+0x4/0x18 Fixes: 880af782c6e8 ("coresight: tmc-etf: Add support for CPU-wide trace scenarios") Cc: stable@vger.kernel.org Signed-off-by: Sai Prakash Ranjan Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-10-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-priv.h | 2 ++ drivers/hwtracing/coresight/coresight-tmc-etf.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h index 65a29293b6cb..f5f654ea2994 100644 --- a/drivers/hwtracing/coresight/coresight-priv.h +++ b/drivers/hwtracing/coresight/coresight-priv.h @@ -87,6 +87,7 @@ enum cs_mode { * struct cs_buffer - keep track of a recording session' specifics * @cur: index of the current buffer * @nr_pages: max number of pages granted to us + * @pid: PID this cs_buffer belongs to * @offset: offset within the current buffer * @data_size: how much we collected in this run * @snapshot: is this run in snapshot mode @@ -95,6 +96,7 @@ enum cs_mode { struct cs_buffers { unsigned int cur; unsigned int nr_pages; + pid_t pid; unsigned long offset; local_t data_size; bool snapshot; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c index 44402d413ebb..989d965f3d90 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etf.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c @@ -227,6 +227,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) unsigned long flags; struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct perf_output_handle *handle = data; + struct cs_buffers *buf = etm_perf_sink_config(handle); spin_lock_irqsave(&drvdata->spinlock, flags); do { @@ -243,7 +244,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data) } /* Get a handle on the pid of the process to monitor */ - pid = task_pid_nr(handle->event->owner); + pid = buf->pid; if (drvdata->pid != -1 && drvdata->pid != pid) { ret = -EBUSY; @@ -399,6 +400,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, if (!buf) return NULL; + buf->pid = task_pid_nr(event->owner); buf->snapshot = overwrite; buf->nr_pages = nr_pages; buf->data_pages = pages; -- cgit v1.2.3 From 22b2beaa7f166f550424cbb3b988aeaa7ef0425a Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Fri, 27 Nov 2020 10:52:51 -0700 Subject: coresight: etb10: Fix possible NULL ptr dereference in etb_enable_perf() There was a report of NULL pointer dereference in ETF enable path for perf CS mode with PID monitoring. It is almost 100% reproducible when the process to monitor is something very active such as chrome and with ETF as the sink, not ETR. But code path shows that ETB has a similar path as ETF, so there could be possible NULL pointer dereference crash in ETB as well. 
Currently in a bid to find the pid, the owner is dereferenced via task_pid_nr() call in etb_enable_perf() and with owner being NULL, we can get a NULL pointer dereference, so have a similar fix as ETF where we cache PID in alloc_buffer() callback which is called as the part of etm_setup_aux(). Fixes: 75d7dbd38824 ("coresight: etb10: Add support for CPU-wide trace scenarios") Cc: stable@vger.kernel.org Signed-off-by: Sai Prakash Ranjan Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-11-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etb10.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 248cc82c838e..1b320ab581ca 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -176,6 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) unsigned long flags; struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); struct perf_output_handle *handle = data; + struct cs_buffers *buf = etm_perf_sink_config(handle); spin_lock_irqsave(&drvdata->spinlock, flags); @@ -186,7 +187,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data) } /* Get a handle on the pid of the process to monitor */ - pid = task_pid_nr(handle->event->owner); + pid = buf->pid; if (drvdata->pid != -1 && drvdata->pid != pid) { ret = -EBUSY; @@ -383,6 +384,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev, if (!buf) return NULL; + buf->pid = task_pid_nr(event->owner); buf->snapshot = overwrite; buf->nr_pages = nr_pages; buf->data_pages = pages; -- cgit v1.2.3 From 92815c0041ffd16799b408ff9fce9bae697e2bf8 Mon Sep 17 00:00:00 2001 From: Kaixu Xia Date: Fri, 27 Nov 2020 10:52:52 -0700 Subject: coresight: tmc-etr: Assign boolean values to a bool variable Fix the following coccinelle warnings: ./drivers/hwtracing/coresight/coresight-tmc-etr.c:957:2-15: WARNING: Assignment of 0/1 to bool variable Fix them by assigning boolean values. 
Reported-by: Tosk Robot Signed-off-by: Kaixu Xia Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-12-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-tmc-etr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 714f9e867e5f..525f0ecc129c 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -954,11 +954,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata) dev_dbg(&drvdata->csdev->dev, "tmc memory error detected, truncating buffer\n"); etr_buf->len = 0; - etr_buf->full = 0; + etr_buf->full = false; return; } - etr_buf->full = status & TMC_STS_FULL; + etr_buf->full = !!(status & TMC_STS_FULL); WARN_ON(!etr_buf->ops || !etr_buf->ops->sync); -- cgit v1.2.3 From 1cc573d5754e92372a7e30e35468644f8811e1a4 Mon Sep 17 00:00:00 2001 From: Mao Jinlong Date: Fri, 27 Nov 2020 10:52:53 -0700 Subject: coresight: tmc-etr: Check if page is valid before dma_map_page() alloc_pages_node() return should be checked before calling dma_map_page() to make sure that valid page is mapped or else it can lead to aborts as below: Unable to handle kernel paging request at virtual address ffffffc008000000 Mem abort info: ... pc : __dma_inv_area+0x40/0x58 lr : dma_direct_map_page+0xd8/0x1c8 Call trace: __dma_inv_area tmc_pages_alloc tmc_alloc_data_pages tmc_alloc_sg_table tmc_init_etr_sg_table tmc_alloc_etr_buf tmc_enable_etr_sink_sysfs tmc_enable_etr_sink coresight_enable_path coresight_enable enable_source_store dev_attr_store sysfs_kf_write Fixes: 99443ea19e8b ("coresight: Add generic TMC sg table framework") Cc: stable@vger.kernel.org Reviewed-by: Suzuki K Poulose Signed-off-by: Mao Jinlong Signed-off-by: Sai Prakash Ranjan Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-13-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-tmc-etr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 525f0ecc129c..a31a4d7ae25e 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -217,6 +217,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages, } else { page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) + goto err; } paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir); if (dma_mapping_error(real_dev, paddr)) -- cgit v1.2.3 From 56a9ecd2a26d7c4997e7b4355a5559e3adc23ac6 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 27 Nov 2020 10:52:54 -0700 Subject: coresight: Fix W=1 warnings in core framework CC drivers/hwtracing/coresight/coresight-etm4x-sysfs.o CC drivers/hwtracing/coresight/coresight-stm.o drivers/hwtracing/coresight/coresight-core.c:440: warning: Function parameter or member 'csdev' not described in 'coresight_disable_source' drivers/hwtracing/coresight/coresight-core.c:670: warning: Function parameter or member 'csdev' not described in 'coresight_get_ref' drivers/hwtracing/coresight/coresight-core.c:687: warning: Function parameter or member 'csdev' not described in 'coresight_put_ref' drivers/hwtracing/coresight/coresight-core.c:758: warning: Function parameter or member 'sink' not described in 
'_coresight_build_path' CC drivers/hwtracing/coresight/coresight-cpu-debug.o CC drivers/hwtracing/coresight/coresight-catu.o Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-14-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-core.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 29c83eac3106..4ba801dffcb7 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -432,7 +432,7 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode) * coresight_disable_source - Drop the reference count by 1 and disable * the device if there are no users left. * - * @csdev - The coresight device to disable + * @csdev: The coresight device to disable * * Returns true if the device has been disabled. */ @@ -663,6 +663,9 @@ struct coresight_device *coresight_get_sink_by_id(u32 id) /** * coresight_get_ref- Helper function to increase reference count to module * and device. + * + * @csdev: The coresight device to get a reference on. + * * Return true in successful case and power up the device. * Return false when failed to get reference of module. */ @@ -682,6 +685,8 @@ static inline bool coresight_get_ref(struct coresight_device *csdev) /** * coresight_put_ref- Helper function to decrease reference count to module * and device. Power off the device. + * + * @csdev: The coresight device to decrement a reference from. */ static inline void coresight_put_ref(struct coresight_device *csdev) { @@ -744,6 +749,7 @@ static void coresight_drop_device(struct coresight_device *csdev) /** * _coresight_build_path - recursively build a path from a @csdev to a sink. * @csdev: The device to start from. + * @sink: The final sink we want in this path. * @path: The list to add devices to. * * The tree of Coresight device is traversed until an activated sink is -- cgit v1.2.3 From ea966a4a5fa59d9dc57f19427e3ba2b70766860b Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 27 Nov 2020 10:52:55 -0700 Subject: coresight-tpiu: Fix W=1 warning in TPIU driver CC drivers/hwtracing/coresight/coresight-etm4x-core.o CC drivers/hwtracing/coresight/coresight-etm4x-sysfs.o CC drivers/hwtracing/coresight/coresight-stm.o drivers/hwtracing/coresight/coresight-tpiu.c:53: warning: Cannot understand * @base: memory mapped base address for this component. on line 53 - I thought it was a doc line CC drivers/hwtracing/coresight/coresight-cpu-debug.o CC drivers/hwtracing/coresight/coresight-catu.o Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-15-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-tpiu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 566c57e03596..010762a46087 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -49,7 +49,7 @@ DEFINE_CORESIGHT_DEVLIST(tpiu_devs, "tpiu"); -/** +/* * @base: memory mapped base address for this component. * @atclk: optional clock for the core parts of the TPIU. * @csdev: component vitals needed by the framework. 
-- cgit v1.2.3 From f80c21509984190b7fd462ed2c9ac5a294a99dc5 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Fri, 27 Nov 2020 10:52:56 -0700 Subject: coresight-stm: Fix W=1 warning in STM driver CC drivers/hwtracing/coresight/coresight-cti-core.o CC drivers/hwtracing/coresight/coresight-cti-platform.o CC drivers/hwtracing/coresight/coresight-cti-sysfs.o drivers/hwtracing/coresight/coresight-stm.c:109: warning: Function parameter or member 'guaranteed' not described in 'channel_space' Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201127175256.1092685-16-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-stm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index b0ad912651a9..32d29704206b 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -96,7 +96,7 @@ module_param_named( boot_nr_channel, boot_nr_channel, int, S_IRUGO ); -/** +/* * struct channel_space - central management entity for extended ports * @base: memory mapped base address where channels start. * @phys: physical base address of channel region. -- cgit v1.2.3 From 206e7383b34316cf56cdde96eab9d97e9a1dbd70 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 26 Nov 2020 11:20:35 +0100 Subject: bus: mhi: core: Indexed MHI controller name Today the MHI controller name is simply cloned from the underlying bus device (its parent), that gives the following device structure for e.g. a MHI/PCI controller: devices/pci0000:00/0000:00:01.2/0000:02:00.0/0000:02:00.0 devices/pci0000:00/0000:00:01.2/0000:02:00.0/0000:02:00.0/0000:02:00.0_IPCR ... That's quite misleading/confusing and can cause device registering issues because of duplicate dev name (e.g. if a PCI device register two different MHI instances). This patch changes MHI core to create indexed mhi controller names (mhi0, mhi1...) in the same way as other busses (i2c0, usb0...). The previous example becomes: devices/pci0000:00/0000:00:01.2/0000:02:00.0/mhi0 devices/pci0000:00/0000:00:01.2/0000:02:00.0/mhi0/mhi0_IPCR ... v2: move index field at the end of mhi_controller struct (before bool) to avoid breaking well packed alignment. 
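For reference only (not part of this change), the naming scheme relies on the standard IDA helpers; the identifiers below are made up for the sketch and only mirror the pattern used by the patch:

	#include <linux/idr.h>
	#include <linux/device.h>

	static DEFINE_IDA(example_controller_ida);	/* plays the role of mhi_controller_ida */

	static int example_assign_name(struct device *dev)
	{
		int index;

		/* hand out the smallest free index: 0, 1, 2, ... */
		index = ida_alloc(&example_controller_ida, GFP_KERNEL);
		if (index < 0)
			return index;

		dev_set_name(dev, "mhi%d", index);	/* yields mhi0, mhi1, ... */
		return index;
	}

	static void example_release_name(int index)
	{
		/* return the index so a later registration can reuse it */
		ida_free(&example_controller_ida, index);
	}

Releasing the index on both the error path and at unregister time, as the patch does, keeps the namespace dense as controllers come and go.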
Signed-off-by: Loic Poulain Reviewed-by: Jeffrey Hugo Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 18 ++++++++++++++++-- drivers/bus/mhi/core/main.c | 2 +- include/linux/mhi.h | 2 ++ 3 files changed, 19 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 655d539c6808..1d6f7b6c1fcd 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,8 @@ #include #include "internal.h" +static DEFINE_IDA(mhi_controller_ida); + const char * const mhi_ee_str[MHI_EE_MAX] = { [MHI_EE_PBL] = "PBL", [MHI_EE_SBL] = "SBL", @@ -940,6 +943,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >> SOC_HW_VERSION_MINOR_VER_SHFT; + mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); + if (mhi_cntrl->index < 0) { + ret = mhi_cntrl->index; + goto error_ida_alloc; + } + /* Register controller with MHI bus */ mhi_dev = mhi_alloc_device(mhi_cntrl); if (IS_ERR(mhi_dev)) { @@ -950,8 +959,8 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; mhi_dev->mhi_cntrl = mhi_cntrl; - dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev)); - mhi_dev->name = dev_name(mhi_cntrl->cntrl_dev); + dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); /* Init wakeup source */ device_init_wakeup(&mhi_dev->dev, true); @@ -970,6 +979,9 @@ error_add_dev: put_device(&mhi_dev->dev); error_alloc_dev: + ida_free(&mhi_controller_ida, mhi_cntrl->index); + +error_ida_alloc: kfree(mhi_cntrl->mhi_cmd); error_alloc_cmd: @@ -1004,6 +1016,8 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) device_del(&mhi_dev->dev); put_device(&mhi_dev->dev); + + ida_free(&mhi_controller_ida, mhi_cntrl->index); } EXPORT_SYMBOL_GPL(mhi_unregister_controller); diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c index 4eb93d8bea1d..702c31b6aefa 100644 --- a/drivers/bus/mhi/core/main.c +++ b/drivers/bus/mhi/core/main.c @@ -331,7 +331,7 @@ void mhi_create_devices(struct mhi_controller *mhi_cntrl) /* Channel name is same for both UL and DL */ mhi_dev->name = mhi_chan->name; dev_set_name(&mhi_dev->dev, "%s_%s", - dev_name(mhi_cntrl->cntrl_dev), + dev_name(&mhi_cntrl->mhi_dev->dev), mhi_dev->name); /* Init wakeup source if available */ diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d31efcf02ae7..aa9757e71f1f 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -348,6 +348,7 @@ struct mhi_controller_config { * @read_reg: Read a MHI register via the physical link (required) * @write_reg: Write a MHI register via the physical link (required) * @buffer_len: Bounce buffer length + * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) * @pre_init: MHI host needs to do pre-initialization before power up @@ -438,6 +439,7 @@ struct mhi_controller { u32 val); size_t buffer_len; + int index; bool bounce_buf; bool fbc_download; bool pre_init; -- cgit v1.2.3 From 10ea8bcda5ae5463889e15dfc98aa2a73270ea58 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 25 Nov 2020 14:24:49 +0100 Subject: bus: mhi: core: Fix device hierarchy This patch fixes the hierarchical structure of MHI devices. 
Indeed, MHI client devices are directly 'enumerated' from the mhi controller and therefore must be direct descendants/children of their mhi controller device, in accordance with the Linux Device Model. Today both MHI clients and controller devices are at the same level, this patch ensures that MHI controller is parent of its client devices. The hierarchy is especially important for power management (safe suspend/resume order). It is also useful for userspace to determine relationship between MHI client devices and controllers. Signed-off-by: Loic Poulain Reviewed-by: Bjorn Andersson Reviewed-by: Manivannan Sadhasivam Reviewed-by: Hemant Kumar Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/debugfs.c | 4 +++- drivers/bus/mhi/core/init.c | 10 +++++++++- drivers/bus/mhi/core/pm.c | 4 ++-- 3 files changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/core/debugfs.c index 3a48801e01f4..7d43138ce66d 100644 --- a/drivers/bus/mhi/core/debugfs.c +++ b/drivers/bus/mhi/core/debugfs.c @@ -159,7 +159,9 @@ static int mhi_debugfs_devices_show(struct seq_file *m, void *d) return -ENODEV; } - device_for_each_child(mhi_cntrl->cntrl_dev, m, mhi_device_info_show); + /* Show controller and client(s) info */ + mhi_device_info_show(&mhi_cntrl->mhi_dev->dev, m); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, m, mhi_device_info_show); return 0; } diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 1d6f7b6c1fcd..96cde9c0034c 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -1144,7 +1144,15 @@ struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl) device_initialize(dev); dev->bus = &mhi_bus_type; dev->release = mhi_release_device; - dev->parent = mhi_cntrl->cntrl_dev; + + if (mhi_cntrl->mhi_dev) { + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + } else { + /* for MHI controller device, parent is the bus device (e.g. pci device) */ + dev->parent = mhi_cntrl->cntrl_dev; + } + mhi_dev->mhi_cntrl = mhi_cntrl; mhi_dev->dev_wake = 0; diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c index a671f585ce35..681960c72d2a 100644 --- a/drivers/bus/mhi/core/pm.c +++ b/drivers/bus/mhi/core/pm.c @@ -504,7 +504,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) wake_up_all(&mhi_cntrl->state_event); dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); mutex_lock(&mhi_cntrl->pm_mutex); @@ -637,7 +637,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl) wake_up_all(&mhi_cntrl->state_event); dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(mhi_cntrl->cntrl_dev, NULL, mhi_destroy_device); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); mutex_lock(&mhi_cntrl->pm_mutex); -- cgit v1.2.3 From 23c15ae615175637ae4449aa6e64c0584474ddaa Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 30 Sep 2020 14:25:55 +0300 Subject: habanalabs: change aggregate cs counters to atomic In case we will have multiple contexts/processes, we can't just increment aggregated counters. 
We need to make them atomic as they can be incremented by multiple processes Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 42 +++++++++++----------- drivers/misc/habanalabs/common/habanalabs.h | 17 ++++++++- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 18 ++++++++-- drivers/misc/habanalabs/common/hw_queue.c | 7 +++- 4 files changed, 59 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index b2b974ecc431..9d49dd1558af 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -242,20 +242,6 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) kfree(job); } -static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx) -{ - hdev->aggregated_cs_counters.device_in_reset_drop_cnt += - ctx->cs_counters.device_in_reset_drop_cnt; - hdev->aggregated_cs_counters.out_of_mem_drop_cnt += - ctx->cs_counters.out_of_mem_drop_cnt; - hdev->aggregated_cs_counters.parsing_drop_cnt += - ctx->cs_counters.parsing_drop_cnt; - hdev->aggregated_cs_counters.queue_full_drop_cnt += - ctx->cs_counters.queue_full_drop_cnt; - hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt += - ctx->cs_counters.max_cs_in_flight_drop_cnt; -} - static void cs_do_release(struct kref *ref) { struct hl_cs *cs = container_of(ref, struct hl_cs, @@ -358,7 +344,6 @@ static void cs_do_release(struct kref *ref) complete_all(&cs->fence->completion); hl_fence_put(cs->fence); - cs_counters_aggregate(hdev, cs->ctx); kfree(cs->jobs_in_queue_cnt); kfree(cs); @@ -397,11 +382,14 @@ static void cs_timedout(struct work_struct *work) static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, enum hl_cs_type cs_type, struct hl_cs **cs_new) { - struct hl_cs_compl *cs_cmpl; + struct hl_cs_counters_atomic *cntr; struct hl_fence *other = NULL; + struct hl_cs_compl *cs_cmpl; struct hl_cs *cs; int rc; + cntr = &hdev->aggregated_cs_counters; + cs = kzalloc(sizeof(*cs), GFP_ATOMIC); if (!cs) return -ENOMEM; @@ -436,6 +424,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, dev_dbg_ratelimited(hdev->dev, "Rejecting CS because of too many in-flights CS\n"); ctx->cs_counters.max_cs_in_flight_drop_cnt++; + atomic64_inc(&cntr->max_cs_in_flight_drop_cnt); rc = -EAGAIN; goto free_fence; } @@ -610,6 +599,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, { struct hl_device *hdev = hpriv->hdev; struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_counters_atomic *cntr; struct hl_cs_job *job; struct hl_cs *cs; struct hl_cb *cb; @@ -617,6 +607,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, u32 size_to_copy; int rc, i; + cntr = &hdev->aggregated_cs_counters; *cs_seq = ULLONG_MAX; if (num_chunks > HL_MAX_JOBS_PER_CS) { @@ -664,6 +655,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, &is_kernel_allocated_cb); if (rc) { hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&cntr->parsing_drop_cnt); goto free_cs_object; } @@ -671,6 +663,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); if (!cb) { hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&cntr->parsing_drop_cnt); rc = -EINVAL; goto free_cs_object; } @@ -685,6 +678,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, 
is_kernel_allocated_cb); if (!job) { hpriv->ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; if (is_kernel_allocated_cb) @@ -718,6 +712,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = cs_parser(hpriv, job); if (rc) { hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", cs->ctx->asid, cs->sequence, job->id, rc); @@ -727,6 +722,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, if (int_queues_only) { hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Reject CS %d.%llu because only internal queues jobs are present\n", cs->ctx->asid, cs->sequence); @@ -768,20 +764,22 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, void __user *chunks, u32 num_chunks, u64 *cs_seq) { - struct hl_device *hdev = hpriv->hdev; - struct hl_ctx *ctx = hpriv->ctx; + u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size; struct hl_cs_chunk *cs_chunk_array, *chunk; struct hw_queue_properties *hw_queue_prop; + u64 *signal_seq_arr = NULL, signal_seq; + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_counters_atomic *cntr; struct hl_fence *sig_fence = NULL; + struct hl_ctx *ctx = hpriv->ctx; + enum hl_queue_type q_type; struct hl_cs_job *job; struct hl_cs *cs; struct hl_cb *cb; - enum hl_queue_type q_type; - u64 *signal_seq_arr = NULL, signal_seq; - u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size; int rc; *cs_seq = ULLONG_MAX; + cntr = &hdev->aggregated_cs_counters; if (num_chunks > HL_MAX_JOBS_PER_CS) { dev_err(hdev->dev, @@ -920,6 +918,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, job = hl_cs_allocate_job(hdev, q_type, true); if (!job) { ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; goto put_cs; @@ -934,6 +933,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, q_type == QUEUE_TYPE_HW && hdev->mmu_enable); if (!cb) { ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); kfree(job); rc = -EFAULT; goto put_cs; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 6ed974d2def0..f493fb2c871c 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1067,6 +1067,21 @@ struct hl_cs_parser { u8 contains_dma_pkt; }; +/** + * struct hl_info_cs_counters - command submission counters + * @out_of_mem_drop_cnt: dropped due to memory allocation issue + * @parsing_drop_cnt: dropped due to error in packet parsing + * @queue_full_drop_cnt: dropped due to queue full + * @device_in_reset_drop_cnt: dropped due to device in reset + * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight + */ +struct hl_cs_counters_atomic { + atomic64_t out_of_mem_drop_cnt; + atomic64_t parsing_drop_cnt; + atomic64_t queue_full_drop_cnt; + atomic64_t device_in_reset_drop_cnt; + atomic64_t max_cs_in_flight_drop_cnt; +}; /* * MEMORY STRUCTURE @@ -1649,7 +1664,7 @@ struct hl_device { struct hl_device_idle_busy_ts *idle_busy_ts_arr; - struct hl_cs_counters aggregated_cs_counters; + struct hl_cs_counters_atomic aggregated_cs_counters; struct hl_mmu_priv mmu_priv; struct 
hl_mmu_funcs mmu_func; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 07317ea49129..350a768309bd 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -314,10 +314,13 @@ static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { - struct hl_device *hdev = hpriv->hdev; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; struct hl_info_cs_counters cs_counters = { {0} }; + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_counters_atomic *cntr; u32 max_size = args->return_size; - void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + cntr = &hdev->aggregated_cs_counters; if ((!max_size) || (!out)) return -EINVAL; @@ -325,6 +328,17 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters, sizeof(struct hl_cs_counters)); + cs_counters.cs_counters.out_of_mem_drop_cnt = + atomic64_read(&cntr->out_of_mem_drop_cnt); + cs_counters.cs_counters.parsing_drop_cnt = + atomic64_read(&cntr->parsing_drop_cnt); + cs_counters.cs_counters.queue_full_drop_cnt = + atomic64_read(&cntr->queue_full_drop_cnt); + cs_counters.cs_counters.device_in_reset_drop_cnt = + atomic64_read(&cntr->device_in_reset_drop_cnt); + cs_counters.cs_counters.max_cs_in_flight_drop_cnt = + atomic64_read(&cntr->max_cs_in_flight_drop_cnt); + if (hpriv->ctx) memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters, sizeof(struct hl_cs_counters)); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index 250cf9cefc06..f36e8a98affd 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -484,17 +484,21 @@ static void init_signal_wait_cs(struct hl_cs *cs) */ int hl_hw_queue_schedule_cs(struct hl_cs *cs) { + struct hl_cs_counters_atomic *cntr; struct hl_ctx *ctx = cs->ctx; struct hl_device *hdev = ctx->hdev; struct hl_cs_job *job, *tmp; struct hl_hw_queue *q; - u32 max_queues; int rc = 0, i, cq_cnt; + u32 max_queues; + + cntr = &hdev->aggregated_cs_counters; hdev->asic_funcs->hw_queues_lock(hdev); if (hl_device_disabled_or_in_reset(hdev)) { ctx->cs_counters.device_in_reset_drop_cnt++; + atomic64_inc(&cntr->device_in_reset_drop_cnt); dev_err(hdev->dev, "device is disabled or in reset, CS rejected!\n"); rc = -EPERM; @@ -528,6 +532,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) if (rc) { ctx->cs_counters.queue_full_drop_cnt++; + atomic64_inc(&cntr->queue_full_drop_cnt); goto unroll_cq_resv; } -- cgit v1.2.3 From 20b7525dc430aa97748c5ddaff9f791ce1022690 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Wed, 30 Sep 2020 15:51:10 +0300 Subject: habanalabs/gaudi: move mmu_prepare to context init Currently mmu_prepare is located at context switch. Since we support a single context, no reason to reconfigure the MMU registers every context switch. 
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 7ea6b4368a91..ac0b4e076148 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -4500,8 +4500,6 @@ static int gaudi_context_switch(struct hl_device *hdev, u32 asid) return rc; } - gaudi_mmu_prepare(hdev, asid); - gaudi_restore_user_registers(hdev); return 0; @@ -6353,6 +6351,8 @@ static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) static int gaudi_ctx_init(struct hl_ctx *ctx) { + gaudi_mmu_prepare(ctx->hdev, ctx->asid); + return 0; } -- cgit v1.2.3 From 28958207e9187da6171f3a418464a47216e0f3e2 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Thu, 1 Oct 2020 13:44:22 +0300 Subject: habanalabs: we need CPU queues for hwmon F/W can be loaded but device CPU queues disabled. In that case, HWMON should be disabled. This is only relevant when debugging Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/hwmon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c index 2ac29cb2fe61..892a5e2b0b9d 100644 --- a/drivers/misc/habanalabs/common/hwmon.c +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -542,7 +542,7 @@ int hl_hwmon_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - if ((hdev->hwmon_initialized) || !(hdev->fw_loading)) + if ((hdev->hwmon_initialized) || !(hdev->cpu_queues_enable)) return 0; if (hdev->hl_chip_info->info) { -- cgit v1.2.3 From 596553dbf9b2354c87eca5d96d96c09eb84b80e4 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Thu, 1 Oct 2020 13:46:37 +0300 Subject: habanalabs: support multiple types of firmwares The driver now loads the firmware in two stages. For debugging purposes we need to support situations where only the first stage firmware is loaded. 
Therefore, use a bitmask to determine which F/W is loaded Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 9 +++++++-- drivers/misc/habanalabs/common/habanalabs.h | 14 +++++++++++++- drivers/misc/habanalabs/common/habanalabs_drv.c | 1 + 3 files changed, 21 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index cd41c7ceb0e7..1340afa8ce3b 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -574,6 +574,9 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 status; int rc; + if (!(hdev->fw_loading & FW_TYPE_BOOT_CPU)) + return 0; + dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n", cpu_timeout / USEC_PER_SEC); @@ -631,6 +634,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, 10000, cpu_timeout); + dev_dbg(hdev->dev, "uboot status = %d\n", status); + /* Read U-Boot version now in case we will later fail */ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT); @@ -640,8 +645,8 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, goto out; } - if (!hdev->fw_loading) { - dev_info(hdev->dev, "Skip loading FW\n"); + if (!(hdev->fw_loading & FW_TYPE_LINUX)) { + dev_info(hdev->dev, "Skip loading Linux F/W\n"); goto out; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index f493fb2c871c..4ab5c28b14c8 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -124,6 +124,18 @@ enum hl_fw_component { FW_COMP_PREBOOT }; +/** + * enum hl_fw_types - F/W types to load + * @FW_TYPE_LINUX: Linux image for device CPU + * @FW_TYPE_BOOT_CPU: Boot image for device CPU + * @FW_TYPE_ALL_TYPES: Mask for all types + */ +enum hl_fw_types { + FW_TYPE_LINUX = 0x1, + FW_TYPE_BOOT_CPU = 0x2, + FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU) +}; + /** * enum hl_queue_type - Supported QUEUE types. * @QUEUE_TYPE_NA: queue is not available. @@ -1709,12 +1721,12 @@ struct hl_device { u8 supports_cb_mapping; /* Parameters for bring-up */ + u64 fw_loading; u8 mmu_enable; u8 mmu_huge_page_opt; u8 cpu_enable; u8 reset_pcilink; u8 cpu_queues_enable; - u8 fw_loading; u8 pldm; u8 axi_drain; u8 sram_scrambler_enable; diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index f9067d3ef437..4c49d6cefa98 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -241,6 +241,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev) hdev->dram_scrambler_enable = 1; hdev->bmc_enable = 1; hdev->hard_reset_on_fw_events = 1; + hdev->fw_loading = FW_TYPE_ALL_TYPES; } /* -- cgit v1.2.3 From 8f503146746b07ede1d0ceef728332e6bb50329d Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sat, 3 Oct 2020 00:14:27 +0300 Subject: habanalabs: minimize prints when everything is fine No need to print when the driver starts to initialize the H/W. Drivers should be silent when everything is OK. 
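A minimal sketch of that logging convention (illustrative only; example_do_reset() is a hypothetical placeholder, not a driver function):

	#include <linux/device.h>

	static int example_hw_init(struct device *dev)
	{
		int rc;

		/* routine progress: visible only when dynamic debug is enabled */
		dev_dbg(dev, "starting H/W initialization\n");

		rc = example_do_reset(dev);	/* hypothetical helper */
		if (rc)
			dev_err(dev, "H/W reset failed: %d\n", rc);	/* failures are still reported */

		return rc;
	}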
Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 2 -- drivers/misc/habanalabs/goya/goya.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index ac0b4e076148..817f14c6b858 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -2946,8 +2946,6 @@ static int gaudi_hw_init(struct hl_device *hdev) { int rc; - dev_info(hdev->dev, "Starting initialization of H/W\n"); - gaudi_pre_hw_init(hdev); gaudi_init_pci_dma_qmans(hdev); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 235d47b2420f..5b5e18629917 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2505,8 +2505,6 @@ static int goya_hw_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - dev_info(hdev->dev, "Starting initialization of H/W\n"); - /* Perform read from the device to make sure device is up */ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); -- cgit v1.2.3 From f3a965c25075285f4b7af48d8613610a403231b6 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sun, 4 Oct 2020 23:00:39 +0300 Subject: habanalabs: don't init vm module if no MMU In case we are running without MMU enabled (debug mode), no need to initialize the VM module in the driver. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/memory.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 84227819e4d1..75dd18771868 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1685,27 +1685,19 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) * In case of DRAM mapping, the returned address is the physical * address of the memory related to the given handle. 
*/ - if (ctx->hdev->mmu_enable) { - dram_range_start = prop->dmmu.start_addr; - dram_range_end = prop->dmmu.end_addr; - host_range_start = prop->pmmu.start_addr; - host_range_end = prop->pmmu.end_addr; - host_huge_range_start = prop->pmmu_huge.start_addr; - host_huge_range_end = prop->pmmu_huge.end_addr; - } else { - dram_range_start = prop->dram_user_base_address; - dram_range_end = prop->dram_end_address; - host_range_start = prop->dram_user_base_address; - host_range_end = prop->dram_end_address; - host_huge_range_start = prop->dram_user_base_address; - host_huge_range_end = prop->dram_end_address; - } + if (!ctx->hdev->mmu_enable) + return 0; + + dram_range_start = prop->dmmu.start_addr; + dram_range_end = prop->dmmu.end_addr; + host_range_start = prop->pmmu.start_addr; + host_range_end = prop->pmmu.end_addr; + host_huge_range_start = prop->pmmu_huge.start_addr; + host_huge_range_end = prop->pmmu_huge.end_addr; return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, - host_huge_range_start, - host_huge_range_end, - dram_range_start, - dram_range_end); + host_huge_range_start, host_huge_range_end, + dram_range_start, dram_range_end); } /* @@ -1737,6 +1729,9 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) struct hlist_node *tmp_node; int i; + if (!ctx->hdev->mmu_enable) + return; + hl_debugfs_remove_ctx_mem_hash(hdev, ctx); /* -- cgit v1.2.3 From 3cf74b3656a24bd4d3862354ec2c8f205ff464f0 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 10 Sep 2020 09:17:50 +0300 Subject: habanalabs: sync stream structures refactor Refactor sync stream implementation by adding more structures for better readability. In addition reducing allocated resources. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 78 +++++++---- drivers/misc/habanalabs/common/hw_queue.c | 197 ++++++++++++++++------------ drivers/misc/habanalabs/gaudi/gaudi.c | 37 +++--- drivers/misc/habanalabs/goya/goya.c | 4 +- 4 files changed, 180 insertions(+), 136 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 4ab5c28b14c8..0f128154bfa2 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -68,9 +68,6 @@ #define HL_RSVD_SOBS 4 #define HL_RSVD_MONS 2 -#define HL_RSVD_SOBS_IN_USE 2 -#define HL_RSVD_MONS_IN_USE 1 - #define HL_MAX_SOB_VAL (1 << 15) #define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0)) @@ -80,6 +77,22 @@ #define HL_MAX_DCORES 4 +/** + * struct hl_gen_wait_properties - properties for generating a wait CB + * @data: command buffer + * @q_idx: queue id is used to extract fence register address + * @sob_id: SOB id to use in this wait CB + * @sob_val: SOB value to wait for + * @mon_id: monitor to use in this wait CB + */ +struct hl_gen_wait_properties { + void *data; + u32 q_idx; + u16 sob_id; + u16 sob_val; + u16 mon_id; +}; + /** * struct pgt_info - MMU hop page info. * @node: hash linked-list node for the pgts shadow hash of pgts. @@ -502,9 +515,27 @@ struct hl_cs_job; #define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M /** - * struct hl_hw_queue - describes a H/W transport queue. + * struct hl_sync_stream_properties - + * describes a H/W queue sync stream properties * @hw_sob: array of the used H/W SOBs by this H/W queue. + * @next_sob_val: the next value to use for the currently used SOB. + * @base_sob_id: the base SOB id of the SOBs used by this queue. 
+ * @base_mon_id: the base MON id of the MONs used by this queue. + * @curr_sob_offset: the id offset to the currently used SOB from the + * HL_RSVD_SOBS that are being used by this queue. + */ +struct hl_sync_stream_properties { + struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; + u16 next_sob_val; + u16 base_sob_id; + u16 base_mon_id; + u8 curr_sob_offset; +}; + +/** + * struct hl_hw_queue - describes a H/W transport queue. * @shadow_queue: pointer to a shadow queue that holds pointers to jobs. + * @sync_stream_prop: sync stream queue properties * @queue_type: type of queue. * @kernel_address: holds the queue's kernel virtual address. * @bus_address: holds the queue's DMA address. @@ -514,33 +545,24 @@ struct hl_cs_job; * @cq_id: the id for the corresponding CQ for this H/W queue. * @msi_vec: the IRQ number of the H/W queue. * @int_queue_len: length of internal queue (number of entries). - * @next_sob_val: the next value to use for the currently used SOB. - * @base_sob_id: the base SOB id of the SOBs used by this queue. - * @base_mon_id: the base MON id of the MONs used by this queue. * @valid: is the queue valid (we have array of 32 queues, not all of them * exist). - * @curr_sob_offset: the id offset to the currently used SOB from the - * HL_RSVD_SOBS that are being used by this queue. * @supports_sync_stream: True if queue supports sync stream */ struct hl_hw_queue { - struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; - struct hl_cs_job **shadow_queue; - enum hl_queue_type queue_type; - void *kernel_address; - dma_addr_t bus_address; - u32 pi; - atomic_t ci; - u32 hw_queue_id; - u32 cq_id; - u32 msi_vec; - u16 int_queue_len; - u16 next_sob_val; - u16 base_sob_id; - u16 base_mon_id; - u8 valid; - u8 curr_sob_offset; - u8 supports_sync_stream; + struct hl_cs_job **shadow_queue; + struct hl_sync_stream_properties sync_stream_prop; + enum hl_queue_type queue_type; + void *kernel_address; + dma_addr_t bus_address; + u32 pi; + atomic_t ci; + u32 hw_queue_id; + u32 cq_id; + u32 msi_vec; + u16 int_queue_len; + u8 valid; + u8 supports_sync_stream; }; /** @@ -823,8 +845,8 @@ struct hl_asic_funcs { u32 (*get_signal_cb_size)(struct hl_device *hdev); u32 (*get_wait_cb_size)(struct hl_device *hdev); void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id); - void (*gen_wait_cb)(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx); + void (*gen_wait_cb)(struct hl_device *hdev, + struct hl_gen_wait_properties *prop); void (*reset_sob)(struct hl_device *hdev, void *data); void (*set_dma_mask_from_fw)(struct hl_device *hdev); u64 (*get_device_time)(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index f36e8a98affd..f2d094063159 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -388,6 +388,89 @@ static void hw_queue_schedule_job(struct hl_cs_job *job) ext_and_hw_queue_submit_bd(hdev, q, ctl, len, ptr); } +static void init_signal_cs(struct hl_device *hdev, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) +{ + struct hl_sync_stream_properties *prop; + struct hl_hw_sob *hw_sob; + u32 q_idx; + + q_idx = job->hw_queue_id; + prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + hw_sob = &prop->hw_sob[prop->curr_sob_offset]; + + cs_cmpl->hw_sob = hw_sob; + cs_cmpl->sob_val = prop->next_sob_val++; + + dev_dbg(hdev->dev, + "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); + + 
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, + cs_cmpl->hw_sob->sob_id); + + kref_get(&hw_sob->kref); + + /* check for wraparound */ + if (prop->next_sob_val == HL_MAX_SOB_VAL) { + /* + * Decrement as we reached the max value. + * The release function won't be called here as we've + * just incremented the refcount. + */ + kref_put(&hw_sob->kref, hl_sob_reset_error); + prop->next_sob_val = 1; + /* only two SOBs are currently in use */ + prop->curr_sob_offset = + (prop->curr_sob_offset + 1) % HL_RSVD_SOBS; + + dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", + prop->curr_sob_offset, q_idx); + } +} + +static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) +{ + struct hl_cs_compl *signal_cs_cmpl; + struct hl_sync_stream_properties *prop; + struct hl_gen_wait_properties wait_prop; + u32 q_idx; + + q_idx = job->hw_queue_id; + prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + + signal_cs_cmpl = container_of(cs->signal_fence, + struct hl_cs_compl, + base_fence); + + /* copy the SOB id and value of the signal CS */ + cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; + cs_cmpl->sob_val = signal_cs_cmpl->sob_val; + + dev_dbg(hdev->dev, + "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, + prop->base_mon_id, q_idx); + + wait_prop.data = (void *) job->patched_cb; + wait_prop.sob_id = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_val = cs_cmpl->sob_val; + wait_prop.mon_id = prop->base_mon_id; + wait_prop.q_idx = q_idx; + hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop); + + kref_get(&cs_cmpl->hw_sob->kref); + /* + * Must put the signal fence after the SOB refcnt increment so + * the SOB refcnt won't turn 0 and reset the SOB before the + * wait CS was submitted. + */ + mb(); + hl_fence_put(cs->signal_fence); + cs->signal_fence = NULL; +} + /* * init_signal_wait_cs - initialize a signal/wait CS * @cs: pointer to the signal/wait CS @@ -398,84 +481,18 @@ static void init_signal_wait_cs(struct hl_cs *cs) { struct hl_ctx *ctx = cs->ctx; struct hl_device *hdev = ctx->hdev; - struct hl_hw_queue *hw_queue; + struct hl_cs_job *job; struct hl_cs_compl *cs_cmpl = container_of(cs->fence, struct hl_cs_compl, base_fence); - struct hl_hw_sob *hw_sob; - struct hl_cs_job *job; - u32 q_idx; - /* There is only one job in a signal/wait CS */ job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); - q_idx = job->hw_queue_id; - hw_queue = &hdev->kernel_queues[q_idx]; - - if (cs->type & CS_TYPE_SIGNAL) { - hw_sob = &hw_queue->hw_sob[hw_queue->curr_sob_offset]; - - cs_cmpl->hw_sob = hw_sob; - cs_cmpl->sob_val = hw_queue->next_sob_val++; - - dev_dbg(hdev->dev, - "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n", - cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); - - hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, - cs_cmpl->hw_sob->sob_id); - - kref_get(&hw_sob->kref); - - /* check for wraparound */ - if (hw_queue->next_sob_val == HL_MAX_SOB_VAL) { - /* - * Decrement as we reached the max value. - * The release function won't be called here as we've - * just incremented the refcount. 
- */ - kref_put(&hw_sob->kref, hl_sob_reset_error); - hw_queue->next_sob_val = 1; - /* only two SOBs are currently in use */ - hw_queue->curr_sob_offset = - (hw_queue->curr_sob_offset + 1) % - HL_RSVD_SOBS_IN_USE; - - dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n", - hw_queue->curr_sob_offset, q_idx); - } - } else if (cs->type & CS_TYPE_WAIT) { - struct hl_cs_compl *signal_cs_cmpl; - - signal_cs_cmpl = container_of(cs->signal_fence, - struct hl_cs_compl, - base_fence); - - /* copy the the SOB id and value of the signal CS */ - cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; - cs_cmpl->sob_val = signal_cs_cmpl->sob_val; - dev_dbg(hdev->dev, - "generate wait CB, sob_id: %d, sob_val: 0x%x, mon_id: %d, q_idx: %d\n", - cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, - hw_queue->base_mon_id, q_idx); - - hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb, - cs_cmpl->hw_sob->sob_id, - cs_cmpl->sob_val, - hw_queue->base_mon_id, - q_idx); - - kref_get(&cs_cmpl->hw_sob->kref); - /* - * Must put the signal fence after the SOB refcnt increment so - * the SOB refcnt won't turn 0 and reset the SOB before the - * wait CS was submitted. - */ - mb(); - hl_fence_put(cs->signal_fence); - cs->signal_fence = NULL; - } + if (cs->type & CS_TYPE_SIGNAL) + init_signal_cs(hdev, job, cs_cmpl); + else if (cs->type & CS_TYPE_WAIT) + init_wait_cs(hdev, cs, job, cs_cmpl); } /* @@ -719,22 +736,28 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) { - struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; + struct hl_sync_stream_properties *sync_stream_prop; struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_hw_sob *hw_sob; - int sob, queue_idx = hdev->sync_stream_queue_idx++; + int sob, queue_idx; + + if (!hdev->kernel_queues[q_idx].supports_sync_stream) + return; + + sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + queue_idx = hdev->sync_stream_queue_idx++; - hw_queue->base_sob_id = - prop->sync_stream_first_sob + queue_idx * HL_RSVD_SOBS; - hw_queue->base_mon_id = - prop->sync_stream_first_mon + queue_idx * HL_RSVD_MONS; - hw_queue->next_sob_val = 1; - hw_queue->curr_sob_offset = 0; + sync_stream_prop->base_sob_id = prop->sync_stream_first_sob + + (queue_idx * HL_RSVD_SOBS); + sync_stream_prop->base_mon_id = prop->sync_stream_first_mon + + (queue_idx * HL_RSVD_MONS); + sync_stream_prop->next_sob_val = 1; + sync_stream_prop->curr_sob_offset = 0; for (sob = 0 ; sob < HL_RSVD_SOBS ; sob++) { - hw_sob = &hw_queue->hw_sob[sob]; + hw_sob = &sync_stream_prop->hw_sob[sob]; hw_sob->hdev = hdev; - hw_sob->sob_id = hw_queue->base_sob_id + sob; + hw_sob->sob_id = sync_stream_prop->base_sob_id + sob; hw_sob->q_idx = q_idx; kref_init(&hw_sob->kref); } @@ -742,15 +765,16 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) static void sync_stream_queue_reset(struct hl_device *hdev, u32 q_idx) { - struct hl_hw_queue *hw_queue = &hdev->kernel_queues[q_idx]; + struct hl_sync_stream_properties *prop = + &hdev->kernel_queues[q_idx].sync_stream_prop; /* * In case we got here due to a stuck CS, the refcnt might be bigger * than 1 and therefore we reset it. 
*/ - kref_init(&hw_queue->hw_sob[hw_queue->curr_sob_offset].kref); - hw_queue->curr_sob_offset = 0; - hw_queue->next_sob_val = 1; + kref_init(&prop->hw_sob[prop->curr_sob_offset].kref); + prop->curr_sob_offset = 0; + prop->next_sob_val = 1; } /* @@ -793,8 +817,7 @@ static int queue_init(struct hl_device *hdev, struct hl_hw_queue *q, break; } - if (q->supports_sync_stream) - sync_stream_queue_init(hdev, q->hw_queue_id); + sync_stream_queue_init(hdev, q->hw_queue_id); if (rc) return rc; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 817f14c6b858..d5c6c00ea79f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -472,9 +472,11 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->max_pending_cs = GAUDI_MAX_PENDING_CS; prop->first_available_user_sob[HL_GAUDI_WS_DCORE] = - num_sync_stream_queues * HL_RSVD_SOBS; + prop->sync_stream_first_sob + + (num_sync_stream_queues * HL_RSVD_SOBS); prop->first_available_user_mon[HL_GAUDI_WS_DCORE] = - num_sync_stream_queues * HL_RSVD_MONS; + prop->sync_stream_first_mon + + (num_sync_stream_queues * HL_RSVD_MONS); return 0; } @@ -6466,16 +6468,16 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) return pkt_size; } -static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx) +static void gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) { - struct hl_cb *cb = (struct hl_cb *) data; + struct hl_cb *cb = (struct hl_cb *) prop->data; void *buf = cb->kernel_address; u64 monitor_base, fence_addr = 0; u32 size = 0; u16 msg_addr_offset; - switch (q_idx) { + switch (prop->q_idx) { case GAUDI_QUEUE_ID_DMA_0_0: fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0; break; @@ -6515,7 +6517,7 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, default: /* queue index should be valid here */ dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", - q_idx); + prop->q_idx); return; } @@ -6528,17 +6530,15 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; /* First monitor config packet: low address of the sync */ - msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - - monitor_base; + msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + + prop->mon_id * 4) - monitor_base; size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr, msg_addr_offset); /* Second monitor config packet: high address of the sync */ - msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - - monitor_base; + msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + + prop->mon_id * 4) - monitor_base; size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), msg_addr_offset); @@ -6547,18 +6547,17 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, * Third monitor config packet: the payload, i.e. 
what to write when the * sync triggers */ - msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - - monitor_base; + msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + + prop->mon_id * 4) - monitor_base; size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset); /* Fourth monitor config packet: bind the monitor to a sync object */ msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - monitor_base; - size += gaudi_add_arm_monitor_pkt(buf + size, sob_id, sob_val, - msg_addr_offset); + size += gaudi_add_arm_monitor_pkt(buf + size, prop->sob_id, + prop->sob_val, msg_addr_offset); /* Fence packet */ size += gaudi_add_fence_pkt(buf + size); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 5b5e18629917..c0ab91592744 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5293,8 +5293,8 @@ static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) } -static void goya_gen_wait_cb(struct hl_device *hdev, void *data, u16 sob_id, - u16 sob_val, u16 mon_id, u32 q_idx) +static void goya_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) { } -- cgit v1.2.3 From 2992c1dcd314b8140131298d3ad245de1ba1821b Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 10 Sep 2020 09:40:35 +0300 Subject: habanalabs: add support for multiple SOBs per monitor Support advanced monitor functionality to monitor more than a single SOB. In addition expand all CB generation functions with buffer offset in order to put in them multiple packets that are generated by different functions. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 32 +++++ drivers/misc/habanalabs/common/habanalabs.h | 16 ++- drivers/misc/habanalabs/common/hw_queue.c | 6 +- drivers/misc/habanalabs/gaudi/gaudi.c | 136 ++++++++++++++------- drivers/misc/habanalabs/goya/goya.c | 9 +- 5 files changed, 142 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 9d49dd1558af..0d82c7dd93d0 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -38,6 +38,38 @@ void hl_sob_reset_error(struct kref *ref) hw_sob->q_idx, hw_sob->sob_id); } +/** + * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet + * @sob_base: sob base id + * @sob_mask: sob user mask, each bit represents a sob offset from sob base + * @mask: generated mask + * + * Return: 0 if given parameters are valid + */ +int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask) +{ + int i; + + if (sob_mask == 0) + return -EINVAL; + + if (sob_mask == 0x1) { + *mask = ~(1 << (sob_base & 0x7)); + } else { + /* find msb in order to verify sob range is valid */ + for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--) + if (BIT(i) & sob_mask) + break; + + if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1)) + return -EINVAL; + + *mask = ~sob_mask; + } + + return 0; +} + static void hl_fence_release(struct kref *kref) { struct hl_fence *fence = diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 0f128154bfa2..c61967213f89 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ 
b/drivers/misc/habanalabs/common/habanalabs.h @@ -77,20 +77,26 @@ #define HL_MAX_DCORES 4 +#define HL_MAX_SOBS_PER_MONITOR 8 + /** * struct hl_gen_wait_properties - properties for generating a wait CB * @data: command buffer * @q_idx: queue id is used to extract fence register address - * @sob_id: SOB id to use in this wait CB + * @size: offset in command buffer + * @sob_base: SOB base to use in this wait CB * @sob_val: SOB value to wait for * @mon_id: monitor to use in this wait CB + * @sob_mask: each bit represents a SOB offset from sob_base to be used */ struct hl_gen_wait_properties { void *data; u32 q_idx; - u16 sob_id; + u32 size; + u16 sob_base; u16 sob_val; u16 mon_id; + u8 sob_mask; }; /** @@ -844,8 +850,9 @@ struct hl_asic_funcs { int (*load_boot_fit_to_device)(struct hl_device *hdev); u32 (*get_signal_cb_size)(struct hl_device *hdev); u32 (*get_wait_cb_size)(struct hl_device *hdev); - void (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id); - void (*gen_wait_cb)(struct hl_device *hdev, + u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id, + u32 size); + u32 (*gen_wait_cb)(struct hl_device *hdev, struct hl_gen_wait_properties *prop); void (*reset_sob)(struct hl_device *hdev, void *data); void (*set_dma_mask_from_fw)(struct hl_device *hdev); @@ -1927,6 +1934,7 @@ void hl_cs_rollback_all(struct hl_device *hdev); struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, enum hl_queue_type queue_type, bool is_kernel_allocated_cb); void hl_sob_reset_error(struct kref *ref); +int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask); void hl_fence_put(struct hl_fence *fence); void hl_fence_get(struct hl_fence *fence); diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index f2d094063159..be1d0e2c99d8 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -407,7 +407,7 @@ static void init_signal_cs(struct hl_device *hdev, cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx); hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb, - cs_cmpl->hw_sob->sob_id); + cs_cmpl->hw_sob->sob_id, 0); kref_get(&hw_sob->kref); @@ -454,10 +454,12 @@ static void init_wait_cs(struct hl_device *hdev, struct hl_cs *cs, prop->base_mon_id, q_idx); wait_prop.data = (void *) job->patched_cb; - wait_prop.sob_id = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_mask = 0x1; wait_prop.sob_val = cs_cmpl->sob_val; wait_prop.mon_id = prop->base_mon_id; wait_prop.q_idx = q_idx; + wait_prop.size = 0; hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop); kref_get(&cs_cmpl->hw_sob->kref); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index d5c6c00ea79f..40cd561e57f4 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -6374,14 +6374,15 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev) sizeof(struct packet_msg_prot) * 2; } -static void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size) { struct hl_cb *cb = (struct hl_cb *) data; struct packet_msg_short *pkt; - u32 value, ctl; + u32 value, ctl, pkt_size = sizeof(*pkt); - pkt = cb->kernel_address; - memset(pkt, 0, sizeof(*pkt)); + pkt = cb->kernel_address + size; + memset(pkt, 0, pkt_size); /* Inc by 1, Mode ADD */ value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1); @@ -6397,6 +6398,8 @@ static 
void gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) pkt->value = cpu_to_le32(value); pkt->ctl = cpu_to_le32(ctl); + + return size + pkt_size; } static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, @@ -6419,21 +6422,42 @@ static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, return pkt_size; } -static u32 gaudi_add_arm_monitor_pkt(struct packet_msg_short *pkt, u16 sob_id, - u16 sob_val, u16 addr) +static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev, + struct packet_msg_short *pkt, u16 sob_base, u8 sob_mask, + u16 sob_val, u16 mon_id) { + u64 monitor_base; u32 ctl, value, pkt_size = sizeof(*pkt); - u8 mask = ~(1 << (sob_id & 0x7)); + u16 msg_addr_offset; + u8 mask; + + if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) { + dev_err(hdev->dev, + "sob_base %u (mask %#x) is not valid\n", + sob_base, sob_mask); + return 0; + } + + /* + * monitor_base should be the content of the base0 address registers, + * so it will be added to the msg short offsets + */ + monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; + + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - + monitor_base; memset(pkt, 0, pkt_size); - value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_id / 8); + /* Monitor config packet: bind the monitor to a sync object */ + value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8); value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val); value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL*/ value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask); - ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr); + ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset); ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); @@ -6468,60 +6492,61 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) return pkt_size; } -static void gaudi_gen_wait_cb(struct hl_device *hdev, - struct hl_gen_wait_properties *prop) +static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr) { - struct hl_cb *cb = (struct hl_cb *) prop->data; - void *buf = cb->kernel_address; - u64 monitor_base, fence_addr = 0; - u32 size = 0; - u16 msg_addr_offset; + u32 offset; - switch (prop->q_idx) { + switch (queue_id) { case GAUDI_QUEUE_ID_DMA_0_0: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_0; + offset = mmDMA0_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_0_1: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_1; + offset = mmDMA0_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_0_2: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_2; + offset = mmDMA0_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_0_3: - fence_addr = mmDMA0_QM_CP_FENCE2_RDATA_3; + offset = mmDMA0_QM_CP_FENCE2_RDATA_3; break; case GAUDI_QUEUE_ID_DMA_1_0: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_0; + offset = mmDMA1_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_1_1: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_1; + offset = mmDMA1_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_1_2: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_2; + offset = mmDMA1_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_1_3: - fence_addr = mmDMA1_QM_CP_FENCE2_RDATA_3; + offset = mmDMA1_QM_CP_FENCE2_RDATA_3; break; case GAUDI_QUEUE_ID_DMA_5_0: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_0; + offset = 
mmDMA5_QM_CP_FENCE2_RDATA_0; break; case GAUDI_QUEUE_ID_DMA_5_1: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_1; + offset = mmDMA5_QM_CP_FENCE2_RDATA_1; break; case GAUDI_QUEUE_ID_DMA_5_2: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_2; + offset = mmDMA5_QM_CP_FENCE2_RDATA_2; break; case GAUDI_QUEUE_ID_DMA_5_3: - fence_addr = mmDMA5_QM_CP_FENCE2_RDATA_3; + offset = mmDMA5_QM_CP_FENCE2_RDATA_3; break; default: - /* queue index should be valid here */ - dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", - prop->q_idx); - return; + return -EINVAL; } - fence_addr += CFG_BASE; + *addr = CFG_BASE + offset; + + return 0; +} + +static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr) +{ + u64 monitor_base; + u32 size = 0; + u16 msg_addr_offset; /* * monitor_base should be the content of the base0 address registers, @@ -6530,15 +6555,17 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; /* First monitor config packet: low address of the sync */ - msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + - prop->mon_id * 4) - monitor_base; + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - + monitor_base; size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr, msg_addr_offset); /* Second monitor config packet: high address of the sync */ - msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + - prop->mon_id * 4) - monitor_base; + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - + monitor_base; size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), msg_addr_offset); @@ -6547,20 +6574,35 @@ static void gaudi_gen_wait_cb(struct hl_device *hdev, * Third monitor config packet: the payload, i.e. 
what to write when the * sync triggers */ - msg_addr_offset = (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + - prop->mon_id * 4) - monitor_base; + msg_addr_offset = + (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - + monitor_base; size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset); - /* Fourth monitor config packet: bind the monitor to a sync object */ - msg_addr_offset = - (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - - monitor_base; - size += gaudi_add_arm_monitor_pkt(buf + size, prop->sob_id, - prop->sob_val, msg_addr_offset); + return size; +} + +u32 gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) +{ + struct hl_cb *cb = (struct hl_cb *) prop->data; + void *buf = cb->kernel_address; + u64 fence_addr = 0; + u32 size = prop->size; - /* Fence packet */ + if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) { + dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", + prop->q_idx); + return 0; + } + + size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr); + size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, + prop->sob_mask, prop->sob_val, prop->mon_id); size += gaudi_add_fence_pkt(buf + size); + + return size; } static void gaudi_reset_sob(struct hl_device *hdev, void *data) diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index c0ab91592744..d873f613acb0 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5288,15 +5288,16 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev) return 0; } -static void goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id) +static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size) { - + return 0; } -static void goya_gen_wait_cb(struct hl_device *hdev, +static u32 goya_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop) { - + return 0; } static void goya_reset_sob(struct hl_device *hdev, void *data) -- cgit v1.2.3 From 06f791f74fab6bf056013fe723d580d47199fdb3 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 10 Sep 2020 09:43:43 +0300 Subject: habanalabs: sync stream refactor functions Refactor sync stream implementation by reducing function length for better readability. 
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 210 ++++++++++++--------- 1 file changed, 118 insertions(+), 92 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 0d82c7dd93d0..df6393a98d19 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -792,26 +792,123 @@ out: return rc; } +static int cs_ioctl_extract_signal_seq(struct hl_device *hdev, + struct hl_cs_chunk *chunk, u64 *signal_seq) +{ + u64 *signal_seq_arr = NULL; + u32 size_to_copy, signal_seq_arr_len; + int rc = 0; + + signal_seq_arr_len = chunk->num_signal_seq_arr; + + /* currently only one signal seq is supported */ + if (signal_seq_arr_len != 1) { + dev_err(hdev->dev, + "Wait for signal CS supports only one signal CS seq\n"); + return -EINVAL; + } + + signal_seq_arr = kmalloc_array(signal_seq_arr_len, + sizeof(*signal_seq_arr), + GFP_ATOMIC); + if (!signal_seq_arr) + return -ENOMEM; + + size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr); + if (copy_from_user(signal_seq_arr, + u64_to_user_ptr(chunk->signal_seq_arr), + size_to_copy)) { + dev_err(hdev->dev, + "Failed to copy signal seq array from user\n"); + rc = -EFAULT; + goto out; + } + + /* currently it is guaranteed to have only one signal seq */ + *signal_seq = signal_seq_arr[0]; + +out: + kfree(signal_seq_arr); + + return rc; +} + +static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, enum hl_queue_type q_type, + u32 q_idx) +{ + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cb *cb; + u32 cb_size; + + cntr = &hdev->aggregated_cs_counters; + + job = hl_cs_allocate_job(hdev, q_type, true); + if (!job) { + ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); + dev_err(hdev->dev, "Failed to allocate a new job\n"); + return -ENOMEM; + } + + if (cs->type == CS_TYPE_WAIT) + cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); + else + cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); + + cb = hl_cb_kernel_create(hdev, cb_size, + q_type == QUEUE_TYPE_HW && hdev->mmu_enable); + if (!cb) { + ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); + kfree(job); + return -EFAULT; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = q_idx; + + /* + * No need in parsing, user CB is the patched CB. + * We call hl_cb_destroy() out of two reasons - we don't need the CB in + * the CB idr anymore and to decrement its refcount as it was + * incremented inside hl_cb_kernel_create(). 
+ */ + job->patched_cb = job->user_cb; + job->job_cb_size = job->user_cb_size; + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + hl_debugfs_add_job(hdev, job); + + return 0; +} + static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, void __user *chunks, u32 num_chunks, u64 *cs_seq) { - u32 size_to_copy, q_idx, signal_seq_arr_len, cb_size; + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; struct hl_cs_chunk *cs_chunk_array, *chunk; struct hw_queue_properties *hw_queue_prop; - u64 *signal_seq_arr = NULL, signal_seq; - struct hl_device *hdev = hpriv->hdev; - struct hl_cs_counters_atomic *cntr; struct hl_fence *sig_fence = NULL; - struct hl_ctx *ctx = hpriv->ctx; - enum hl_queue_type q_type; - struct hl_cs_job *job; + struct hl_cs_compl *sig_waitcs_cmpl; struct hl_cs *cs; - struct hl_cb *cb; + enum hl_queue_type q_type; + u32 size_to_copy, q_idx; + u64 signal_seq; int rc; *cs_seq = ULLONG_MAX; - cntr = &hdev->aggregated_cs_counters; if (num_chunks > HL_MAX_JOBS_PER_CS) { dev_err(hdev->dev, @@ -857,52 +954,23 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, } if (cs_type == CS_TYPE_WAIT) { - struct hl_cs_compl *sig_waitcs_cmpl; - - signal_seq_arr_len = chunk->num_signal_seq_arr; - - /* currently only one signal seq is supported */ - if (signal_seq_arr_len != 1) { - dev_err(hdev->dev, - "Wait for signal CS supports only one signal CS seq\n"); - rc = -EINVAL; + rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq); + if (rc) goto free_cs_chunk_array; - } - signal_seq_arr = kmalloc_array(signal_seq_arr_len, - sizeof(*signal_seq_arr), - GFP_ATOMIC); - if (!signal_seq_arr) { - rc = -ENOMEM; - goto free_cs_chunk_array; - } - - size_to_copy = chunk->num_signal_seq_arr * - sizeof(*signal_seq_arr); - if (copy_from_user(signal_seq_arr, - u64_to_user_ptr(chunk->signal_seq_arr), - size_to_copy)) { - dev_err(hdev->dev, - "Failed to copy signal seq array from user\n"); - rc = -EFAULT; - goto free_signal_seq_array; - } - - /* currently it is guaranteed to have only one signal seq */ - signal_seq = signal_seq_arr[0]; sig_fence = hl_ctx_get_fence(ctx, signal_seq); if (IS_ERR(sig_fence)) { dev_err(hdev->dev, "Failed to get signal CS with seq 0x%llx\n", signal_seq); rc = PTR_ERR(sig_fence); - goto free_signal_seq_array; + goto free_cs_chunk_array; } if (!sig_fence) { /* signal CS already finished */ rc = 0; - goto free_signal_seq_array; + goto free_cs_chunk_array; } sig_waitcs_cmpl = @@ -914,14 +982,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, signal_seq); hl_fence_put(sig_fence); rc = -EINVAL; - goto free_signal_seq_array; + goto free_cs_chunk_array; } if (completion_done(&sig_fence->completion)) { /* signal CS already finished */ hl_fence_put(sig_fence); rc = 0; - goto free_signal_seq_array; + goto free_cs_chunk_array; } } @@ -933,70 +1001,31 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, if (cs_type == CS_TYPE_WAIT) hl_fence_put(sig_fence); hl_ctx_put(ctx); - goto free_signal_seq_array; + goto free_cs_chunk_array; } /* * Save the signal CS fence for later initialization right before * hanging the wait CS on the queue. 
*/ - if (cs->type == CS_TYPE_WAIT) + if (cs_type == CS_TYPE_WAIT) cs->signal_fence = sig_fence; hl_debugfs_add_cs(cs); *cs_seq = cs->sequence; - job = hl_cs_allocate_job(hdev, q_type, true); - if (!job) { - ctx->cs_counters.out_of_mem_drop_cnt++; - atomic64_inc(&cntr->out_of_mem_drop_cnt); - dev_err(hdev->dev, "Failed to allocate a new job\n"); - rc = -ENOMEM; - goto put_cs; - } - - if (cs->type == CS_TYPE_WAIT) - cb_size = hdev->asic_funcs->get_wait_cb_size(hdev); - else - cb_size = hdev->asic_funcs->get_signal_cb_size(hdev); + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) + rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, + q_idx); - cb = hl_cb_kernel_create(hdev, cb_size, - q_type == QUEUE_TYPE_HW && hdev->mmu_enable); - if (!cb) { - ctx->cs_counters.out_of_mem_drop_cnt++; - atomic64_inc(&cntr->out_of_mem_drop_cnt); - kfree(job); - rc = -EFAULT; + if (rc) goto put_cs; - } - job->id = 0; - job->cs = cs; - job->user_cb = cb; - job->user_cb->cs_cnt++; - job->user_cb_size = cb_size; - job->hw_queue_id = q_idx; - - /* - * No need in parsing, user CB is the patched CB. - * We call hl_cb_destroy() out of two reasons - we don't need the CB in - * the CB idr anymore and to decrement its refcount as it was - * incremented inside hl_cb_kernel_create(). - */ - job->patched_cb = job->user_cb; - job->job_cb_size = job->user_cb_size; - hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); - - cs->jobs_in_queue_cnt[job->hw_queue_id]++; - - list_add_tail(&job->cs_node, &cs->job_list); /* increment refcount as for external queues we get completion */ cs_get(cs); - hl_debugfs_add_job(hdev, job); - rc = hl_hw_queue_schedule_cs(cs); if (rc) { if (rc != -EAGAIN) @@ -1016,9 +1045,6 @@ free_cs_object: put_cs: /* We finished with the CS in this function, so put the ref */ cs_put(cs); -free_signal_seq_array: - if (cs_type == CS_TYPE_WAIT) - kfree(signal_seq_arr); free_cs_chunk_array: kfree(cs_chunk_array); out: -- cgit v1.2.3 From becce5f9948db2d172334d5fdde35eca4a3171f3 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 12 Oct 2020 20:56:33 +0300 Subject: habanalabs: remove duplicate check We already check if queue index is smaller than max queues a few lines above this check so no need to check this again. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index df6393a98d19..0e37aad85930 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -946,9 +946,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, hw_queue_prop = &hdev->asic_prop.hw_queues_props[q_idx]; q_type = hw_queue_prop->type; - if ((q_idx >= hdev->asic_prop.max_queues) || - (!hw_queue_prop->supports_sync_stream)) { - dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx); + if (!hw_queue_prop->supports_sync_stream) { + dev_err(hdev->dev, + "Queue index %d does not support sync stream operations\n", + q_idx); rc = -EINVAL; goto free_cs_chunk_array; } -- cgit v1.2.3 From 16ac36504548e70b78c552ead1cd5e9d6477e0d3 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 2 Nov 2020 21:00:18 +0200 Subject: habanalabs/gaudi: add NIC QMAN H/W and registers definitions Add auto-generated header files that describe the NIC QMANs registers used by the driver. 
Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 14 +- .../include/gaudi/asic_reg/nic0_qm0_masks.h | 800 ++++++++++++++++++++ .../include/gaudi/asic_reg/nic0_qm0_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic0_qm1_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic1_qm0_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic1_qm1_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic2_qm0_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic2_qm1_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic3_qm0_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic3_qm1_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic4_qm0_regs.h | 834 +++++++++++++++++++++ .../include/gaudi/asic_reg/nic4_qm1_regs.h | 834 +++++++++++++++++++++ .../misc/habanalabs/include/gaudi/gaudi_masks.h | 15 + 13 files changed, 9168 insertions(+), 1 deletion(-) create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h create mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h (limited to 'drivers') diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h index f92dc53af074..df21a40691e5 100644 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h @@ -89,7 +89,19 @@ #include "tpc0_cfg_masks.h" #include "psoc_global_conf_masks.h" -#include "psoc_pci_pll_regs.h" +#include "nic0_qm0_regs.h" +#include "nic1_qm0_regs.h" +#include "nic2_qm0_regs.h" +#include "nic3_qm0_regs.h" +#include "nic4_qm0_regs.h" +#include "nic0_qm1_regs.h" +#include "nic1_qm1_regs.h" +#include "nic2_qm1_regs.h" +#include "nic3_qm1_regs.h" +#include "nic4_qm1_regs.h" + +#include "nic0_qm0_masks.h" + #include "psoc_hbm_pll_regs.h" #include "psoc_cpu_pll_regs.h" diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h new file mode 100644 index 000000000000..bd37b6452133 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h @@ -0,0 +1,800 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM0_MASKS_H_ +#define ASIC_REG_NIC0_QM0_MASKS_H_ + +/* + ***************************************** + * NIC0_QM0 (Prototype: QMAN) + ***************************************** + */ + +/* NIC0_QM0_GLBL_CFG0 */ +#define NIC0_QM0_GLBL_CFG0_PQF_EN_SHIFT 0 +#define NIC0_QM0_GLBL_CFG0_PQF_EN_MASK 0xF +#define NIC0_QM0_GLBL_CFG0_CQF_EN_SHIFT 4 +#define NIC0_QM0_GLBL_CFG0_CQF_EN_MASK 0x1F0 +#define NIC0_QM0_GLBL_CFG0_CP_EN_SHIFT 9 +#define NIC0_QM0_GLBL_CFG0_CP_EN_MASK 0x3E00 + +/* NIC0_QM0_GLBL_CFG1 */ +#define NIC0_QM0_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK 0xF +#define NIC0_QM0_GLBL_CFG1_CQF_STOP_SHIFT 4 +#define NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK 0x1F0 +#define NIC0_QM0_GLBL_CFG1_CP_STOP_SHIFT 9 +#define NIC0_QM0_GLBL_CFG1_CP_STOP_MASK 0x3E00 +#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_SHIFT 16 +#define NIC0_QM0_GLBL_CFG1_PQF_FLUSH_MASK 0xF0000 +#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_SHIFT 20 +#define NIC0_QM0_GLBL_CFG1_CQF_FLUSH_MASK 0x1F00000 +#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_SHIFT 25 +#define NIC0_QM0_GLBL_CFG1_CP_FLUSH_MASK 0x3E000000 + +/* NIC0_QM0_GLBL_PROT */ +#define NIC0_QM0_GLBL_PROT_PQF_SHIFT 0 +#define NIC0_QM0_GLBL_PROT_PQF_MASK 0xF +#define NIC0_QM0_GLBL_PROT_CQF_SHIFT 4 +#define NIC0_QM0_GLBL_PROT_CQF_MASK 0x1F0 +#define NIC0_QM0_GLBL_PROT_CP_SHIFT 9 +#define NIC0_QM0_GLBL_PROT_CP_MASK 0x3E00 +#define NIC0_QM0_GLBL_PROT_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_PROT_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_PROT_ARB_SHIFT 15 +#define NIC0_QM0_GLBL_PROT_ARB_MASK 0x8000 + +/* NIC0_QM0_GLBL_ERR_CFG */ +#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0xF +#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x1F0 +#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 9 +#define NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x3E00 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 16 +#define NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0xF0000 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 20 +#define NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x1F00000 +#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 25 +#define NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x3E000000 +#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT 31 +#define NIC0_QM0_GLBL_ERR_CFG_ARB_STOP_ON_ERR_MASK 0x80000000 + +/* NIC0_QM0_GLBL_SECURE_PROPS */ +#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_0_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_SECURE_PROPS_4_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_0_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_1_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_2_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_3_MMBP_MASK 0x400 +#define 
NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* NIC0_QM0_GLBL_NON_SECURE_PROPS */ +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_SHIFT 0 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_ASID_MASK 0x3FF +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_0_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_1_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_2_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_3_MMBP_MASK 0x400 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_SHIFT 10 +#define NIC0_QM0_GLBL_NON_SECURE_PROPS_4_MMBP_MASK 0x400 + +/* NIC0_QM0_GLBL_STS0 */ +#define NIC0_QM0_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define NIC0_QM0_GLBL_STS0_PQF_IDLE_MASK 0xF +#define NIC0_QM0_GLBL_STS0_CQF_IDLE_SHIFT 4 +#define NIC0_QM0_GLBL_STS0_CQF_IDLE_MASK 0x1F0 +#define NIC0_QM0_GLBL_STS0_CP_IDLE_SHIFT 9 +#define NIC0_QM0_GLBL_STS0_CP_IDLE_MASK 0x3E00 +#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_SHIFT 16 +#define NIC0_QM0_GLBL_STS0_PQF_IS_STOP_MASK 0xF0000 +#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_SHIFT 20 +#define NIC0_QM0_GLBL_STS0_CQF_IS_STOP_MASK 0x1F00000 +#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_SHIFT 25 +#define NIC0_QM0_GLBL_STS0_CP_IS_STOP_MASK 0x3E000000 +#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_SHIFT 31 +#define NIC0_QM0_GLBL_STS0_ARB_IS_STOP_MASK 0x80000000 + +/* NIC0_QM0_GLBL_STS1 */ +#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define NIC0_QM0_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_STS1_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_STS1_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_STS1_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_STS1_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_SHIFT 15 +#define 
NIC0_QM0_GLBL_STS1_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_STS1_4 */ +#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_STS1_4_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_STS1_4_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_STS1_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_STS1_4_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_STS1_4_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_STS1_4_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_STS1_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_MSG_EN */ +#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_SHIFT 0 +#define NIC0_QM0_GLBL_MSG_EN_PQF_RD_ERR_MASK 0x1 +#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_MSG_EN_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_MSG_EN_CP_RD_ERR_MASK 0x4 +#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_MSG_EN_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_MSG_EN_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_MSG_EN_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_MSG_EN_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_MSG_EN_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_GLBL_MSG_EN_4 */ +#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_SHIFT 1 +#define NIC0_QM0_GLBL_MSG_EN_4_CQF_RD_ERR_MASK 0x2 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_SHIFT 2 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_RD_ERR_MASK 0x4 +#define 
NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_SHIFT 3 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_UNDEF_CMD_ERR_MASK 0x8 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_SHIFT 4 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_STOP_OP_MASK 0x10 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_SHIFT 5 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_MSG_WR_ERR_MASK 0x20 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_SHIFT 6 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_WREG_ERR_MASK 0x40 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_SHIFT 8 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_OVF_ERR_MASK 0x100 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_SHIFT 9 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_OVF_ERR_MASK 0x200 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_SHIFT 10 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_OVF_ERR_MASK 0x400 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_SHIFT 11 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_OVF_ERR_MASK 0x800 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_SHIFT 12 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE0_UDF_ERR_MASK 0x1000 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_SHIFT 13 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE1_UDF_ERR_MASK 0x2000 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_SHIFT 14 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE2_UDF_ERR_MASK 0x4000 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_SHIFT 15 +#define NIC0_QM0_GLBL_MSG_EN_4_CP_FENCE3_UDF_ERR_MASK 0x8000 + +/* NIC0_QM0_PQ_BASE_LO */ +#define NIC0_QM0_PQ_BASE_LO_VAL_SHIFT 0 +#define NIC0_QM0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_BASE_HI */ +#define NIC0_QM0_PQ_BASE_HI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_SIZE */ +#define NIC0_QM0_PQ_SIZE_VAL_SHIFT 0 +#define NIC0_QM0_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_PI */ +#define NIC0_QM0_PQ_PI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_CI */ +#define NIC0_QM0_PQ_CI_VAL_SHIFT 0 +#define NIC0_QM0_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_PQ_CFG0 */ +#define NIC0_QM0_PQ_CFG0_RESERVED_SHIFT 0 +#define NIC0_QM0_PQ_CFG0_RESERVED_MASK 0x1 + +/* NIC0_QM0_PQ_CFG1 */ +#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define NIC0_QM0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define NIC0_QM0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* NIC0_QM0_PQ_ARUSER_31_11 */ +#define NIC0_QM0_PQ_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_PQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_PQ_STS0 */ +#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define NIC0_QM0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define NIC0_QM0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* NIC0_QM0_PQ_STS1 */ +#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define NIC0_QM0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define NIC0_QM0_PQ_STS1_PQ_BUSY_SHIFT 31 +#define NIC0_QM0_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* NIC0_QM0_CQ_CFG0 */ +#define NIC0_QM0_CQ_CFG0_RESERVED_SHIFT 0 +#define NIC0_QM0_CQ_CFG0_RESERVED_MASK 0x1 + +/* NIC0_QM0_CQ_CFG1 */ +#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define NIC0_QM0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define NIC0_QM0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_ARUSER_31_11 */ +#define NIC0_QM0_CQ_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CQ_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_CQ_STS0 */ +#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define NIC0_QM0_CQ_STS0_CQ_CREDIT_CNT_MASK 
0xFFFF +#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define NIC0_QM0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_STS1 */ +#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define NIC0_QM0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define NIC0_QM0_CQ_STS1_CQ_BUSY_SHIFT 31 +#define NIC0_QM0_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* NIC0_QM0_CQ_PTR_LO_0 */ +#define NIC0_QM0_CQ_PTR_LO_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_0 */ +#define NIC0_QM0_CQ_PTR_HI_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_0 */ +#define NIC0_QM0_CQ_TSIZE_0_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_0_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_0 */ +#define NIC0_QM0_CQ_CTL_0_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_0_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_0_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_0_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_1 */ +#define NIC0_QM0_CQ_PTR_LO_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_1 */ +#define NIC0_QM0_CQ_PTR_HI_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_1 */ +#define NIC0_QM0_CQ_TSIZE_1_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_1_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_1 */ +#define NIC0_QM0_CQ_CTL_1_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_1_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_1_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_1_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_2 */ +#define NIC0_QM0_CQ_PTR_LO_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_2 */ +#define NIC0_QM0_CQ_PTR_HI_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_2 */ +#define NIC0_QM0_CQ_TSIZE_2_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_2_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_2 */ +#define NIC0_QM0_CQ_CTL_2_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_2_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_2_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_2_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_3 */ +#define NIC0_QM0_CQ_PTR_LO_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_3 */ +#define NIC0_QM0_CQ_PTR_HI_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_3 */ +#define NIC0_QM0_CQ_TSIZE_3_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_3_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_3 */ +#define NIC0_QM0_CQ_CTL_3_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_3_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_3_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_3_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_4 */ +#define NIC0_QM0_CQ_PTR_LO_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_4 */ +#define NIC0_QM0_CQ_PTR_HI_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_4 */ +#define NIC0_QM0_CQ_TSIZE_4_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_4_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_4 */ +#define NIC0_QM0_CQ_CTL_4_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_4_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_4_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_4_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_PTR_LO_STS */ +#define NIC0_QM0_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_PTR_HI_STS */ +#define NIC0_QM0_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_TSIZE_STS */ +#define 
NIC0_QM0_CQ_TSIZE_STS_VAL_SHIFT 0 +#define NIC0_QM0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CQ_CTL_STS */ +#define NIC0_QM0_CQ_CTL_STS_RPT_SHIFT 0 +#define NIC0_QM0_CQ_CTL_STS_RPT_MASK 0xFFFF +#define NIC0_QM0_CQ_CTL_STS_CTL_SHIFT 16 +#define NIC0_QM0_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* NIC0_QM0_CQ_IFIFO_CNT */ +#define NIC0_QM0_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* NIC0_QM0_CP_MSG_BASE0_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE0_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE1_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE1_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE2_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE2_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE3_ADDR_LO */ +#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_MSG_BASE3_ADDR_HI */ +#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_TSIZE_OFFSET */ +#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET */ +#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define NIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_FENCE0_RDATA */ +#define NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE1_RDATA */ +#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE2_RDATA */ +#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE3_RDATA */ +#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* NIC0_QM0_CP_FENCE0_CNT */ +#define NIC0_QM0_CP_FENCE0_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE0_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE1_CNT */ +#define NIC0_QM0_CP_FENCE1_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE1_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE2_CNT */ +#define NIC0_QM0_CP_FENCE2_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE2_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_FENCE3_CNT */ +#define NIC0_QM0_CP_FENCE3_CNT_VAL_SHIFT 0 +#define NIC0_QM0_CP_FENCE3_CNT_VAL_MASK 0x3FFF + +/* NIC0_QM0_CP_STS */ +#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define NIC0_QM0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define NIC0_QM0_CP_STS_ERDY_SHIFT 16 +#define NIC0_QM0_CP_STS_ERDY_MASK 0x10000 +#define NIC0_QM0_CP_STS_RRDY_SHIFT 17 +#define NIC0_QM0_CP_STS_RRDY_MASK 0x20000 +#define NIC0_QM0_CP_STS_MRDY_SHIFT 18 +#define NIC0_QM0_CP_STS_MRDY_MASK 0x40000 +#define NIC0_QM0_CP_STS_SW_STOP_SHIFT 19 +#define 
NIC0_QM0_CP_STS_SW_STOP_MASK 0x80000 +#define NIC0_QM0_CP_STS_FENCE_ID_SHIFT 20 +#define NIC0_QM0_CP_STS_FENCE_ID_MASK 0x300000 +#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define NIC0_QM0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* NIC0_QM0_CP_CURRENT_INST_LO */ +#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define NIC0_QM0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_CURRENT_INST_HI */ +#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define NIC0_QM0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_CP_BARRIER_CFG */ +#define NIC0_QM0_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define NIC0_QM0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF +#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_SHIFT 16 +#define NIC0_QM0_CP_BARRIER_CFG_RBGUARD_MASK 0xF0000 + +/* NIC0_QM0_CP_DBG_0 */ +#define NIC0_QM0_CP_DBG_0_CS_SHIFT 0 +#define NIC0_QM0_CP_DBG_0_CS_MASK 0xF +#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_SHIFT 4 +#define NIC0_QM0_CP_DBG_0_EB_CNT_NOT_ZERO_MASK 0x10 +#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_SHIFT 5 +#define NIC0_QM0_CP_DBG_0_BULK_CNT_NOT_ZERO_MASK 0x20 +#define NIC0_QM0_CP_DBG_0_MREB_STALL_SHIFT 6 +#define NIC0_QM0_CP_DBG_0_MREB_STALL_MASK 0x40 +#define NIC0_QM0_CP_DBG_0_STALL_SHIFT 7 +#define NIC0_QM0_CP_DBG_0_STALL_MASK 0x80 + +/* NIC0_QM0_CP_ARUSER_31_11 */ +#define NIC0_QM0_CP_ARUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CP_ARUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_CP_AWUSER_31_11 */ +#define NIC0_QM0_CP_AWUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_CP_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_ARB_CFG_0 */ +#define NIC0_QM0_ARB_CFG_0_TYPE_SHIFT 0 +#define NIC0_QM0_ARB_CFG_0_TYPE_MASK 0x1 +#define NIC0_QM0_ARB_CFG_0_IS_MASTER_SHIFT 4 +#define NIC0_QM0_ARB_CFG_0_IS_MASTER_MASK 0x10 +#define NIC0_QM0_ARB_CFG_0_EN_SHIFT 8 +#define NIC0_QM0_ARB_CFG_0_EN_MASK 0x100 +#define NIC0_QM0_ARB_CFG_0_MASK_SHIFT 12 +#define NIC0_QM0_ARB_CFG_0_MASK_MASK 0xF000 +#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_SHIFT 16 +#define NIC0_QM0_ARB_CFG_0_MST_MSG_NOSTALL_MASK 0x10000 + +/* NIC0_QM0_ARB_CHOISE_Q_PUSH */ +#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_SHIFT 0 +#define NIC0_QM0_ARB_CHOISE_Q_PUSH_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_WRR_WEIGHT */ +#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_WRR_WEIGHT_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_CFG_1 */ +#define NIC0_QM0_ARB_CFG_1_CLR_SHIFT 0 +#define NIC0_QM0_ARB_CFG_1_CLR_MASK 0x1 + +/* NIC0_QM0_ARB_MST_AVAIL_CRED */ +#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_AVAIL_CRED_VAL_MASK 0x7F + +/* NIC0_QM0_ARB_MST_CRED_INC */ +#define NIC0_QM0_ARB_MST_CRED_INC_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CRED_INC_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST */ +#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST */ +#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_SLAVE_EN */ +#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_SLAVE_EN_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_MST_QUIET_PER */ +#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_QUIET_PER_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_CHOISE_WDT */ +#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_CHOISE_WDT_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_SLV_ID */ +#define NIC0_QM0_ARB_SLV_ID_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_ID_VAL_MASK 0x1F + +/* NIC0_QM0_ARB_MSG_MAX_INFLIGHT */ 
+#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_MAX_INFLIGHT_VAL_MASK 0x3F + +/* NIC0_QM0_ARB_MSG_AWUSER_31_11 */ +#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_31_11_VAL_MASK 0x1FFFFF + +/* NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP */ +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_ASID_MASK 0x3FF +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_SHIFT 10 +#define NIC0_QM0_ARB_MSG_AWUSER_SEC_PROP_MMBP_MASK 0x400 + +/* NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP */ +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_SHIFT 0 +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_ASID_MASK 0x3FF +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_SHIFT 10 +#define NIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP_MMBP_MASK 0x400 + +/* NIC0_QM0_ARB_BASE_LO */ +#define NIC0_QM0_ARB_BASE_LO_VAL_SHIFT 0 +#define NIC0_QM0_ARB_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_BASE_HI */ +#define NIC0_QM0_ARB_BASE_HI_VAL_SHIFT 0 +#define NIC0_QM0_ARB_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_STATE_STS */ +#define NIC0_QM0_ARB_STATE_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_STATE_STS_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_ARB_CHOISE_FULLNESS_STS */ +#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_CHOISE_FULLNESS_STS_VAL_MASK 0x7F + +/* NIC0_QM0_ARB_MSG_STS */ +#define NIC0_QM0_ARB_MSG_STS_FULL_SHIFT 0 +#define NIC0_QM0_ARB_MSG_STS_FULL_MASK 0x1 +#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_SHIFT 1 +#define NIC0_QM0_ARB_MSG_STS_NO_INFLIGHT_MASK 0x2 + +/* NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD */ +#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_SHIFT 0 +#define NIC0_QM0_ARB_SLV_CHOISE_Q_HEAD_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_ERR_CAUSE */ +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_SHIFT 0 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_OVF_MASK 0x1 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_SHIFT 1 +#define NIC0_QM0_ARB_ERR_CAUSE_CHOISE_WDT_MASK 0x2 +#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_SHIFT 2 +#define NIC0_QM0_ARB_ERR_CAUSE_AXI_LBW_ERR_MASK 0x4 + +/* NIC0_QM0_ARB_ERR_MSG_EN */ +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_SHIFT 0 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_SHIFT 1 +#define NIC0_QM0_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2 +#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_SHIFT 2 +#define NIC0_QM0_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4 + +/* NIC0_QM0_ARB_ERR_STS_DRP */ +#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_SHIFT 0 +#define NIC0_QM0_ARB_ERR_STS_DRP_VAL_MASK 0x3 + +/* NIC0_QM0_ARB_MST_CRED_STS */ +#define NIC0_QM0_ARB_MST_CRED_STS_VAL_SHIFT 0 +#define NIC0_QM0_ARB_MST_CRED_STS_VAL_MASK 0x7F + +/* NIC0_QM0_CGM_CFG */ +#define NIC0_QM0_CGM_CFG_IDLE_TH_SHIFT 0 +#define NIC0_QM0_CGM_CFG_IDLE_TH_MASK 0xFFF +#define NIC0_QM0_CGM_CFG_G2F_TH_SHIFT 16 +#define NIC0_QM0_CGM_CFG_G2F_TH_MASK 0xFF0000 +#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_SHIFT 24 +#define NIC0_QM0_CGM_CFG_CP_IDLE_MASK_MASK 0x1F000000 +#define NIC0_QM0_CGM_CFG_EN_SHIFT 31 +#define NIC0_QM0_CGM_CFG_EN_MASK 0x80000000 + +/* NIC0_QM0_CGM_STS */ +#define NIC0_QM0_CGM_STS_ST_SHIFT 0 +#define NIC0_QM0_CGM_STS_ST_MASK 0x3 +#define NIC0_QM0_CGM_STS_CG_SHIFT 4 +#define NIC0_QM0_CGM_STS_CG_MASK 0x10 +#define NIC0_QM0_CGM_STS_AGENT_IDLE_SHIFT 8 +#define NIC0_QM0_CGM_STS_AGENT_IDLE_MASK 0x100 +#define NIC0_QM0_CGM_STS_AXI_IDLE_SHIFT 9 +#define NIC0_QM0_CGM_STS_AXI_IDLE_MASK 0x200 +#define NIC0_QM0_CGM_STS_CP_IDLE_SHIFT 10 +#define NIC0_QM0_CGM_STS_CP_IDLE_MASK 0x400 + +/* NIC0_QM0_CGM_CFG1 */ +#define NIC0_QM0_CGM_CFG1_MASK_TH_SHIFT 0 
+#define NIC0_QM0_CGM_CFG1_MASK_TH_MASK 0xFF + +/* NIC0_QM0_LOCAL_RANGE_BASE */ +#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_SHIFT 0 +#define NIC0_QM0_LOCAL_RANGE_BASE_VAL_MASK 0xFFFF + +/* NIC0_QM0_LOCAL_RANGE_SIZE */ +#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_SHIFT 0 +#define NIC0_QM0_LOCAL_RANGE_SIZE_VAL_MASK 0xFFFF + +/* NIC0_QM0_CSMR_STRICT_PRIO_CFG */ +#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_SHIFT 0 +#define NIC0_QM0_CSMR_STRICT_PRIO_CFG_TYPE_MASK 0x1 + +/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_1 */ +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_SHIFT 31 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_0 */ +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* NIC0_QM0_LBW_WR_RATE_LIM_CFG_1 */ +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_SHIFT 0 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_TOUT_MASK 0xFF +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_SHIFT 31 +#define NIC0_QM0_LBW_WR_RATE_LIM_CFG_1_EN_MASK 0x80000000 + +/* NIC0_QM0_HBW_RD_RATE_LIM_CFG_0 */ +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_SHIFT 0 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_RST_TOKEN_MASK 0xFF +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_SHIFT 16 +#define NIC0_QM0_HBW_RD_RATE_LIM_CFG_0_SAT_MASK 0xFF0000 + +/* NIC0_QM0_GLBL_AXCACHE */ +#define NIC0_QM0_GLBL_AXCACHE_AR_SHIFT 0 +#define NIC0_QM0_GLBL_AXCACHE_AR_MASK 0xF +#define NIC0_QM0_GLBL_AXCACHE_AW_SHIFT 16 +#define NIC0_QM0_GLBL_AXCACHE_AW_MASK 0xF0000 + +/* NIC0_QM0_IND_GW_APB_CFG */ +#define NIC0_QM0_IND_GW_APB_CFG_ADDR_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_CFG_ADDR_MASK 0x7FFFFFFF +#define NIC0_QM0_IND_GW_APB_CFG_CMD_SHIFT 31 +#define NIC0_QM0_IND_GW_APB_CFG_CMD_MASK 0x80000000 + +/* NIC0_QM0_IND_GW_APB_WDATA */ +#define NIC0_QM0_IND_GW_APB_WDATA_VAL_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_WDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_IND_GW_APB_RDATA */ +#define NIC0_QM0_IND_GW_APB_RDATA_VAL_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_RDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_IND_GW_APB_STATUS */ +#define NIC0_QM0_IND_GW_APB_STATUS_RDY_SHIFT 0 +#define NIC0_QM0_IND_GW_APB_STATUS_RDY_MASK 0x1 +#define NIC0_QM0_IND_GW_APB_STATUS_ERR_SHIFT 1 +#define NIC0_QM0_IND_GW_APB_STATUS_ERR_MASK 0x2 + +/* NIC0_QM0_GLBL_ERR_ADDR_LO */ +#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_ERR_ADDR_HI */ +#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_ERR_WDATA */ +#define NIC0_QM0_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define NIC0_QM0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* NIC0_QM0_GLBL_MEM_INIT_BUSY */ +#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_SHIFT 0 +#define NIC0_QM0_GLBL_MEM_INIT_BUSY_RBUF_MASK 0xF + +#endif /* ASIC_REG_NIC0_QM0_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h new file mode 100644 index 000000000000..7c97f4041b8e --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM0_REGS_H_ +#define ASIC_REG_NIC0_QM0_REGS_H_ + +/* + ***************************************** + * NIC0_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC0_QM0_GLBL_CFG0 0xCE0000 + +#define mmNIC0_QM0_GLBL_CFG1 0xCE0004 + +#define mmNIC0_QM0_GLBL_PROT 0xCE0008 + +#define mmNIC0_QM0_GLBL_ERR_CFG 0xCE000C + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_0 0xCE0010 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_1 0xCE0014 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_2 0xCE0018 + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_3 0xCE001C + +#define mmNIC0_QM0_GLBL_SECURE_PROPS_4 0xCE0020 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 0xCE0024 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 0xCE0028 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 0xCE002C + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 0xCE0030 + +#define mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 0xCE0034 + +#define mmNIC0_QM0_GLBL_STS0 0xCE0038 + +#define mmNIC0_QM0_GLBL_STS1_0 0xCE0040 + +#define mmNIC0_QM0_GLBL_STS1_1 0xCE0044 + +#define mmNIC0_QM0_GLBL_STS1_2 0xCE0048 + +#define mmNIC0_QM0_GLBL_STS1_3 0xCE004C + +#define mmNIC0_QM0_GLBL_STS1_4 0xCE0050 + +#define mmNIC0_QM0_GLBL_MSG_EN_0 0xCE0054 + +#define mmNIC0_QM0_GLBL_MSG_EN_1 0xCE0058 + +#define mmNIC0_QM0_GLBL_MSG_EN_2 0xCE005C + +#define mmNIC0_QM0_GLBL_MSG_EN_3 0xCE0060 + +#define mmNIC0_QM0_GLBL_MSG_EN_4 0xCE0068 + +#define mmNIC0_QM0_PQ_BASE_LO_0 0xCE0070 + +#define mmNIC0_QM0_PQ_BASE_LO_1 0xCE0074 + +#define mmNIC0_QM0_PQ_BASE_LO_2 0xCE0078 + +#define mmNIC0_QM0_PQ_BASE_LO_3 0xCE007C + +#define mmNIC0_QM0_PQ_BASE_HI_0 0xCE0080 + +#define mmNIC0_QM0_PQ_BASE_HI_1 0xCE0084 + +#define mmNIC0_QM0_PQ_BASE_HI_2 0xCE0088 + +#define mmNIC0_QM0_PQ_BASE_HI_3 0xCE008C + +#define mmNIC0_QM0_PQ_SIZE_0 0xCE0090 + +#define mmNIC0_QM0_PQ_SIZE_1 0xCE0094 + +#define mmNIC0_QM0_PQ_SIZE_2 0xCE0098 + +#define mmNIC0_QM0_PQ_SIZE_3 0xCE009C + +#define mmNIC0_QM0_PQ_PI_0 0xCE00A0 + +#define mmNIC0_QM0_PQ_PI_1 0xCE00A4 + +#define mmNIC0_QM0_PQ_PI_2 0xCE00A8 + +#define mmNIC0_QM0_PQ_PI_3 0xCE00AC + +#define mmNIC0_QM0_PQ_CI_0 0xCE00B0 + +#define mmNIC0_QM0_PQ_CI_1 0xCE00B4 + +#define mmNIC0_QM0_PQ_CI_2 0xCE00B8 + +#define mmNIC0_QM0_PQ_CI_3 0xCE00BC + +#define mmNIC0_QM0_PQ_CFG0_0 0xCE00C0 + +#define mmNIC0_QM0_PQ_CFG0_1 0xCE00C4 + +#define mmNIC0_QM0_PQ_CFG0_2 0xCE00C8 + +#define mmNIC0_QM0_PQ_CFG0_3 0xCE00CC + +#define mmNIC0_QM0_PQ_CFG1_0 0xCE00D0 + +#define mmNIC0_QM0_PQ_CFG1_1 0xCE00D4 + +#define mmNIC0_QM0_PQ_CFG1_2 0xCE00D8 + +#define mmNIC0_QM0_PQ_CFG1_3 0xCE00DC + +#define mmNIC0_QM0_PQ_ARUSER_31_11_0 0xCE00E0 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_1 0xCE00E4 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_2 0xCE00E8 + +#define mmNIC0_QM0_PQ_ARUSER_31_11_3 0xCE00EC + +#define mmNIC0_QM0_PQ_STS0_0 0xCE00F0 + +#define mmNIC0_QM0_PQ_STS0_1 0xCE00F4 + +#define mmNIC0_QM0_PQ_STS0_2 0xCE00F8 + +#define mmNIC0_QM0_PQ_STS0_3 0xCE00FC + +#define mmNIC0_QM0_PQ_STS1_0 0xCE0100 + +#define mmNIC0_QM0_PQ_STS1_1 0xCE0104 + +#define mmNIC0_QM0_PQ_STS1_2 0xCE0108 + +#define mmNIC0_QM0_PQ_STS1_3 0xCE010C + +#define mmNIC0_QM0_CQ_CFG0_0 0xCE0110 + +#define mmNIC0_QM0_CQ_CFG0_1 0xCE0114 + +#define mmNIC0_QM0_CQ_CFG0_2 0xCE0118 + +#define mmNIC0_QM0_CQ_CFG0_3 0xCE011C + +#define mmNIC0_QM0_CQ_CFG0_4 0xCE0120 + +#define mmNIC0_QM0_CQ_CFG1_0 0xCE0124 + +#define mmNIC0_QM0_CQ_CFG1_1 0xCE0128 + +#define mmNIC0_QM0_CQ_CFG1_2 0xCE012C + +#define 
mmNIC0_QM0_CQ_CFG1_3 0xCE0130 + +#define mmNIC0_QM0_CQ_CFG1_4 0xCE0134 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_0 0xCE0138 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_1 0xCE013C + +#define mmNIC0_QM0_CQ_ARUSER_31_11_2 0xCE0140 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_3 0xCE0144 + +#define mmNIC0_QM0_CQ_ARUSER_31_11_4 0xCE0148 + +#define mmNIC0_QM0_CQ_STS0_0 0xCE014C + +#define mmNIC0_QM0_CQ_STS0_1 0xCE0150 + +#define mmNIC0_QM0_CQ_STS0_2 0xCE0154 + +#define mmNIC0_QM0_CQ_STS0_3 0xCE0158 + +#define mmNIC0_QM0_CQ_STS0_4 0xCE015C + +#define mmNIC0_QM0_CQ_STS1_0 0xCE0160 + +#define mmNIC0_QM0_CQ_STS1_1 0xCE0164 + +#define mmNIC0_QM0_CQ_STS1_2 0xCE0168 + +#define mmNIC0_QM0_CQ_STS1_3 0xCE016C + +#define mmNIC0_QM0_CQ_STS1_4 0xCE0170 + +#define mmNIC0_QM0_CQ_PTR_LO_0 0xCE0174 + +#define mmNIC0_QM0_CQ_PTR_HI_0 0xCE0178 + +#define mmNIC0_QM0_CQ_TSIZE_0 0xCE017C + +#define mmNIC0_QM0_CQ_CTL_0 0xCE0180 + +#define mmNIC0_QM0_CQ_PTR_LO_1 0xCE0184 + +#define mmNIC0_QM0_CQ_PTR_HI_1 0xCE0188 + +#define mmNIC0_QM0_CQ_TSIZE_1 0xCE018C + +#define mmNIC0_QM0_CQ_CTL_1 0xCE0190 + +#define mmNIC0_QM0_CQ_PTR_LO_2 0xCE0194 + +#define mmNIC0_QM0_CQ_PTR_HI_2 0xCE0198 + +#define mmNIC0_QM0_CQ_TSIZE_2 0xCE019C + +#define mmNIC0_QM0_CQ_CTL_2 0xCE01A0 + +#define mmNIC0_QM0_CQ_PTR_LO_3 0xCE01A4 + +#define mmNIC0_QM0_CQ_PTR_HI_3 0xCE01A8 + +#define mmNIC0_QM0_CQ_TSIZE_3 0xCE01AC + +#define mmNIC0_QM0_CQ_CTL_3 0xCE01B0 + +#define mmNIC0_QM0_CQ_PTR_LO_4 0xCE01B4 + +#define mmNIC0_QM0_CQ_PTR_HI_4 0xCE01B8 + +#define mmNIC0_QM0_CQ_TSIZE_4 0xCE01BC + +#define mmNIC0_QM0_CQ_CTL_4 0xCE01C0 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_0 0xCE01C4 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_1 0xCE01C8 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_2 0xCE01CC + +#define mmNIC0_QM0_CQ_PTR_LO_STS_3 0xCE01D0 + +#define mmNIC0_QM0_CQ_PTR_LO_STS_4 0xCE01D4 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_0 0xCE01D8 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_1 0xCE01DC + +#define mmNIC0_QM0_CQ_PTR_HI_STS_2 0xCE01E0 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_3 0xCE01E4 + +#define mmNIC0_QM0_CQ_PTR_HI_STS_4 0xCE01E8 + +#define mmNIC0_QM0_CQ_TSIZE_STS_0 0xCE01EC + +#define mmNIC0_QM0_CQ_TSIZE_STS_1 0xCE01F0 + +#define mmNIC0_QM0_CQ_TSIZE_STS_2 0xCE01F4 + +#define mmNIC0_QM0_CQ_TSIZE_STS_3 0xCE01F8 + +#define mmNIC0_QM0_CQ_TSIZE_STS_4 0xCE01FC + +#define mmNIC0_QM0_CQ_CTL_STS_0 0xCE0200 + +#define mmNIC0_QM0_CQ_CTL_STS_1 0xCE0204 + +#define mmNIC0_QM0_CQ_CTL_STS_2 0xCE0208 + +#define mmNIC0_QM0_CQ_CTL_STS_3 0xCE020C + +#define mmNIC0_QM0_CQ_CTL_STS_4 0xCE0210 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_0 0xCE0214 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_1 0xCE0218 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_2 0xCE021C + +#define mmNIC0_QM0_CQ_IFIFO_CNT_3 0xCE0220 + +#define mmNIC0_QM0_CQ_IFIFO_CNT_4 0xCE0224 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 0xCE0228 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 0xCE022C + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 0xCE0230 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 0xCE0234 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 0xCE0238 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 0xCE023C + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 0xCE0240 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 0xCE0244 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 0xCE0248 + +#define mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 0xCE024C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 0xCE0250 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 0xCE0254 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 0xCE0258 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 0xCE025C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 0xCE0260 + +#define 
mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 0xCE0264 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 0xCE0268 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 0xCE026C + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 0xCE0270 + +#define mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 0xCE0274 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 0xCE0278 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 0xCE027C + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 0xCE0280 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 0xCE0284 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 0xCE0288 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 0xCE028C + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 0xCE0290 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 0xCE0294 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 0xCE0298 + +#define mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 0xCE029C + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 0xCE02A0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 0xCE02A4 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 0xCE02A8 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 0xCE02AC + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 0xCE02B0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 0xCE02B4 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 0xCE02B8 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 0xCE02BC + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 0xCE02C0 + +#define mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 0xCE02C4 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 0xCE02C8 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 0xCE02CC + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 0xCE02D0 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 0xCE02D4 + +#define mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 0xCE02D8 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xCE02E0 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xCE02E4 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xCE02E8 + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xCE02EC + +#define mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xCE02F0 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xCE02F4 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xCE02F8 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xCE02FC + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xCE0300 + +#define mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xCE0304 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_0 0xCE0308 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_1 0xCE030C + +#define mmNIC0_QM0_CP_FENCE0_RDATA_2 0xCE0310 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_3 0xCE0314 + +#define mmNIC0_QM0_CP_FENCE0_RDATA_4 0xCE0318 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_0 0xCE031C + +#define mmNIC0_QM0_CP_FENCE1_RDATA_1 0xCE0320 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_2 0xCE0324 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_3 0xCE0328 + +#define mmNIC0_QM0_CP_FENCE1_RDATA_4 0xCE032C + +#define mmNIC0_QM0_CP_FENCE2_RDATA_0 0xCE0330 + +#define mmNIC0_QM0_CP_FENCE2_RDATA_1 0xCE0334 + +#define mmNIC0_QM0_CP_FENCE2_RDATA_2 0xCE0338 + +#define mmNIC0_QM0_CP_FENCE2_RDATA_3 0xCE033C + +#define mmNIC0_QM0_CP_FENCE2_RDATA_4 0xCE0340 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_0 0xCE0344 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_1 0xCE0348 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_2 0xCE034C + +#define mmNIC0_QM0_CP_FENCE3_RDATA_3 0xCE0350 + +#define mmNIC0_QM0_CP_FENCE3_RDATA_4 0xCE0354 + +#define mmNIC0_QM0_CP_FENCE0_CNT_0 0xCE0358 + +#define mmNIC0_QM0_CP_FENCE0_CNT_1 0xCE035C + +#define mmNIC0_QM0_CP_FENCE0_CNT_2 0xCE0360 + +#define mmNIC0_QM0_CP_FENCE0_CNT_3 0xCE0364 + +#define mmNIC0_QM0_CP_FENCE0_CNT_4 0xCE0368 + +#define mmNIC0_QM0_CP_FENCE1_CNT_0 0xCE036C + +#define mmNIC0_QM0_CP_FENCE1_CNT_1 0xCE0370 + +#define 
mmNIC0_QM0_CP_FENCE1_CNT_2 0xCE0374 + +#define mmNIC0_QM0_CP_FENCE1_CNT_3 0xCE0378 + +#define mmNIC0_QM0_CP_FENCE1_CNT_4 0xCE037C + +#define mmNIC0_QM0_CP_FENCE2_CNT_0 0xCE0380 + +#define mmNIC0_QM0_CP_FENCE2_CNT_1 0xCE0384 + +#define mmNIC0_QM0_CP_FENCE2_CNT_2 0xCE0388 + +#define mmNIC0_QM0_CP_FENCE2_CNT_3 0xCE038C + +#define mmNIC0_QM0_CP_FENCE2_CNT_4 0xCE0390 + +#define mmNIC0_QM0_CP_FENCE3_CNT_0 0xCE0394 + +#define mmNIC0_QM0_CP_FENCE3_CNT_1 0xCE0398 + +#define mmNIC0_QM0_CP_FENCE3_CNT_2 0xCE039C + +#define mmNIC0_QM0_CP_FENCE3_CNT_3 0xCE03A0 + +#define mmNIC0_QM0_CP_FENCE3_CNT_4 0xCE03A4 + +#define mmNIC0_QM0_CP_STS_0 0xCE03A8 + +#define mmNIC0_QM0_CP_STS_1 0xCE03AC + +#define mmNIC0_QM0_CP_STS_2 0xCE03B0 + +#define mmNIC0_QM0_CP_STS_3 0xCE03B4 + +#define mmNIC0_QM0_CP_STS_4 0xCE03B8 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_0 0xCE03BC + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_1 0xCE03C0 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_2 0xCE03C4 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_3 0xCE03C8 + +#define mmNIC0_QM0_CP_CURRENT_INST_LO_4 0xCE03CC + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_0 0xCE03D0 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_1 0xCE03D4 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_2 0xCE03D8 + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_3 0xCE03DC + +#define mmNIC0_QM0_CP_CURRENT_INST_HI_4 0xCE03E0 + +#define mmNIC0_QM0_CP_BARRIER_CFG_0 0xCE03F4 + +#define mmNIC0_QM0_CP_BARRIER_CFG_1 0xCE03F8 + +#define mmNIC0_QM0_CP_BARRIER_CFG_2 0xCE03FC + +#define mmNIC0_QM0_CP_BARRIER_CFG_3 0xCE0400 + +#define mmNIC0_QM0_CP_BARRIER_CFG_4 0xCE0404 + +#define mmNIC0_QM0_CP_DBG_0_0 0xCE0408 + +#define mmNIC0_QM0_CP_DBG_0_1 0xCE040C + +#define mmNIC0_QM0_CP_DBG_0_2 0xCE0410 + +#define mmNIC0_QM0_CP_DBG_0_3 0xCE0414 + +#define mmNIC0_QM0_CP_DBG_0_4 0xCE0418 + +#define mmNIC0_QM0_CP_ARUSER_31_11_0 0xCE041C + +#define mmNIC0_QM0_CP_ARUSER_31_11_1 0xCE0420 + +#define mmNIC0_QM0_CP_ARUSER_31_11_2 0xCE0424 + +#define mmNIC0_QM0_CP_ARUSER_31_11_3 0xCE0428 + +#define mmNIC0_QM0_CP_ARUSER_31_11_4 0xCE042C + +#define mmNIC0_QM0_CP_AWUSER_31_11_0 0xCE0430 + +#define mmNIC0_QM0_CP_AWUSER_31_11_1 0xCE0434 + +#define mmNIC0_QM0_CP_AWUSER_31_11_2 0xCE0438 + +#define mmNIC0_QM0_CP_AWUSER_31_11_3 0xCE043C + +#define mmNIC0_QM0_CP_AWUSER_31_11_4 0xCE0440 + +#define mmNIC0_QM0_ARB_CFG_0 0xCE0A00 + +#define mmNIC0_QM0_ARB_CHOISE_Q_PUSH 0xCE0A04 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_0 0xCE0A08 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_1 0xCE0A0C + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_2 0xCE0A10 + +#define mmNIC0_QM0_ARB_WRR_WEIGHT_3 0xCE0A14 + +#define mmNIC0_QM0_ARB_CFG_1 0xCE0A18 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 0xCE0A20 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 0xCE0A24 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 0xCE0A28 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 0xCE0A2C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 0xCE0A30 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 0xCE0A34 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 0xCE0A38 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 0xCE0A3C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 0xCE0A40 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 0xCE0A44 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 0xCE0A48 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 0xCE0A4C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 0xCE0A50 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 0xCE0A54 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 0xCE0A58 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 0xCE0A5C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 0xCE0A60 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 0xCE0A64 + +#define 
mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 0xCE0A68 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 0xCE0A6C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 0xCE0A70 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 0xCE0A74 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 0xCE0A78 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 0xCE0A7C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 0xCE0A80 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 0xCE0A84 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 0xCE0A88 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 0xCE0A8C + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 0xCE0A90 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 0xCE0A94 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 0xCE0A98 + +#define mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 0xCE0A9C + +#define mmNIC0_QM0_ARB_MST_CRED_INC 0xCE0AA0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xCE0AA4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xCE0AA8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xCE0AAC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xCE0AB0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xCE0AB4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xCE0AB8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xCE0ABC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xCE0AC0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xCE0AC4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xCE0AC8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xCE0ACC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xCE0AD0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xCE0AD4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xCE0AD8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xCE0ADC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xCE0AE0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xCE0AE4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xCE0AE8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xCE0AEC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xCE0AF0 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xCE0AF4 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xCE0AF8 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xCE0AFC + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xCE0B00 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xCE0B04 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xCE0B08 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xCE0B0C + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xCE0B10 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xCE0B14 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xCE0B18 + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xCE0B1C + +#define mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xCE0B20 + +#define mmNIC0_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xCE0B28 + +#define mmNIC0_QM0_ARB_MST_SLAVE_EN 0xCE0B2C + +#define mmNIC0_QM0_ARB_MST_QUIET_PER 0xCE0B34 + +#define mmNIC0_QM0_ARB_SLV_CHOISE_WDT 0xCE0B38 + +#define mmNIC0_QM0_ARB_SLV_ID 0xCE0B3C + +#define mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT 0xCE0B44 + +#define mmNIC0_QM0_ARB_MSG_AWUSER_31_11 0xCE0B48 + +#define mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP 0xCE0B4C + +#define mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xCE0B50 + +#define mmNIC0_QM0_ARB_BASE_LO 0xCE0B54 + +#define mmNIC0_QM0_ARB_BASE_HI 0xCE0B58 + +#define mmNIC0_QM0_ARB_STATE_STS 0xCE0B80 + +#define mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS 0xCE0B84 + +#define mmNIC0_QM0_ARB_MSG_STS 0xCE0B88 + +#define mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD 0xCE0B8C + +#define mmNIC0_QM0_ARB_ERR_CAUSE 0xCE0B9C + +#define mmNIC0_QM0_ARB_ERR_MSG_EN 0xCE0BA0 + +#define mmNIC0_QM0_ARB_ERR_STS_DRP 0xCE0BA8 + +#define 
mmNIC0_QM0_ARB_MST_CRED_STS_0 0xCE0BB0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_1 0xCE0BB4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_2 0xCE0BB8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_3 0xCE0BBC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_4 0xCE0BC0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_5 0xCE0BC4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_6 0xCE0BC8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_7 0xCE0BCC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_8 0xCE0BD0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_9 0xCE0BD4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_10 0xCE0BD8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_11 0xCE0BDC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_12 0xCE0BE0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_13 0xCE0BE4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_14 0xCE0BE8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_15 0xCE0BEC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_16 0xCE0BF0 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_17 0xCE0BF4 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_18 0xCE0BF8 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_19 0xCE0BFC + +#define mmNIC0_QM0_ARB_MST_CRED_STS_20 0xCE0C00 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_21 0xCE0C04 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_22 0xCE0C08 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_23 0xCE0C0C + +#define mmNIC0_QM0_ARB_MST_CRED_STS_24 0xCE0C10 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_25 0xCE0C14 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_26 0xCE0C18 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_27 0xCE0C1C + +#define mmNIC0_QM0_ARB_MST_CRED_STS_28 0xCE0C20 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_29 0xCE0C24 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_30 0xCE0C28 + +#define mmNIC0_QM0_ARB_MST_CRED_STS_31 0xCE0C2C + +#define mmNIC0_QM0_CGM_CFG 0xCE0C70 + +#define mmNIC0_QM0_CGM_STS 0xCE0C74 + +#define mmNIC0_QM0_CGM_CFG1 0xCE0C78 + +#define mmNIC0_QM0_LOCAL_RANGE_BASE 0xCE0C80 + +#define mmNIC0_QM0_LOCAL_RANGE_SIZE 0xCE0C84 + +#define mmNIC0_QM0_CSMR_STRICT_PRIO_CFG 0xCE0C90 + +#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 0xCE0C94 + +#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 0xCE0C98 + +#define mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 0xCE0C9C + +#define mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 0xCE0CA0 + +#define mmNIC0_QM0_GLBL_AXCACHE 0xCE0CA4 + +#define mmNIC0_QM0_IND_GW_APB_CFG 0xCE0CB0 + +#define mmNIC0_QM0_IND_GW_APB_WDATA 0xCE0CB4 + +#define mmNIC0_QM0_IND_GW_APB_RDATA 0xCE0CB8 + +#define mmNIC0_QM0_IND_GW_APB_STATUS 0xCE0CBC + +#define mmNIC0_QM0_GLBL_ERR_ADDR_LO 0xCE0CD0 + +#define mmNIC0_QM0_GLBL_ERR_ADDR_HI 0xCE0CD4 + +#define mmNIC0_QM0_GLBL_ERR_WDATA 0xCE0CD8 + +#define mmNIC0_QM0_GLBL_MEM_INIT_BUSY 0xCE0D00 + +#endif /* ASIC_REG_NIC0_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h new file mode 100644 index 000000000000..fe96c575b5c6 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC0_QM1_REGS_H_ +#define ASIC_REG_NIC0_QM1_REGS_H_ + +/* + ***************************************** + * NIC0_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC0_QM1_GLBL_CFG0 0xCE2000 + +#define mmNIC0_QM1_GLBL_CFG1 0xCE2004 + +#define mmNIC0_QM1_GLBL_PROT 0xCE2008 + +#define mmNIC0_QM1_GLBL_ERR_CFG 0xCE200C + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_0 0xCE2010 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_1 0xCE2014 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_2 0xCE2018 + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_3 0xCE201C + +#define mmNIC0_QM1_GLBL_SECURE_PROPS_4 0xCE2020 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 0xCE2024 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 0xCE2028 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 0xCE202C + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 0xCE2030 + +#define mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 0xCE2034 + +#define mmNIC0_QM1_GLBL_STS0 0xCE2038 + +#define mmNIC0_QM1_GLBL_STS1_0 0xCE2040 + +#define mmNIC0_QM1_GLBL_STS1_1 0xCE2044 + +#define mmNIC0_QM1_GLBL_STS1_2 0xCE2048 + +#define mmNIC0_QM1_GLBL_STS1_3 0xCE204C + +#define mmNIC0_QM1_GLBL_STS1_4 0xCE2050 + +#define mmNIC0_QM1_GLBL_MSG_EN_0 0xCE2054 + +#define mmNIC0_QM1_GLBL_MSG_EN_1 0xCE2058 + +#define mmNIC0_QM1_GLBL_MSG_EN_2 0xCE205C + +#define mmNIC0_QM1_GLBL_MSG_EN_3 0xCE2060 + +#define mmNIC0_QM1_GLBL_MSG_EN_4 0xCE2068 + +#define mmNIC0_QM1_PQ_BASE_LO_0 0xCE2070 + +#define mmNIC0_QM1_PQ_BASE_LO_1 0xCE2074 + +#define mmNIC0_QM1_PQ_BASE_LO_2 0xCE2078 + +#define mmNIC0_QM1_PQ_BASE_LO_3 0xCE207C + +#define mmNIC0_QM1_PQ_BASE_HI_0 0xCE2080 + +#define mmNIC0_QM1_PQ_BASE_HI_1 0xCE2084 + +#define mmNIC0_QM1_PQ_BASE_HI_2 0xCE2088 + +#define mmNIC0_QM1_PQ_BASE_HI_3 0xCE208C + +#define mmNIC0_QM1_PQ_SIZE_0 0xCE2090 + +#define mmNIC0_QM1_PQ_SIZE_1 0xCE2094 + +#define mmNIC0_QM1_PQ_SIZE_2 0xCE2098 + +#define mmNIC0_QM1_PQ_SIZE_3 0xCE209C + +#define mmNIC0_QM1_PQ_PI_0 0xCE20A0 + +#define mmNIC0_QM1_PQ_PI_1 0xCE20A4 + +#define mmNIC0_QM1_PQ_PI_2 0xCE20A8 + +#define mmNIC0_QM1_PQ_PI_3 0xCE20AC + +#define mmNIC0_QM1_PQ_CI_0 0xCE20B0 + +#define mmNIC0_QM1_PQ_CI_1 0xCE20B4 + +#define mmNIC0_QM1_PQ_CI_2 0xCE20B8 + +#define mmNIC0_QM1_PQ_CI_3 0xCE20BC + +#define mmNIC0_QM1_PQ_CFG0_0 0xCE20C0 + +#define mmNIC0_QM1_PQ_CFG0_1 0xCE20C4 + +#define mmNIC0_QM1_PQ_CFG0_2 0xCE20C8 + +#define mmNIC0_QM1_PQ_CFG0_3 0xCE20CC + +#define mmNIC0_QM1_PQ_CFG1_0 0xCE20D0 + +#define mmNIC0_QM1_PQ_CFG1_1 0xCE20D4 + +#define mmNIC0_QM1_PQ_CFG1_2 0xCE20D8 + +#define mmNIC0_QM1_PQ_CFG1_3 0xCE20DC + +#define mmNIC0_QM1_PQ_ARUSER_31_11_0 0xCE20E0 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_1 0xCE20E4 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_2 0xCE20E8 + +#define mmNIC0_QM1_PQ_ARUSER_31_11_3 0xCE20EC + +#define mmNIC0_QM1_PQ_STS0_0 0xCE20F0 + +#define mmNIC0_QM1_PQ_STS0_1 0xCE20F4 + +#define mmNIC0_QM1_PQ_STS0_2 0xCE20F8 + +#define mmNIC0_QM1_PQ_STS0_3 0xCE20FC + +#define mmNIC0_QM1_PQ_STS1_0 0xCE2100 + +#define mmNIC0_QM1_PQ_STS1_1 0xCE2104 + +#define mmNIC0_QM1_PQ_STS1_2 0xCE2108 + +#define mmNIC0_QM1_PQ_STS1_3 0xCE210C + +#define mmNIC0_QM1_CQ_CFG0_0 0xCE2110 + +#define mmNIC0_QM1_CQ_CFG0_1 0xCE2114 + +#define mmNIC0_QM1_CQ_CFG0_2 0xCE2118 + +#define mmNIC0_QM1_CQ_CFG0_3 0xCE211C + +#define mmNIC0_QM1_CQ_CFG0_4 0xCE2120 + +#define mmNIC0_QM1_CQ_CFG1_0 0xCE2124 + +#define mmNIC0_QM1_CQ_CFG1_1 0xCE2128 + +#define mmNIC0_QM1_CQ_CFG1_2 0xCE212C + +#define 
mmNIC0_QM1_CQ_CFG1_3 0xCE2130 + +#define mmNIC0_QM1_CQ_CFG1_4 0xCE2134 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_0 0xCE2138 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_1 0xCE213C + +#define mmNIC0_QM1_CQ_ARUSER_31_11_2 0xCE2140 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_3 0xCE2144 + +#define mmNIC0_QM1_CQ_ARUSER_31_11_4 0xCE2148 + +#define mmNIC0_QM1_CQ_STS0_0 0xCE214C + +#define mmNIC0_QM1_CQ_STS0_1 0xCE2150 + +#define mmNIC0_QM1_CQ_STS0_2 0xCE2154 + +#define mmNIC0_QM1_CQ_STS0_3 0xCE2158 + +#define mmNIC0_QM1_CQ_STS0_4 0xCE215C + +#define mmNIC0_QM1_CQ_STS1_0 0xCE2160 + +#define mmNIC0_QM1_CQ_STS1_1 0xCE2164 + +#define mmNIC0_QM1_CQ_STS1_2 0xCE2168 + +#define mmNIC0_QM1_CQ_STS1_3 0xCE216C + +#define mmNIC0_QM1_CQ_STS1_4 0xCE2170 + +#define mmNIC0_QM1_CQ_PTR_LO_0 0xCE2174 + +#define mmNIC0_QM1_CQ_PTR_HI_0 0xCE2178 + +#define mmNIC0_QM1_CQ_TSIZE_0 0xCE217C + +#define mmNIC0_QM1_CQ_CTL_0 0xCE2180 + +#define mmNIC0_QM1_CQ_PTR_LO_1 0xCE2184 + +#define mmNIC0_QM1_CQ_PTR_HI_1 0xCE2188 + +#define mmNIC0_QM1_CQ_TSIZE_1 0xCE218C + +#define mmNIC0_QM1_CQ_CTL_1 0xCE2190 + +#define mmNIC0_QM1_CQ_PTR_LO_2 0xCE2194 + +#define mmNIC0_QM1_CQ_PTR_HI_2 0xCE2198 + +#define mmNIC0_QM1_CQ_TSIZE_2 0xCE219C + +#define mmNIC0_QM1_CQ_CTL_2 0xCE21A0 + +#define mmNIC0_QM1_CQ_PTR_LO_3 0xCE21A4 + +#define mmNIC0_QM1_CQ_PTR_HI_3 0xCE21A8 + +#define mmNIC0_QM1_CQ_TSIZE_3 0xCE21AC + +#define mmNIC0_QM1_CQ_CTL_3 0xCE21B0 + +#define mmNIC0_QM1_CQ_PTR_LO_4 0xCE21B4 + +#define mmNIC0_QM1_CQ_PTR_HI_4 0xCE21B8 + +#define mmNIC0_QM1_CQ_TSIZE_4 0xCE21BC + +#define mmNIC0_QM1_CQ_CTL_4 0xCE21C0 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_0 0xCE21C4 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_1 0xCE21C8 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_2 0xCE21CC + +#define mmNIC0_QM1_CQ_PTR_LO_STS_3 0xCE21D0 + +#define mmNIC0_QM1_CQ_PTR_LO_STS_4 0xCE21D4 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_0 0xCE21D8 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_1 0xCE21DC + +#define mmNIC0_QM1_CQ_PTR_HI_STS_2 0xCE21E0 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_3 0xCE21E4 + +#define mmNIC0_QM1_CQ_PTR_HI_STS_4 0xCE21E8 + +#define mmNIC0_QM1_CQ_TSIZE_STS_0 0xCE21EC + +#define mmNIC0_QM1_CQ_TSIZE_STS_1 0xCE21F0 + +#define mmNIC0_QM1_CQ_TSIZE_STS_2 0xCE21F4 + +#define mmNIC0_QM1_CQ_TSIZE_STS_3 0xCE21F8 + +#define mmNIC0_QM1_CQ_TSIZE_STS_4 0xCE21FC + +#define mmNIC0_QM1_CQ_CTL_STS_0 0xCE2200 + +#define mmNIC0_QM1_CQ_CTL_STS_1 0xCE2204 + +#define mmNIC0_QM1_CQ_CTL_STS_2 0xCE2208 + +#define mmNIC0_QM1_CQ_CTL_STS_3 0xCE220C + +#define mmNIC0_QM1_CQ_CTL_STS_4 0xCE2210 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_0 0xCE2214 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_1 0xCE2218 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_2 0xCE221C + +#define mmNIC0_QM1_CQ_IFIFO_CNT_3 0xCE2220 + +#define mmNIC0_QM1_CQ_IFIFO_CNT_4 0xCE2224 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 0xCE2228 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 0xCE222C + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 0xCE2230 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 0xCE2234 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 0xCE2238 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 0xCE223C + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 0xCE2240 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 0xCE2244 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 0xCE2248 + +#define mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 0xCE224C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 0xCE2250 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 0xCE2254 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 0xCE2258 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 0xCE225C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 0xCE2260 + +#define 
mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 0xCE2264 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 0xCE2268 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 0xCE226C + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 0xCE2270 + +#define mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 0xCE2274 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 0xCE2278 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 0xCE227C + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 0xCE2280 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 0xCE2284 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 0xCE2288 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 0xCE228C + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 0xCE2290 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 0xCE2294 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 0xCE2298 + +#define mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 0xCE229C + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 0xCE22A0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 0xCE22A4 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 0xCE22A8 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 0xCE22AC + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 0xCE22B0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 0xCE22B4 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 0xCE22B8 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 0xCE22BC + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 0xCE22C0 + +#define mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 0xCE22C4 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 0xCE22C8 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 0xCE22CC + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 0xCE22D0 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 0xCE22D4 + +#define mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 0xCE22D8 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xCE22E0 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xCE22E4 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xCE22E8 + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xCE22EC + +#define mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xCE22F0 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xCE22F4 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xCE22F8 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xCE22FC + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xCE2300 + +#define mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xCE2304 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_0 0xCE2308 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_1 0xCE230C + +#define mmNIC0_QM1_CP_FENCE0_RDATA_2 0xCE2310 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_3 0xCE2314 + +#define mmNIC0_QM1_CP_FENCE0_RDATA_4 0xCE2318 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_0 0xCE231C + +#define mmNIC0_QM1_CP_FENCE1_RDATA_1 0xCE2320 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_2 0xCE2324 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_3 0xCE2328 + +#define mmNIC0_QM1_CP_FENCE1_RDATA_4 0xCE232C + +#define mmNIC0_QM1_CP_FENCE2_RDATA_0 0xCE2330 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_1 0xCE2334 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_2 0xCE2338 + +#define mmNIC0_QM1_CP_FENCE2_RDATA_3 0xCE233C + +#define mmNIC0_QM1_CP_FENCE2_RDATA_4 0xCE2340 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_0 0xCE2344 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_1 0xCE2348 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_2 0xCE234C + +#define mmNIC0_QM1_CP_FENCE3_RDATA_3 0xCE2350 + +#define mmNIC0_QM1_CP_FENCE3_RDATA_4 0xCE2354 + +#define mmNIC0_QM1_CP_FENCE0_CNT_0 0xCE2358 + +#define mmNIC0_QM1_CP_FENCE0_CNT_1 0xCE235C + +#define mmNIC0_QM1_CP_FENCE0_CNT_2 0xCE2360 + +#define mmNIC0_QM1_CP_FENCE0_CNT_3 0xCE2364 + +#define mmNIC0_QM1_CP_FENCE0_CNT_4 0xCE2368 + +#define mmNIC0_QM1_CP_FENCE1_CNT_0 0xCE236C + +#define mmNIC0_QM1_CP_FENCE1_CNT_1 0xCE2370 + +#define 
mmNIC0_QM1_CP_FENCE1_CNT_2 0xCE2374 + +#define mmNIC0_QM1_CP_FENCE1_CNT_3 0xCE2378 + +#define mmNIC0_QM1_CP_FENCE1_CNT_4 0xCE237C + +#define mmNIC0_QM1_CP_FENCE2_CNT_0 0xCE2380 + +#define mmNIC0_QM1_CP_FENCE2_CNT_1 0xCE2384 + +#define mmNIC0_QM1_CP_FENCE2_CNT_2 0xCE2388 + +#define mmNIC0_QM1_CP_FENCE2_CNT_3 0xCE238C + +#define mmNIC0_QM1_CP_FENCE2_CNT_4 0xCE2390 + +#define mmNIC0_QM1_CP_FENCE3_CNT_0 0xCE2394 + +#define mmNIC0_QM1_CP_FENCE3_CNT_1 0xCE2398 + +#define mmNIC0_QM1_CP_FENCE3_CNT_2 0xCE239C + +#define mmNIC0_QM1_CP_FENCE3_CNT_3 0xCE23A0 + +#define mmNIC0_QM1_CP_FENCE3_CNT_4 0xCE23A4 + +#define mmNIC0_QM1_CP_STS_0 0xCE23A8 + +#define mmNIC0_QM1_CP_STS_1 0xCE23AC + +#define mmNIC0_QM1_CP_STS_2 0xCE23B0 + +#define mmNIC0_QM1_CP_STS_3 0xCE23B4 + +#define mmNIC0_QM1_CP_STS_4 0xCE23B8 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_0 0xCE23BC + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_1 0xCE23C0 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_2 0xCE23C4 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_3 0xCE23C8 + +#define mmNIC0_QM1_CP_CURRENT_INST_LO_4 0xCE23CC + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_0 0xCE23D0 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_1 0xCE23D4 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_2 0xCE23D8 + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_3 0xCE23DC + +#define mmNIC0_QM1_CP_CURRENT_INST_HI_4 0xCE23E0 + +#define mmNIC0_QM1_CP_BARRIER_CFG_0 0xCE23F4 + +#define mmNIC0_QM1_CP_BARRIER_CFG_1 0xCE23F8 + +#define mmNIC0_QM1_CP_BARRIER_CFG_2 0xCE23FC + +#define mmNIC0_QM1_CP_BARRIER_CFG_3 0xCE2400 + +#define mmNIC0_QM1_CP_BARRIER_CFG_4 0xCE2404 + +#define mmNIC0_QM1_CP_DBG_0_0 0xCE2408 + +#define mmNIC0_QM1_CP_DBG_0_1 0xCE240C + +#define mmNIC0_QM1_CP_DBG_0_2 0xCE2410 + +#define mmNIC0_QM1_CP_DBG_0_3 0xCE2414 + +#define mmNIC0_QM1_CP_DBG_0_4 0xCE2418 + +#define mmNIC0_QM1_CP_ARUSER_31_11_0 0xCE241C + +#define mmNIC0_QM1_CP_ARUSER_31_11_1 0xCE2420 + +#define mmNIC0_QM1_CP_ARUSER_31_11_2 0xCE2424 + +#define mmNIC0_QM1_CP_ARUSER_31_11_3 0xCE2428 + +#define mmNIC0_QM1_CP_ARUSER_31_11_4 0xCE242C + +#define mmNIC0_QM1_CP_AWUSER_31_11_0 0xCE2430 + +#define mmNIC0_QM1_CP_AWUSER_31_11_1 0xCE2434 + +#define mmNIC0_QM1_CP_AWUSER_31_11_2 0xCE2438 + +#define mmNIC0_QM1_CP_AWUSER_31_11_3 0xCE243C + +#define mmNIC0_QM1_CP_AWUSER_31_11_4 0xCE2440 + +#define mmNIC0_QM1_ARB_CFG_0 0xCE2A00 + +#define mmNIC0_QM1_ARB_CHOISE_Q_PUSH 0xCE2A04 + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_0 0xCE2A08 + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_1 0xCE2A0C + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_2 0xCE2A10 + +#define mmNIC0_QM1_ARB_WRR_WEIGHT_3 0xCE2A14 + +#define mmNIC0_QM1_ARB_CFG_1 0xCE2A18 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 0xCE2A20 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 0xCE2A24 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 0xCE2A28 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 0xCE2A2C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 0xCE2A30 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 0xCE2A34 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 0xCE2A38 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 0xCE2A3C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 0xCE2A40 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 0xCE2A44 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 0xCE2A48 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 0xCE2A4C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 0xCE2A50 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 0xCE2A54 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 0xCE2A58 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 0xCE2A5C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 0xCE2A60 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 0xCE2A64 + +#define 
mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 0xCE2A68 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 0xCE2A6C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 0xCE2A70 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 0xCE2A74 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 0xCE2A78 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 0xCE2A7C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 0xCE2A80 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 0xCE2A84 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 0xCE2A88 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 0xCE2A8C + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 0xCE2A90 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 0xCE2A94 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 0xCE2A98 + +#define mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 0xCE2A9C + +#define mmNIC0_QM1_ARB_MST_CRED_INC 0xCE2AA0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xCE2AA4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xCE2AA8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xCE2AAC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xCE2AB0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xCE2AB4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xCE2AB8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xCE2ABC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xCE2AC0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xCE2AC4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xCE2AC8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xCE2ACC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xCE2AD0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xCE2AD4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xCE2AD8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xCE2ADC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xCE2AE0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xCE2AE4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xCE2AE8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xCE2AEC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xCE2AF0 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xCE2AF4 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xCE2AF8 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xCE2AFC + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xCE2B00 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xCE2B04 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xCE2B08 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xCE2B0C + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xCE2B10 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xCE2B14 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xCE2B18 + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xCE2B1C + +#define mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xCE2B20 + +#define mmNIC0_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xCE2B28 + +#define mmNIC0_QM1_ARB_MST_SLAVE_EN 0xCE2B2C + +#define mmNIC0_QM1_ARB_MST_QUIET_PER 0xCE2B34 + +#define mmNIC0_QM1_ARB_SLV_CHOISE_WDT 0xCE2B38 + +#define mmNIC0_QM1_ARB_SLV_ID 0xCE2B3C + +#define mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT 0xCE2B44 + +#define mmNIC0_QM1_ARB_MSG_AWUSER_31_11 0xCE2B48 + +#define mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP 0xCE2B4C + +#define mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xCE2B50 + +#define mmNIC0_QM1_ARB_BASE_LO 0xCE2B54 + +#define mmNIC0_QM1_ARB_BASE_HI 0xCE2B58 + +#define mmNIC0_QM1_ARB_STATE_STS 0xCE2B80 + +#define mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS 0xCE2B84 + +#define mmNIC0_QM1_ARB_MSG_STS 0xCE2B88 + +#define mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD 0xCE2B8C + +#define mmNIC0_QM1_ARB_ERR_CAUSE 0xCE2B9C + +#define mmNIC0_QM1_ARB_ERR_MSG_EN 0xCE2BA0 + +#define mmNIC0_QM1_ARB_ERR_STS_DRP 0xCE2BA8 + +#define 
mmNIC0_QM1_ARB_MST_CRED_STS_0 0xCE2BB0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_1 0xCE2BB4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_2 0xCE2BB8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_3 0xCE2BBC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_4 0xCE2BC0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_5 0xCE2BC4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_6 0xCE2BC8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_7 0xCE2BCC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_8 0xCE2BD0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_9 0xCE2BD4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_10 0xCE2BD8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_11 0xCE2BDC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_12 0xCE2BE0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_13 0xCE2BE4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_14 0xCE2BE8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_15 0xCE2BEC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_16 0xCE2BF0 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_17 0xCE2BF4 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_18 0xCE2BF8 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_19 0xCE2BFC + +#define mmNIC0_QM1_ARB_MST_CRED_STS_20 0xCE2C00 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_21 0xCE2C04 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_22 0xCE2C08 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_23 0xCE2C0C + +#define mmNIC0_QM1_ARB_MST_CRED_STS_24 0xCE2C10 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_25 0xCE2C14 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_26 0xCE2C18 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_27 0xCE2C1C + +#define mmNIC0_QM1_ARB_MST_CRED_STS_28 0xCE2C20 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_29 0xCE2C24 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_30 0xCE2C28 + +#define mmNIC0_QM1_ARB_MST_CRED_STS_31 0xCE2C2C + +#define mmNIC0_QM1_CGM_CFG 0xCE2C70 + +#define mmNIC0_QM1_CGM_STS 0xCE2C74 + +#define mmNIC0_QM1_CGM_CFG1 0xCE2C78 + +#define mmNIC0_QM1_LOCAL_RANGE_BASE 0xCE2C80 + +#define mmNIC0_QM1_LOCAL_RANGE_SIZE 0xCE2C84 + +#define mmNIC0_QM1_CSMR_STRICT_PRIO_CFG 0xCE2C90 + +#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 0xCE2C94 + +#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 0xCE2C98 + +#define mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 0xCE2C9C + +#define mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 0xCE2CA0 + +#define mmNIC0_QM1_GLBL_AXCACHE 0xCE2CA4 + +#define mmNIC0_QM1_IND_GW_APB_CFG 0xCE2CB0 + +#define mmNIC0_QM1_IND_GW_APB_WDATA 0xCE2CB4 + +#define mmNIC0_QM1_IND_GW_APB_RDATA 0xCE2CB8 + +#define mmNIC0_QM1_IND_GW_APB_STATUS 0xCE2CBC + +#define mmNIC0_QM1_GLBL_ERR_ADDR_LO 0xCE2CD0 + +#define mmNIC0_QM1_GLBL_ERR_ADDR_HI 0xCE2CD4 + +#define mmNIC0_QM1_GLBL_ERR_WDATA 0xCE2CD8 + +#define mmNIC0_QM1_GLBL_MEM_INIT_BUSY 0xCE2D00 + +#endif /* ASIC_REG_NIC0_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h new file mode 100644 index 000000000000..0d1caf057ad0 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC1_QM0_REGS_H_ +#define ASIC_REG_NIC1_QM0_REGS_H_ + +/* + ***************************************** + * NIC1_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC1_QM0_GLBL_CFG0 0xD20000 + +#define mmNIC1_QM0_GLBL_CFG1 0xD20004 + +#define mmNIC1_QM0_GLBL_PROT 0xD20008 + +#define mmNIC1_QM0_GLBL_ERR_CFG 0xD2000C + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_0 0xD20010 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_1 0xD20014 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_2 0xD20018 + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_3 0xD2001C + +#define mmNIC1_QM0_GLBL_SECURE_PROPS_4 0xD20020 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 0xD20024 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 0xD20028 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 0xD2002C + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 0xD20030 + +#define mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 0xD20034 + +#define mmNIC1_QM0_GLBL_STS0 0xD20038 + +#define mmNIC1_QM0_GLBL_STS1_0 0xD20040 + +#define mmNIC1_QM0_GLBL_STS1_1 0xD20044 + +#define mmNIC1_QM0_GLBL_STS1_2 0xD20048 + +#define mmNIC1_QM0_GLBL_STS1_3 0xD2004C + +#define mmNIC1_QM0_GLBL_STS1_4 0xD20050 + +#define mmNIC1_QM0_GLBL_MSG_EN_0 0xD20054 + +#define mmNIC1_QM0_GLBL_MSG_EN_1 0xD20058 + +#define mmNIC1_QM0_GLBL_MSG_EN_2 0xD2005C + +#define mmNIC1_QM0_GLBL_MSG_EN_3 0xD20060 + +#define mmNIC1_QM0_GLBL_MSG_EN_4 0xD20068 + +#define mmNIC1_QM0_PQ_BASE_LO_0 0xD20070 + +#define mmNIC1_QM0_PQ_BASE_LO_1 0xD20074 + +#define mmNIC1_QM0_PQ_BASE_LO_2 0xD20078 + +#define mmNIC1_QM0_PQ_BASE_LO_3 0xD2007C + +#define mmNIC1_QM0_PQ_BASE_HI_0 0xD20080 + +#define mmNIC1_QM0_PQ_BASE_HI_1 0xD20084 + +#define mmNIC1_QM0_PQ_BASE_HI_2 0xD20088 + +#define mmNIC1_QM0_PQ_BASE_HI_3 0xD2008C + +#define mmNIC1_QM0_PQ_SIZE_0 0xD20090 + +#define mmNIC1_QM0_PQ_SIZE_1 0xD20094 + +#define mmNIC1_QM0_PQ_SIZE_2 0xD20098 + +#define mmNIC1_QM0_PQ_SIZE_3 0xD2009C + +#define mmNIC1_QM0_PQ_PI_0 0xD200A0 + +#define mmNIC1_QM0_PQ_PI_1 0xD200A4 + +#define mmNIC1_QM0_PQ_PI_2 0xD200A8 + +#define mmNIC1_QM0_PQ_PI_3 0xD200AC + +#define mmNIC1_QM0_PQ_CI_0 0xD200B0 + +#define mmNIC1_QM0_PQ_CI_1 0xD200B4 + +#define mmNIC1_QM0_PQ_CI_2 0xD200B8 + +#define mmNIC1_QM0_PQ_CI_3 0xD200BC + +#define mmNIC1_QM0_PQ_CFG0_0 0xD200C0 + +#define mmNIC1_QM0_PQ_CFG0_1 0xD200C4 + +#define mmNIC1_QM0_PQ_CFG0_2 0xD200C8 + +#define mmNIC1_QM0_PQ_CFG0_3 0xD200CC + +#define mmNIC1_QM0_PQ_CFG1_0 0xD200D0 + +#define mmNIC1_QM0_PQ_CFG1_1 0xD200D4 + +#define mmNIC1_QM0_PQ_CFG1_2 0xD200D8 + +#define mmNIC1_QM0_PQ_CFG1_3 0xD200DC + +#define mmNIC1_QM0_PQ_ARUSER_31_11_0 0xD200E0 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_1 0xD200E4 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_2 0xD200E8 + +#define mmNIC1_QM0_PQ_ARUSER_31_11_3 0xD200EC + +#define mmNIC1_QM0_PQ_STS0_0 0xD200F0 + +#define mmNIC1_QM0_PQ_STS0_1 0xD200F4 + +#define mmNIC1_QM0_PQ_STS0_2 0xD200F8 + +#define mmNIC1_QM0_PQ_STS0_3 0xD200FC + +#define mmNIC1_QM0_PQ_STS1_0 0xD20100 + +#define mmNIC1_QM0_PQ_STS1_1 0xD20104 + +#define mmNIC1_QM0_PQ_STS1_2 0xD20108 + +#define mmNIC1_QM0_PQ_STS1_3 0xD2010C + +#define mmNIC1_QM0_CQ_CFG0_0 0xD20110 + +#define mmNIC1_QM0_CQ_CFG0_1 0xD20114 + +#define mmNIC1_QM0_CQ_CFG0_2 0xD20118 + +#define mmNIC1_QM0_CQ_CFG0_3 0xD2011C + +#define mmNIC1_QM0_CQ_CFG0_4 0xD20120 + +#define mmNIC1_QM0_CQ_CFG1_0 0xD20124 + +#define mmNIC1_QM0_CQ_CFG1_1 0xD20128 + +#define mmNIC1_QM0_CQ_CFG1_2 0xD2012C + +#define 
mmNIC1_QM0_CQ_CFG1_3 0xD20130 + +#define mmNIC1_QM0_CQ_CFG1_4 0xD20134 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_0 0xD20138 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_1 0xD2013C + +#define mmNIC1_QM0_CQ_ARUSER_31_11_2 0xD20140 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_3 0xD20144 + +#define mmNIC1_QM0_CQ_ARUSER_31_11_4 0xD20148 + +#define mmNIC1_QM0_CQ_STS0_0 0xD2014C + +#define mmNIC1_QM0_CQ_STS0_1 0xD20150 + +#define mmNIC1_QM0_CQ_STS0_2 0xD20154 + +#define mmNIC1_QM0_CQ_STS0_3 0xD20158 + +#define mmNIC1_QM0_CQ_STS0_4 0xD2015C + +#define mmNIC1_QM0_CQ_STS1_0 0xD20160 + +#define mmNIC1_QM0_CQ_STS1_1 0xD20164 + +#define mmNIC1_QM0_CQ_STS1_2 0xD20168 + +#define mmNIC1_QM0_CQ_STS1_3 0xD2016C + +#define mmNIC1_QM0_CQ_STS1_4 0xD20170 + +#define mmNIC1_QM0_CQ_PTR_LO_0 0xD20174 + +#define mmNIC1_QM0_CQ_PTR_HI_0 0xD20178 + +#define mmNIC1_QM0_CQ_TSIZE_0 0xD2017C + +#define mmNIC1_QM0_CQ_CTL_0 0xD20180 + +#define mmNIC1_QM0_CQ_PTR_LO_1 0xD20184 + +#define mmNIC1_QM0_CQ_PTR_HI_1 0xD20188 + +#define mmNIC1_QM0_CQ_TSIZE_1 0xD2018C + +#define mmNIC1_QM0_CQ_CTL_1 0xD20190 + +#define mmNIC1_QM0_CQ_PTR_LO_2 0xD20194 + +#define mmNIC1_QM0_CQ_PTR_HI_2 0xD20198 + +#define mmNIC1_QM0_CQ_TSIZE_2 0xD2019C + +#define mmNIC1_QM0_CQ_CTL_2 0xD201A0 + +#define mmNIC1_QM0_CQ_PTR_LO_3 0xD201A4 + +#define mmNIC1_QM0_CQ_PTR_HI_3 0xD201A8 + +#define mmNIC1_QM0_CQ_TSIZE_3 0xD201AC + +#define mmNIC1_QM0_CQ_CTL_3 0xD201B0 + +#define mmNIC1_QM0_CQ_PTR_LO_4 0xD201B4 + +#define mmNIC1_QM0_CQ_PTR_HI_4 0xD201B8 + +#define mmNIC1_QM0_CQ_TSIZE_4 0xD201BC + +#define mmNIC1_QM0_CQ_CTL_4 0xD201C0 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_0 0xD201C4 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_1 0xD201C8 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_2 0xD201CC + +#define mmNIC1_QM0_CQ_PTR_LO_STS_3 0xD201D0 + +#define mmNIC1_QM0_CQ_PTR_LO_STS_4 0xD201D4 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_0 0xD201D8 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_1 0xD201DC + +#define mmNIC1_QM0_CQ_PTR_HI_STS_2 0xD201E0 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_3 0xD201E4 + +#define mmNIC1_QM0_CQ_PTR_HI_STS_4 0xD201E8 + +#define mmNIC1_QM0_CQ_TSIZE_STS_0 0xD201EC + +#define mmNIC1_QM0_CQ_TSIZE_STS_1 0xD201F0 + +#define mmNIC1_QM0_CQ_TSIZE_STS_2 0xD201F4 + +#define mmNIC1_QM0_CQ_TSIZE_STS_3 0xD201F8 + +#define mmNIC1_QM0_CQ_TSIZE_STS_4 0xD201FC + +#define mmNIC1_QM0_CQ_CTL_STS_0 0xD20200 + +#define mmNIC1_QM0_CQ_CTL_STS_1 0xD20204 + +#define mmNIC1_QM0_CQ_CTL_STS_2 0xD20208 + +#define mmNIC1_QM0_CQ_CTL_STS_3 0xD2020C + +#define mmNIC1_QM0_CQ_CTL_STS_4 0xD20210 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_0 0xD20214 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_1 0xD20218 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_2 0xD2021C + +#define mmNIC1_QM0_CQ_IFIFO_CNT_3 0xD20220 + +#define mmNIC1_QM0_CQ_IFIFO_CNT_4 0xD20224 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 0xD20228 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 0xD2022C + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 0xD20230 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 0xD20234 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 0xD20238 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 0xD2023C + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 0xD20240 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 0xD20244 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 0xD20248 + +#define mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 0xD2024C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 0xD20250 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 0xD20254 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 0xD20258 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 0xD2025C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 0xD20260 + +#define 
mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 0xD20264 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 0xD20268 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 0xD2026C + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 0xD20270 + +#define mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 0xD20274 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 0xD20278 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 0xD2027C + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 0xD20280 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 0xD20284 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 0xD20288 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 0xD2028C + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 0xD20290 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 0xD20294 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 0xD20298 + +#define mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 0xD2029C + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 0xD202A0 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 0xD202A4 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 0xD202A8 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 0xD202AC + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 0xD202B0 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 0xD202B4 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 0xD202B8 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 0xD202BC + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 0xD202C0 + +#define mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 0xD202C4 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 0xD202C8 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 0xD202CC + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 0xD202D0 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 0xD202D4 + +#define mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 0xD202D8 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD202E0 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD202E4 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD202E8 + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD202EC + +#define mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD202F0 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD202F4 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD202F8 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD202FC + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD20300 + +#define mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD20304 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_0 0xD20308 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_1 0xD2030C + +#define mmNIC1_QM0_CP_FENCE0_RDATA_2 0xD20310 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_3 0xD20314 + +#define mmNIC1_QM0_CP_FENCE0_RDATA_4 0xD20318 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_0 0xD2031C + +#define mmNIC1_QM0_CP_FENCE1_RDATA_1 0xD20320 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_2 0xD20324 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_3 0xD20328 + +#define mmNIC1_QM0_CP_FENCE1_RDATA_4 0xD2032C + +#define mmNIC1_QM0_CP_FENCE2_RDATA_0 0xD20330 + +#define mmNIC1_QM0_CP_FENCE2_RDATA_1 0xD20334 + +#define mmNIC1_QM0_CP_FENCE2_RDATA_2 0xD20338 + +#define mmNIC1_QM0_CP_FENCE2_RDATA_3 0xD2033C + +#define mmNIC1_QM0_CP_FENCE2_RDATA_4 0xD20340 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_0 0xD20344 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_1 0xD20348 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_2 0xD2034C + +#define mmNIC1_QM0_CP_FENCE3_RDATA_3 0xD20350 + +#define mmNIC1_QM0_CP_FENCE3_RDATA_4 0xD20354 + +#define mmNIC1_QM0_CP_FENCE0_CNT_0 0xD20358 + +#define mmNIC1_QM0_CP_FENCE0_CNT_1 0xD2035C + +#define mmNIC1_QM0_CP_FENCE0_CNT_2 0xD20360 + +#define mmNIC1_QM0_CP_FENCE0_CNT_3 0xD20364 + +#define mmNIC1_QM0_CP_FENCE0_CNT_4 0xD20368 + +#define mmNIC1_QM0_CP_FENCE1_CNT_0 0xD2036C + +#define mmNIC1_QM0_CP_FENCE1_CNT_1 0xD20370 + +#define 
mmNIC1_QM0_CP_FENCE1_CNT_2 0xD20374 + +#define mmNIC1_QM0_CP_FENCE1_CNT_3 0xD20378 + +#define mmNIC1_QM0_CP_FENCE1_CNT_4 0xD2037C + +#define mmNIC1_QM0_CP_FENCE2_CNT_0 0xD20380 + +#define mmNIC1_QM0_CP_FENCE2_CNT_1 0xD20384 + +#define mmNIC1_QM0_CP_FENCE2_CNT_2 0xD20388 + +#define mmNIC1_QM0_CP_FENCE2_CNT_3 0xD2038C + +#define mmNIC1_QM0_CP_FENCE2_CNT_4 0xD20390 + +#define mmNIC1_QM0_CP_FENCE3_CNT_0 0xD20394 + +#define mmNIC1_QM0_CP_FENCE3_CNT_1 0xD20398 + +#define mmNIC1_QM0_CP_FENCE3_CNT_2 0xD2039C + +#define mmNIC1_QM0_CP_FENCE3_CNT_3 0xD203A0 + +#define mmNIC1_QM0_CP_FENCE3_CNT_4 0xD203A4 + +#define mmNIC1_QM0_CP_STS_0 0xD203A8 + +#define mmNIC1_QM0_CP_STS_1 0xD203AC + +#define mmNIC1_QM0_CP_STS_2 0xD203B0 + +#define mmNIC1_QM0_CP_STS_3 0xD203B4 + +#define mmNIC1_QM0_CP_STS_4 0xD203B8 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_0 0xD203BC + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_1 0xD203C0 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_2 0xD203C4 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_3 0xD203C8 + +#define mmNIC1_QM0_CP_CURRENT_INST_LO_4 0xD203CC + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_0 0xD203D0 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_1 0xD203D4 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_2 0xD203D8 + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_3 0xD203DC + +#define mmNIC1_QM0_CP_CURRENT_INST_HI_4 0xD203E0 + +#define mmNIC1_QM0_CP_BARRIER_CFG_0 0xD203F4 + +#define mmNIC1_QM0_CP_BARRIER_CFG_1 0xD203F8 + +#define mmNIC1_QM0_CP_BARRIER_CFG_2 0xD203FC + +#define mmNIC1_QM0_CP_BARRIER_CFG_3 0xD20400 + +#define mmNIC1_QM0_CP_BARRIER_CFG_4 0xD20404 + +#define mmNIC1_QM0_CP_DBG_0_0 0xD20408 + +#define mmNIC1_QM0_CP_DBG_0_1 0xD2040C + +#define mmNIC1_QM0_CP_DBG_0_2 0xD20410 + +#define mmNIC1_QM0_CP_DBG_0_3 0xD20414 + +#define mmNIC1_QM0_CP_DBG_0_4 0xD20418 + +#define mmNIC1_QM0_CP_ARUSER_31_11_0 0xD2041C + +#define mmNIC1_QM0_CP_ARUSER_31_11_1 0xD20420 + +#define mmNIC1_QM0_CP_ARUSER_31_11_2 0xD20424 + +#define mmNIC1_QM0_CP_ARUSER_31_11_3 0xD20428 + +#define mmNIC1_QM0_CP_ARUSER_31_11_4 0xD2042C + +#define mmNIC1_QM0_CP_AWUSER_31_11_0 0xD20430 + +#define mmNIC1_QM0_CP_AWUSER_31_11_1 0xD20434 + +#define mmNIC1_QM0_CP_AWUSER_31_11_2 0xD20438 + +#define mmNIC1_QM0_CP_AWUSER_31_11_3 0xD2043C + +#define mmNIC1_QM0_CP_AWUSER_31_11_4 0xD20440 + +#define mmNIC1_QM0_ARB_CFG_0 0xD20A00 + +#define mmNIC1_QM0_ARB_CHOISE_Q_PUSH 0xD20A04 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_0 0xD20A08 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_1 0xD20A0C + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_2 0xD20A10 + +#define mmNIC1_QM0_ARB_WRR_WEIGHT_3 0xD20A14 + +#define mmNIC1_QM0_ARB_CFG_1 0xD20A18 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 0xD20A20 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 0xD20A24 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 0xD20A28 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 0xD20A2C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 0xD20A30 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 0xD20A34 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 0xD20A38 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 0xD20A3C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 0xD20A40 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 0xD20A44 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 0xD20A48 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 0xD20A4C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 0xD20A50 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 0xD20A54 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 0xD20A58 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 0xD20A5C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 0xD20A60 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 0xD20A64 + +#define 
mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 0xD20A68 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 0xD20A6C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 0xD20A70 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 0xD20A74 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 0xD20A78 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 0xD20A7C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 0xD20A80 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 0xD20A84 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 0xD20A88 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 0xD20A8C + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 0xD20A90 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 0xD20A94 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 0xD20A98 + +#define mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 0xD20A9C + +#define mmNIC1_QM0_ARB_MST_CRED_INC 0xD20AA0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xD20AA4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xD20AA8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xD20AAC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xD20AB0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xD20AB4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xD20AB8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xD20ABC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xD20AC0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xD20AC4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xD20AC8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xD20ACC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xD20AD0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xD20AD4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xD20AD8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xD20ADC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xD20AE0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xD20AE4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xD20AE8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xD20AEC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xD20AF0 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xD20AF4 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xD20AF8 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xD20AFC + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xD20B00 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xD20B04 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xD20B08 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xD20B0C + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xD20B10 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xD20B14 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xD20B18 + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xD20B1C + +#define mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xD20B20 + +#define mmNIC1_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xD20B28 + +#define mmNIC1_QM0_ARB_MST_SLAVE_EN 0xD20B2C + +#define mmNIC1_QM0_ARB_MST_QUIET_PER 0xD20B34 + +#define mmNIC1_QM0_ARB_SLV_CHOISE_WDT 0xD20B38 + +#define mmNIC1_QM0_ARB_SLV_ID 0xD20B3C + +#define mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT 0xD20B44 + +#define mmNIC1_QM0_ARB_MSG_AWUSER_31_11 0xD20B48 + +#define mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP 0xD20B4C + +#define mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xD20B50 + +#define mmNIC1_QM0_ARB_BASE_LO 0xD20B54 + +#define mmNIC1_QM0_ARB_BASE_HI 0xD20B58 + +#define mmNIC1_QM0_ARB_STATE_STS 0xD20B80 + +#define mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS 0xD20B84 + +#define mmNIC1_QM0_ARB_MSG_STS 0xD20B88 + +#define mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD 0xD20B8C + +#define mmNIC1_QM0_ARB_ERR_CAUSE 0xD20B9C + +#define mmNIC1_QM0_ARB_ERR_MSG_EN 0xD20BA0 + +#define mmNIC1_QM0_ARB_ERR_STS_DRP 0xD20BA8 + +#define 
mmNIC1_QM0_ARB_MST_CRED_STS_0 0xD20BB0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_1 0xD20BB4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_2 0xD20BB8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_3 0xD20BBC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_4 0xD20BC0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_5 0xD20BC4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_6 0xD20BC8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_7 0xD20BCC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_8 0xD20BD0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_9 0xD20BD4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_10 0xD20BD8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_11 0xD20BDC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_12 0xD20BE0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_13 0xD20BE4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_14 0xD20BE8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_15 0xD20BEC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_16 0xD20BF0 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_17 0xD20BF4 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_18 0xD20BF8 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_19 0xD20BFC + +#define mmNIC1_QM0_ARB_MST_CRED_STS_20 0xD20C00 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_21 0xD20C04 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_22 0xD20C08 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_23 0xD20C0C + +#define mmNIC1_QM0_ARB_MST_CRED_STS_24 0xD20C10 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_25 0xD20C14 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_26 0xD20C18 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_27 0xD20C1C + +#define mmNIC1_QM0_ARB_MST_CRED_STS_28 0xD20C20 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_29 0xD20C24 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_30 0xD20C28 + +#define mmNIC1_QM0_ARB_MST_CRED_STS_31 0xD20C2C + +#define mmNIC1_QM0_CGM_CFG 0xD20C70 + +#define mmNIC1_QM0_CGM_STS 0xD20C74 + +#define mmNIC1_QM0_CGM_CFG1 0xD20C78 + +#define mmNIC1_QM0_LOCAL_RANGE_BASE 0xD20C80 + +#define mmNIC1_QM0_LOCAL_RANGE_SIZE 0xD20C84 + +#define mmNIC1_QM0_CSMR_STRICT_PRIO_CFG 0xD20C90 + +#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 0xD20C94 + +#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 0xD20C98 + +#define mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 0xD20C9C + +#define mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 0xD20CA0 + +#define mmNIC1_QM0_GLBL_AXCACHE 0xD20CA4 + +#define mmNIC1_QM0_IND_GW_APB_CFG 0xD20CB0 + +#define mmNIC1_QM0_IND_GW_APB_WDATA 0xD20CB4 + +#define mmNIC1_QM0_IND_GW_APB_RDATA 0xD20CB8 + +#define mmNIC1_QM0_IND_GW_APB_STATUS 0xD20CBC + +#define mmNIC1_QM0_GLBL_ERR_ADDR_LO 0xD20CD0 + +#define mmNIC1_QM0_GLBL_ERR_ADDR_HI 0xD20CD4 + +#define mmNIC1_QM0_GLBL_ERR_WDATA 0xD20CD8 + +#define mmNIC1_QM0_GLBL_MEM_INIT_BUSY 0xD20D00 + +#endif /* ASIC_REG_NIC1_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h new file mode 100644 index 000000000000..1b115ee6d6f0 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC1_QM1_REGS_H_ +#define ASIC_REG_NIC1_QM1_REGS_H_ + +/* + ***************************************** + * NIC1_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC1_QM1_GLBL_CFG0 0xD22000 + +#define mmNIC1_QM1_GLBL_CFG1 0xD22004 + +#define mmNIC1_QM1_GLBL_PROT 0xD22008 + +#define mmNIC1_QM1_GLBL_ERR_CFG 0xD2200C + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_0 0xD22010 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_1 0xD22014 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_2 0xD22018 + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_3 0xD2201C + +#define mmNIC1_QM1_GLBL_SECURE_PROPS_4 0xD22020 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 0xD22024 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 0xD22028 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 0xD2202C + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 0xD22030 + +#define mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 0xD22034 + +#define mmNIC1_QM1_GLBL_STS0 0xD22038 + +#define mmNIC1_QM1_GLBL_STS1_0 0xD22040 + +#define mmNIC1_QM1_GLBL_STS1_1 0xD22044 + +#define mmNIC1_QM1_GLBL_STS1_2 0xD22048 + +#define mmNIC1_QM1_GLBL_STS1_3 0xD2204C + +#define mmNIC1_QM1_GLBL_STS1_4 0xD22050 + +#define mmNIC1_QM1_GLBL_MSG_EN_0 0xD22054 + +#define mmNIC1_QM1_GLBL_MSG_EN_1 0xD22058 + +#define mmNIC1_QM1_GLBL_MSG_EN_2 0xD2205C + +#define mmNIC1_QM1_GLBL_MSG_EN_3 0xD22060 + +#define mmNIC1_QM1_GLBL_MSG_EN_4 0xD22068 + +#define mmNIC1_QM1_PQ_BASE_LO_0 0xD22070 + +#define mmNIC1_QM1_PQ_BASE_LO_1 0xD22074 + +#define mmNIC1_QM1_PQ_BASE_LO_2 0xD22078 + +#define mmNIC1_QM1_PQ_BASE_LO_3 0xD2207C + +#define mmNIC1_QM1_PQ_BASE_HI_0 0xD22080 + +#define mmNIC1_QM1_PQ_BASE_HI_1 0xD22084 + +#define mmNIC1_QM1_PQ_BASE_HI_2 0xD22088 + +#define mmNIC1_QM1_PQ_BASE_HI_3 0xD2208C + +#define mmNIC1_QM1_PQ_SIZE_0 0xD22090 + +#define mmNIC1_QM1_PQ_SIZE_1 0xD22094 + +#define mmNIC1_QM1_PQ_SIZE_2 0xD22098 + +#define mmNIC1_QM1_PQ_SIZE_3 0xD2209C + +#define mmNIC1_QM1_PQ_PI_0 0xD220A0 + +#define mmNIC1_QM1_PQ_PI_1 0xD220A4 + +#define mmNIC1_QM1_PQ_PI_2 0xD220A8 + +#define mmNIC1_QM1_PQ_PI_3 0xD220AC + +#define mmNIC1_QM1_PQ_CI_0 0xD220B0 + +#define mmNIC1_QM1_PQ_CI_1 0xD220B4 + +#define mmNIC1_QM1_PQ_CI_2 0xD220B8 + +#define mmNIC1_QM1_PQ_CI_3 0xD220BC + +#define mmNIC1_QM1_PQ_CFG0_0 0xD220C0 + +#define mmNIC1_QM1_PQ_CFG0_1 0xD220C4 + +#define mmNIC1_QM1_PQ_CFG0_2 0xD220C8 + +#define mmNIC1_QM1_PQ_CFG0_3 0xD220CC + +#define mmNIC1_QM1_PQ_CFG1_0 0xD220D0 + +#define mmNIC1_QM1_PQ_CFG1_1 0xD220D4 + +#define mmNIC1_QM1_PQ_CFG1_2 0xD220D8 + +#define mmNIC1_QM1_PQ_CFG1_3 0xD220DC + +#define mmNIC1_QM1_PQ_ARUSER_31_11_0 0xD220E0 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_1 0xD220E4 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_2 0xD220E8 + +#define mmNIC1_QM1_PQ_ARUSER_31_11_3 0xD220EC + +#define mmNIC1_QM1_PQ_STS0_0 0xD220F0 + +#define mmNIC1_QM1_PQ_STS0_1 0xD220F4 + +#define mmNIC1_QM1_PQ_STS0_2 0xD220F8 + +#define mmNIC1_QM1_PQ_STS0_3 0xD220FC + +#define mmNIC1_QM1_PQ_STS1_0 0xD22100 + +#define mmNIC1_QM1_PQ_STS1_1 0xD22104 + +#define mmNIC1_QM1_PQ_STS1_2 0xD22108 + +#define mmNIC1_QM1_PQ_STS1_3 0xD2210C + +#define mmNIC1_QM1_CQ_CFG0_0 0xD22110 + +#define mmNIC1_QM1_CQ_CFG0_1 0xD22114 + +#define mmNIC1_QM1_CQ_CFG0_2 0xD22118 + +#define mmNIC1_QM1_CQ_CFG0_3 0xD2211C + +#define mmNIC1_QM1_CQ_CFG0_4 0xD22120 + +#define mmNIC1_QM1_CQ_CFG1_0 0xD22124 + +#define mmNIC1_QM1_CQ_CFG1_1 0xD22128 + +#define mmNIC1_QM1_CQ_CFG1_2 0xD2212C + +#define 
mmNIC1_QM1_CQ_CFG1_3 0xD22130 + +#define mmNIC1_QM1_CQ_CFG1_4 0xD22134 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_0 0xD22138 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_1 0xD2213C + +#define mmNIC1_QM1_CQ_ARUSER_31_11_2 0xD22140 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_3 0xD22144 + +#define mmNIC1_QM1_CQ_ARUSER_31_11_4 0xD22148 + +#define mmNIC1_QM1_CQ_STS0_0 0xD2214C + +#define mmNIC1_QM1_CQ_STS0_1 0xD22150 + +#define mmNIC1_QM1_CQ_STS0_2 0xD22154 + +#define mmNIC1_QM1_CQ_STS0_3 0xD22158 + +#define mmNIC1_QM1_CQ_STS0_4 0xD2215C + +#define mmNIC1_QM1_CQ_STS1_0 0xD22160 + +#define mmNIC1_QM1_CQ_STS1_1 0xD22164 + +#define mmNIC1_QM1_CQ_STS1_2 0xD22168 + +#define mmNIC1_QM1_CQ_STS1_3 0xD2216C + +#define mmNIC1_QM1_CQ_STS1_4 0xD22170 + +#define mmNIC1_QM1_CQ_PTR_LO_0 0xD22174 + +#define mmNIC1_QM1_CQ_PTR_HI_0 0xD22178 + +#define mmNIC1_QM1_CQ_TSIZE_0 0xD2217C + +#define mmNIC1_QM1_CQ_CTL_0 0xD22180 + +#define mmNIC1_QM1_CQ_PTR_LO_1 0xD22184 + +#define mmNIC1_QM1_CQ_PTR_HI_1 0xD22188 + +#define mmNIC1_QM1_CQ_TSIZE_1 0xD2218C + +#define mmNIC1_QM1_CQ_CTL_1 0xD22190 + +#define mmNIC1_QM1_CQ_PTR_LO_2 0xD22194 + +#define mmNIC1_QM1_CQ_PTR_HI_2 0xD22198 + +#define mmNIC1_QM1_CQ_TSIZE_2 0xD2219C + +#define mmNIC1_QM1_CQ_CTL_2 0xD221A0 + +#define mmNIC1_QM1_CQ_PTR_LO_3 0xD221A4 + +#define mmNIC1_QM1_CQ_PTR_HI_3 0xD221A8 + +#define mmNIC1_QM1_CQ_TSIZE_3 0xD221AC + +#define mmNIC1_QM1_CQ_CTL_3 0xD221B0 + +#define mmNIC1_QM1_CQ_PTR_LO_4 0xD221B4 + +#define mmNIC1_QM1_CQ_PTR_HI_4 0xD221B8 + +#define mmNIC1_QM1_CQ_TSIZE_4 0xD221BC + +#define mmNIC1_QM1_CQ_CTL_4 0xD221C0 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_0 0xD221C4 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_1 0xD221C8 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_2 0xD221CC + +#define mmNIC1_QM1_CQ_PTR_LO_STS_3 0xD221D0 + +#define mmNIC1_QM1_CQ_PTR_LO_STS_4 0xD221D4 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_0 0xD221D8 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_1 0xD221DC + +#define mmNIC1_QM1_CQ_PTR_HI_STS_2 0xD221E0 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_3 0xD221E4 + +#define mmNIC1_QM1_CQ_PTR_HI_STS_4 0xD221E8 + +#define mmNIC1_QM1_CQ_TSIZE_STS_0 0xD221EC + +#define mmNIC1_QM1_CQ_TSIZE_STS_1 0xD221F0 + +#define mmNIC1_QM1_CQ_TSIZE_STS_2 0xD221F4 + +#define mmNIC1_QM1_CQ_TSIZE_STS_3 0xD221F8 + +#define mmNIC1_QM1_CQ_TSIZE_STS_4 0xD221FC + +#define mmNIC1_QM1_CQ_CTL_STS_0 0xD22200 + +#define mmNIC1_QM1_CQ_CTL_STS_1 0xD22204 + +#define mmNIC1_QM1_CQ_CTL_STS_2 0xD22208 + +#define mmNIC1_QM1_CQ_CTL_STS_3 0xD2220C + +#define mmNIC1_QM1_CQ_CTL_STS_4 0xD22210 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_0 0xD22214 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_1 0xD22218 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_2 0xD2221C + +#define mmNIC1_QM1_CQ_IFIFO_CNT_3 0xD22220 + +#define mmNIC1_QM1_CQ_IFIFO_CNT_4 0xD22224 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 0xD22228 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 0xD2222C + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 0xD22230 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 0xD22234 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 0xD22238 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 0xD2223C + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 0xD22240 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 0xD22244 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 0xD22248 + +#define mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 0xD2224C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 0xD22250 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 0xD22254 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 0xD22258 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 0xD2225C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 0xD22260 + +#define 
mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 0xD22264 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 0xD22268 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 0xD2226C + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 0xD22270 + +#define mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 0xD22274 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 0xD22278 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 0xD2227C + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 0xD22280 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 0xD22284 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 0xD22288 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 0xD2228C + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 0xD22290 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 0xD22294 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 0xD22298 + +#define mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 0xD2229C + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 0xD222A0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 0xD222A4 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 0xD222A8 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 0xD222AC + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 0xD222B0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 0xD222B4 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 0xD222B8 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 0xD222BC + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 0xD222C0 + +#define mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 0xD222C4 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 0xD222C8 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 0xD222CC + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 0xD222D0 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 0xD222D4 + +#define mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 0xD222D8 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD222E0 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD222E4 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD222E8 + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD222EC + +#define mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD222F0 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD222F4 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD222F8 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD222FC + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD22300 + +#define mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD22304 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_0 0xD22308 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_1 0xD2230C + +#define mmNIC1_QM1_CP_FENCE0_RDATA_2 0xD22310 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_3 0xD22314 + +#define mmNIC1_QM1_CP_FENCE0_RDATA_4 0xD22318 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_0 0xD2231C + +#define mmNIC1_QM1_CP_FENCE1_RDATA_1 0xD22320 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_2 0xD22324 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_3 0xD22328 + +#define mmNIC1_QM1_CP_FENCE1_RDATA_4 0xD2232C + +#define mmNIC1_QM1_CP_FENCE2_RDATA_0 0xD22330 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_1 0xD22334 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_2 0xD22338 + +#define mmNIC1_QM1_CP_FENCE2_RDATA_3 0xD2233C + +#define mmNIC1_QM1_CP_FENCE2_RDATA_4 0xD22340 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_0 0xD22344 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_1 0xD22348 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_2 0xD2234C + +#define mmNIC1_QM1_CP_FENCE3_RDATA_3 0xD22350 + +#define mmNIC1_QM1_CP_FENCE3_RDATA_4 0xD22354 + +#define mmNIC1_QM1_CP_FENCE0_CNT_0 0xD22358 + +#define mmNIC1_QM1_CP_FENCE0_CNT_1 0xD2235C + +#define mmNIC1_QM1_CP_FENCE0_CNT_2 0xD22360 + +#define mmNIC1_QM1_CP_FENCE0_CNT_3 0xD22364 + +#define mmNIC1_QM1_CP_FENCE0_CNT_4 0xD22368 + +#define mmNIC1_QM1_CP_FENCE1_CNT_0 0xD2236C + +#define mmNIC1_QM1_CP_FENCE1_CNT_1 0xD22370 + +#define 
mmNIC1_QM1_CP_FENCE1_CNT_2 0xD22374 + +#define mmNIC1_QM1_CP_FENCE1_CNT_3 0xD22378 + +#define mmNIC1_QM1_CP_FENCE1_CNT_4 0xD2237C + +#define mmNIC1_QM1_CP_FENCE2_CNT_0 0xD22380 + +#define mmNIC1_QM1_CP_FENCE2_CNT_1 0xD22384 + +#define mmNIC1_QM1_CP_FENCE2_CNT_2 0xD22388 + +#define mmNIC1_QM1_CP_FENCE2_CNT_3 0xD2238C + +#define mmNIC1_QM1_CP_FENCE2_CNT_4 0xD22390 + +#define mmNIC1_QM1_CP_FENCE3_CNT_0 0xD22394 + +#define mmNIC1_QM1_CP_FENCE3_CNT_1 0xD22398 + +#define mmNIC1_QM1_CP_FENCE3_CNT_2 0xD2239C + +#define mmNIC1_QM1_CP_FENCE3_CNT_3 0xD223A0 + +#define mmNIC1_QM1_CP_FENCE3_CNT_4 0xD223A4 + +#define mmNIC1_QM1_CP_STS_0 0xD223A8 + +#define mmNIC1_QM1_CP_STS_1 0xD223AC + +#define mmNIC1_QM1_CP_STS_2 0xD223B0 + +#define mmNIC1_QM1_CP_STS_3 0xD223B4 + +#define mmNIC1_QM1_CP_STS_4 0xD223B8 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_0 0xD223BC + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_1 0xD223C0 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_2 0xD223C4 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_3 0xD223C8 + +#define mmNIC1_QM1_CP_CURRENT_INST_LO_4 0xD223CC + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_0 0xD223D0 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_1 0xD223D4 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_2 0xD223D8 + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_3 0xD223DC + +#define mmNIC1_QM1_CP_CURRENT_INST_HI_4 0xD223E0 + +#define mmNIC1_QM1_CP_BARRIER_CFG_0 0xD223F4 + +#define mmNIC1_QM1_CP_BARRIER_CFG_1 0xD223F8 + +#define mmNIC1_QM1_CP_BARRIER_CFG_2 0xD223FC + +#define mmNIC1_QM1_CP_BARRIER_CFG_3 0xD22400 + +#define mmNIC1_QM1_CP_BARRIER_CFG_4 0xD22404 + +#define mmNIC1_QM1_CP_DBG_0_0 0xD22408 + +#define mmNIC1_QM1_CP_DBG_0_1 0xD2240C + +#define mmNIC1_QM1_CP_DBG_0_2 0xD22410 + +#define mmNIC1_QM1_CP_DBG_0_3 0xD22414 + +#define mmNIC1_QM1_CP_DBG_0_4 0xD22418 + +#define mmNIC1_QM1_CP_ARUSER_31_11_0 0xD2241C + +#define mmNIC1_QM1_CP_ARUSER_31_11_1 0xD22420 + +#define mmNIC1_QM1_CP_ARUSER_31_11_2 0xD22424 + +#define mmNIC1_QM1_CP_ARUSER_31_11_3 0xD22428 + +#define mmNIC1_QM1_CP_ARUSER_31_11_4 0xD2242C + +#define mmNIC1_QM1_CP_AWUSER_31_11_0 0xD22430 + +#define mmNIC1_QM1_CP_AWUSER_31_11_1 0xD22434 + +#define mmNIC1_QM1_CP_AWUSER_31_11_2 0xD22438 + +#define mmNIC1_QM1_CP_AWUSER_31_11_3 0xD2243C + +#define mmNIC1_QM1_CP_AWUSER_31_11_4 0xD22440 + +#define mmNIC1_QM1_ARB_CFG_0 0xD22A00 + +#define mmNIC1_QM1_ARB_CHOISE_Q_PUSH 0xD22A04 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_0 0xD22A08 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_1 0xD22A0C + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_2 0xD22A10 + +#define mmNIC1_QM1_ARB_WRR_WEIGHT_3 0xD22A14 + +#define mmNIC1_QM1_ARB_CFG_1 0xD22A18 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 0xD22A20 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 0xD22A24 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 0xD22A28 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 0xD22A2C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 0xD22A30 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 0xD22A34 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 0xD22A38 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 0xD22A3C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 0xD22A40 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 0xD22A44 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 0xD22A48 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 0xD22A4C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 0xD22A50 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 0xD22A54 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 0xD22A58 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 0xD22A5C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 0xD22A60 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 0xD22A64 + +#define 
mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 0xD22A68 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 0xD22A6C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 0xD22A70 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 0xD22A74 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 0xD22A78 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 0xD22A7C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 0xD22A80 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 0xD22A84 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 0xD22A88 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 0xD22A8C + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 0xD22A90 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 0xD22A94 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 0xD22A98 + +#define mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 0xD22A9C + +#define mmNIC1_QM1_ARB_MST_CRED_INC 0xD22AA0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xD22AA4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xD22AA8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xD22AAC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xD22AB0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xD22AB4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xD22AB8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xD22ABC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xD22AC0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xD22AC4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xD22AC8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xD22ACC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xD22AD0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xD22AD4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xD22AD8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xD22ADC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xD22AE0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xD22AE4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xD22AE8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xD22AEC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xD22AF0 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xD22AF4 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xD22AF8 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xD22AFC + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xD22B00 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xD22B04 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xD22B08 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xD22B0C + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xD22B10 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xD22B14 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xD22B18 + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xD22B1C + +#define mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xD22B20 + +#define mmNIC1_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xD22B28 + +#define mmNIC1_QM1_ARB_MST_SLAVE_EN 0xD22B2C + +#define mmNIC1_QM1_ARB_MST_QUIET_PER 0xD22B34 + +#define mmNIC1_QM1_ARB_SLV_CHOISE_WDT 0xD22B38 + +#define mmNIC1_QM1_ARB_SLV_ID 0xD22B3C + +#define mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT 0xD22B44 + +#define mmNIC1_QM1_ARB_MSG_AWUSER_31_11 0xD22B48 + +#define mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP 0xD22B4C + +#define mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xD22B50 + +#define mmNIC1_QM1_ARB_BASE_LO 0xD22B54 + +#define mmNIC1_QM1_ARB_BASE_HI 0xD22B58 + +#define mmNIC1_QM1_ARB_STATE_STS 0xD22B80 + +#define mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS 0xD22B84 + +#define mmNIC1_QM1_ARB_MSG_STS 0xD22B88 + +#define mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD 0xD22B8C + +#define mmNIC1_QM1_ARB_ERR_CAUSE 0xD22B9C + +#define mmNIC1_QM1_ARB_ERR_MSG_EN 0xD22BA0 + +#define mmNIC1_QM1_ARB_ERR_STS_DRP 0xD22BA8 + +#define 
mmNIC1_QM1_ARB_MST_CRED_STS_0 0xD22BB0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_1 0xD22BB4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_2 0xD22BB8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_3 0xD22BBC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_4 0xD22BC0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_5 0xD22BC4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_6 0xD22BC8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_7 0xD22BCC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_8 0xD22BD0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_9 0xD22BD4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_10 0xD22BD8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_11 0xD22BDC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_12 0xD22BE0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_13 0xD22BE4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_14 0xD22BE8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_15 0xD22BEC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_16 0xD22BF0 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_17 0xD22BF4 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_18 0xD22BF8 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_19 0xD22BFC + +#define mmNIC1_QM1_ARB_MST_CRED_STS_20 0xD22C00 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_21 0xD22C04 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_22 0xD22C08 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_23 0xD22C0C + +#define mmNIC1_QM1_ARB_MST_CRED_STS_24 0xD22C10 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_25 0xD22C14 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_26 0xD22C18 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_27 0xD22C1C + +#define mmNIC1_QM1_ARB_MST_CRED_STS_28 0xD22C20 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_29 0xD22C24 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_30 0xD22C28 + +#define mmNIC1_QM1_ARB_MST_CRED_STS_31 0xD22C2C + +#define mmNIC1_QM1_CGM_CFG 0xD22C70 + +#define mmNIC1_QM1_CGM_STS 0xD22C74 + +#define mmNIC1_QM1_CGM_CFG1 0xD22C78 + +#define mmNIC1_QM1_LOCAL_RANGE_BASE 0xD22C80 + +#define mmNIC1_QM1_LOCAL_RANGE_SIZE 0xD22C84 + +#define mmNIC1_QM1_CSMR_STRICT_PRIO_CFG 0xD22C90 + +#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 0xD22C94 + +#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 0xD22C98 + +#define mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 0xD22C9C + +#define mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 0xD22CA0 + +#define mmNIC1_QM1_GLBL_AXCACHE 0xD22CA4 + +#define mmNIC1_QM1_IND_GW_APB_CFG 0xD22CB0 + +#define mmNIC1_QM1_IND_GW_APB_WDATA 0xD22CB4 + +#define mmNIC1_QM1_IND_GW_APB_RDATA 0xD22CB8 + +#define mmNIC1_QM1_IND_GW_APB_STATUS 0xD22CBC + +#define mmNIC1_QM1_GLBL_ERR_ADDR_LO 0xD22CD0 + +#define mmNIC1_QM1_GLBL_ERR_ADDR_HI 0xD22CD4 + +#define mmNIC1_QM1_GLBL_ERR_WDATA 0xD22CD8 + +#define mmNIC1_QM1_GLBL_MEM_INIT_BUSY 0xD22D00 + +#endif /* ASIC_REG_NIC1_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h new file mode 100644 index 000000000000..a89116a4586f --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC2_QM0_REGS_H_ +#define ASIC_REG_NIC2_QM0_REGS_H_ + +/* + ***************************************** + * NIC2_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC2_QM0_GLBL_CFG0 0xD60000 + +#define mmNIC2_QM0_GLBL_CFG1 0xD60004 + +#define mmNIC2_QM0_GLBL_PROT 0xD60008 + +#define mmNIC2_QM0_GLBL_ERR_CFG 0xD6000C + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_0 0xD60010 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_1 0xD60014 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_2 0xD60018 + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_3 0xD6001C + +#define mmNIC2_QM0_GLBL_SECURE_PROPS_4 0xD60020 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 0xD60024 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 0xD60028 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 0xD6002C + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 0xD60030 + +#define mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 0xD60034 + +#define mmNIC2_QM0_GLBL_STS0 0xD60038 + +#define mmNIC2_QM0_GLBL_STS1_0 0xD60040 + +#define mmNIC2_QM0_GLBL_STS1_1 0xD60044 + +#define mmNIC2_QM0_GLBL_STS1_2 0xD60048 + +#define mmNIC2_QM0_GLBL_STS1_3 0xD6004C + +#define mmNIC2_QM0_GLBL_STS1_4 0xD60050 + +#define mmNIC2_QM0_GLBL_MSG_EN_0 0xD60054 + +#define mmNIC2_QM0_GLBL_MSG_EN_1 0xD60058 + +#define mmNIC2_QM0_GLBL_MSG_EN_2 0xD6005C + +#define mmNIC2_QM0_GLBL_MSG_EN_3 0xD60060 + +#define mmNIC2_QM0_GLBL_MSG_EN_4 0xD60068 + +#define mmNIC2_QM0_PQ_BASE_LO_0 0xD60070 + +#define mmNIC2_QM0_PQ_BASE_LO_1 0xD60074 + +#define mmNIC2_QM0_PQ_BASE_LO_2 0xD60078 + +#define mmNIC2_QM0_PQ_BASE_LO_3 0xD6007C + +#define mmNIC2_QM0_PQ_BASE_HI_0 0xD60080 + +#define mmNIC2_QM0_PQ_BASE_HI_1 0xD60084 + +#define mmNIC2_QM0_PQ_BASE_HI_2 0xD60088 + +#define mmNIC2_QM0_PQ_BASE_HI_3 0xD6008C + +#define mmNIC2_QM0_PQ_SIZE_0 0xD60090 + +#define mmNIC2_QM0_PQ_SIZE_1 0xD60094 + +#define mmNIC2_QM0_PQ_SIZE_2 0xD60098 + +#define mmNIC2_QM0_PQ_SIZE_3 0xD6009C + +#define mmNIC2_QM0_PQ_PI_0 0xD600A0 + +#define mmNIC2_QM0_PQ_PI_1 0xD600A4 + +#define mmNIC2_QM0_PQ_PI_2 0xD600A8 + +#define mmNIC2_QM0_PQ_PI_3 0xD600AC + +#define mmNIC2_QM0_PQ_CI_0 0xD600B0 + +#define mmNIC2_QM0_PQ_CI_1 0xD600B4 + +#define mmNIC2_QM0_PQ_CI_2 0xD600B8 + +#define mmNIC2_QM0_PQ_CI_3 0xD600BC + +#define mmNIC2_QM0_PQ_CFG0_0 0xD600C0 + +#define mmNIC2_QM0_PQ_CFG0_1 0xD600C4 + +#define mmNIC2_QM0_PQ_CFG0_2 0xD600C8 + +#define mmNIC2_QM0_PQ_CFG0_3 0xD600CC + +#define mmNIC2_QM0_PQ_CFG1_0 0xD600D0 + +#define mmNIC2_QM0_PQ_CFG1_1 0xD600D4 + +#define mmNIC2_QM0_PQ_CFG1_2 0xD600D8 + +#define mmNIC2_QM0_PQ_CFG1_3 0xD600DC + +#define mmNIC2_QM0_PQ_ARUSER_31_11_0 0xD600E0 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_1 0xD600E4 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_2 0xD600E8 + +#define mmNIC2_QM0_PQ_ARUSER_31_11_3 0xD600EC + +#define mmNIC2_QM0_PQ_STS0_0 0xD600F0 + +#define mmNIC2_QM0_PQ_STS0_1 0xD600F4 + +#define mmNIC2_QM0_PQ_STS0_2 0xD600F8 + +#define mmNIC2_QM0_PQ_STS0_3 0xD600FC + +#define mmNIC2_QM0_PQ_STS1_0 0xD60100 + +#define mmNIC2_QM0_PQ_STS1_1 0xD60104 + +#define mmNIC2_QM0_PQ_STS1_2 0xD60108 + +#define mmNIC2_QM0_PQ_STS1_3 0xD6010C + +#define mmNIC2_QM0_CQ_CFG0_0 0xD60110 + +#define mmNIC2_QM0_CQ_CFG0_1 0xD60114 + +#define mmNIC2_QM0_CQ_CFG0_2 0xD60118 + +#define mmNIC2_QM0_CQ_CFG0_3 0xD6011C + +#define mmNIC2_QM0_CQ_CFG0_4 0xD60120 + +#define mmNIC2_QM0_CQ_CFG1_0 0xD60124 + +#define mmNIC2_QM0_CQ_CFG1_1 0xD60128 + +#define mmNIC2_QM0_CQ_CFG1_2 0xD6012C + +#define 
mmNIC2_QM0_CQ_CFG1_3 0xD60130 + +#define mmNIC2_QM0_CQ_CFG1_4 0xD60134 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_0 0xD60138 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_1 0xD6013C + +#define mmNIC2_QM0_CQ_ARUSER_31_11_2 0xD60140 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_3 0xD60144 + +#define mmNIC2_QM0_CQ_ARUSER_31_11_4 0xD60148 + +#define mmNIC2_QM0_CQ_STS0_0 0xD6014C + +#define mmNIC2_QM0_CQ_STS0_1 0xD60150 + +#define mmNIC2_QM0_CQ_STS0_2 0xD60154 + +#define mmNIC2_QM0_CQ_STS0_3 0xD60158 + +#define mmNIC2_QM0_CQ_STS0_4 0xD6015C + +#define mmNIC2_QM0_CQ_STS1_0 0xD60160 + +#define mmNIC2_QM0_CQ_STS1_1 0xD60164 + +#define mmNIC2_QM0_CQ_STS1_2 0xD60168 + +#define mmNIC2_QM0_CQ_STS1_3 0xD6016C + +#define mmNIC2_QM0_CQ_STS1_4 0xD60170 + +#define mmNIC2_QM0_CQ_PTR_LO_0 0xD60174 + +#define mmNIC2_QM0_CQ_PTR_HI_0 0xD60178 + +#define mmNIC2_QM0_CQ_TSIZE_0 0xD6017C + +#define mmNIC2_QM0_CQ_CTL_0 0xD60180 + +#define mmNIC2_QM0_CQ_PTR_LO_1 0xD60184 + +#define mmNIC2_QM0_CQ_PTR_HI_1 0xD60188 + +#define mmNIC2_QM0_CQ_TSIZE_1 0xD6018C + +#define mmNIC2_QM0_CQ_CTL_1 0xD60190 + +#define mmNIC2_QM0_CQ_PTR_LO_2 0xD60194 + +#define mmNIC2_QM0_CQ_PTR_HI_2 0xD60198 + +#define mmNIC2_QM0_CQ_TSIZE_2 0xD6019C + +#define mmNIC2_QM0_CQ_CTL_2 0xD601A0 + +#define mmNIC2_QM0_CQ_PTR_LO_3 0xD601A4 + +#define mmNIC2_QM0_CQ_PTR_HI_3 0xD601A8 + +#define mmNIC2_QM0_CQ_TSIZE_3 0xD601AC + +#define mmNIC2_QM0_CQ_CTL_3 0xD601B0 + +#define mmNIC2_QM0_CQ_PTR_LO_4 0xD601B4 + +#define mmNIC2_QM0_CQ_PTR_HI_4 0xD601B8 + +#define mmNIC2_QM0_CQ_TSIZE_4 0xD601BC + +#define mmNIC2_QM0_CQ_CTL_4 0xD601C0 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_0 0xD601C4 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_1 0xD601C8 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_2 0xD601CC + +#define mmNIC2_QM0_CQ_PTR_LO_STS_3 0xD601D0 + +#define mmNIC2_QM0_CQ_PTR_LO_STS_4 0xD601D4 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_0 0xD601D8 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_1 0xD601DC + +#define mmNIC2_QM0_CQ_PTR_HI_STS_2 0xD601E0 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_3 0xD601E4 + +#define mmNIC2_QM0_CQ_PTR_HI_STS_4 0xD601E8 + +#define mmNIC2_QM0_CQ_TSIZE_STS_0 0xD601EC + +#define mmNIC2_QM0_CQ_TSIZE_STS_1 0xD601F0 + +#define mmNIC2_QM0_CQ_TSIZE_STS_2 0xD601F4 + +#define mmNIC2_QM0_CQ_TSIZE_STS_3 0xD601F8 + +#define mmNIC2_QM0_CQ_TSIZE_STS_4 0xD601FC + +#define mmNIC2_QM0_CQ_CTL_STS_0 0xD60200 + +#define mmNIC2_QM0_CQ_CTL_STS_1 0xD60204 + +#define mmNIC2_QM0_CQ_CTL_STS_2 0xD60208 + +#define mmNIC2_QM0_CQ_CTL_STS_3 0xD6020C + +#define mmNIC2_QM0_CQ_CTL_STS_4 0xD60210 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_0 0xD60214 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_1 0xD60218 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_2 0xD6021C + +#define mmNIC2_QM0_CQ_IFIFO_CNT_3 0xD60220 + +#define mmNIC2_QM0_CQ_IFIFO_CNT_4 0xD60224 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 0xD60228 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 0xD6022C + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 0xD60230 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 0xD60234 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 0xD60238 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 0xD6023C + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 0xD60240 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 0xD60244 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 0xD60248 + +#define mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 0xD6024C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 0xD60250 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 0xD60254 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 0xD60258 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 0xD6025C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 0xD60260 + +#define 
mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 0xD60264 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 0xD60268 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 0xD6026C + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 0xD60270 + +#define mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 0xD60274 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 0xD60278 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 0xD6027C + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 0xD60280 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 0xD60284 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 0xD60288 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 0xD6028C + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 0xD60290 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 0xD60294 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 0xD60298 + +#define mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 0xD6029C + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 0xD602A0 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 0xD602A4 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 0xD602A8 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 0xD602AC + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 0xD602B0 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 0xD602B4 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 0xD602B8 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 0xD602BC + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 0xD602C0 + +#define mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 0xD602C4 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 0xD602C8 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 0xD602CC + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 0xD602D0 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 0xD602D4 + +#define mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 0xD602D8 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD602E0 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD602E4 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD602E8 + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD602EC + +#define mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD602F0 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD602F4 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD602F8 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD602FC + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD60300 + +#define mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD60304 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_0 0xD60308 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_1 0xD6030C + +#define mmNIC2_QM0_CP_FENCE0_RDATA_2 0xD60310 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_3 0xD60314 + +#define mmNIC2_QM0_CP_FENCE0_RDATA_4 0xD60318 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_0 0xD6031C + +#define mmNIC2_QM0_CP_FENCE1_RDATA_1 0xD60320 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_2 0xD60324 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_3 0xD60328 + +#define mmNIC2_QM0_CP_FENCE1_RDATA_4 0xD6032C + +#define mmNIC2_QM0_CP_FENCE2_RDATA_0 0xD60330 + +#define mmNIC2_QM0_CP_FENCE2_RDATA_1 0xD60334 + +#define mmNIC2_QM0_CP_FENCE2_RDATA_2 0xD60338 + +#define mmNIC2_QM0_CP_FENCE2_RDATA_3 0xD6033C + +#define mmNIC2_QM0_CP_FENCE2_RDATA_4 0xD60340 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_0 0xD60344 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_1 0xD60348 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_2 0xD6034C + +#define mmNIC2_QM0_CP_FENCE3_RDATA_3 0xD60350 + +#define mmNIC2_QM0_CP_FENCE3_RDATA_4 0xD60354 + +#define mmNIC2_QM0_CP_FENCE0_CNT_0 0xD60358 + +#define mmNIC2_QM0_CP_FENCE0_CNT_1 0xD6035C + +#define mmNIC2_QM0_CP_FENCE0_CNT_2 0xD60360 + +#define mmNIC2_QM0_CP_FENCE0_CNT_3 0xD60364 + +#define mmNIC2_QM0_CP_FENCE0_CNT_4 0xD60368 + +#define mmNIC2_QM0_CP_FENCE1_CNT_0 0xD6036C + +#define mmNIC2_QM0_CP_FENCE1_CNT_1 0xD60370 + +#define 
mmNIC2_QM0_CP_FENCE1_CNT_2 0xD60374 + +#define mmNIC2_QM0_CP_FENCE1_CNT_3 0xD60378 + +#define mmNIC2_QM0_CP_FENCE1_CNT_4 0xD6037C + +#define mmNIC2_QM0_CP_FENCE2_CNT_0 0xD60380 + +#define mmNIC2_QM0_CP_FENCE2_CNT_1 0xD60384 + +#define mmNIC2_QM0_CP_FENCE2_CNT_2 0xD60388 + +#define mmNIC2_QM0_CP_FENCE2_CNT_3 0xD6038C + +#define mmNIC2_QM0_CP_FENCE2_CNT_4 0xD60390 + +#define mmNIC2_QM0_CP_FENCE3_CNT_0 0xD60394 + +#define mmNIC2_QM0_CP_FENCE3_CNT_1 0xD60398 + +#define mmNIC2_QM0_CP_FENCE3_CNT_2 0xD6039C + +#define mmNIC2_QM0_CP_FENCE3_CNT_3 0xD603A0 + +#define mmNIC2_QM0_CP_FENCE3_CNT_4 0xD603A4 + +#define mmNIC2_QM0_CP_STS_0 0xD603A8 + +#define mmNIC2_QM0_CP_STS_1 0xD603AC + +#define mmNIC2_QM0_CP_STS_2 0xD603B0 + +#define mmNIC2_QM0_CP_STS_3 0xD603B4 + +#define mmNIC2_QM0_CP_STS_4 0xD603B8 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_0 0xD603BC + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_1 0xD603C0 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_2 0xD603C4 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_3 0xD603C8 + +#define mmNIC2_QM0_CP_CURRENT_INST_LO_4 0xD603CC + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_0 0xD603D0 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_1 0xD603D4 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_2 0xD603D8 + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_3 0xD603DC + +#define mmNIC2_QM0_CP_CURRENT_INST_HI_4 0xD603E0 + +#define mmNIC2_QM0_CP_BARRIER_CFG_0 0xD603F4 + +#define mmNIC2_QM0_CP_BARRIER_CFG_1 0xD603F8 + +#define mmNIC2_QM0_CP_BARRIER_CFG_2 0xD603FC + +#define mmNIC2_QM0_CP_BARRIER_CFG_3 0xD60400 + +#define mmNIC2_QM0_CP_BARRIER_CFG_4 0xD60404 + +#define mmNIC2_QM0_CP_DBG_0_0 0xD60408 + +#define mmNIC2_QM0_CP_DBG_0_1 0xD6040C + +#define mmNIC2_QM0_CP_DBG_0_2 0xD60410 + +#define mmNIC2_QM0_CP_DBG_0_3 0xD60414 + +#define mmNIC2_QM0_CP_DBG_0_4 0xD60418 + +#define mmNIC2_QM0_CP_ARUSER_31_11_0 0xD6041C + +#define mmNIC2_QM0_CP_ARUSER_31_11_1 0xD60420 + +#define mmNIC2_QM0_CP_ARUSER_31_11_2 0xD60424 + +#define mmNIC2_QM0_CP_ARUSER_31_11_3 0xD60428 + +#define mmNIC2_QM0_CP_ARUSER_31_11_4 0xD6042C + +#define mmNIC2_QM0_CP_AWUSER_31_11_0 0xD60430 + +#define mmNIC2_QM0_CP_AWUSER_31_11_1 0xD60434 + +#define mmNIC2_QM0_CP_AWUSER_31_11_2 0xD60438 + +#define mmNIC2_QM0_CP_AWUSER_31_11_3 0xD6043C + +#define mmNIC2_QM0_CP_AWUSER_31_11_4 0xD60440 + +#define mmNIC2_QM0_ARB_CFG_0 0xD60A00 + +#define mmNIC2_QM0_ARB_CHOISE_Q_PUSH 0xD60A04 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_0 0xD60A08 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_1 0xD60A0C + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_2 0xD60A10 + +#define mmNIC2_QM0_ARB_WRR_WEIGHT_3 0xD60A14 + +#define mmNIC2_QM0_ARB_CFG_1 0xD60A18 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 0xD60A20 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 0xD60A24 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 0xD60A28 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 0xD60A2C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 0xD60A30 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 0xD60A34 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 0xD60A38 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 0xD60A3C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 0xD60A40 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 0xD60A44 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 0xD60A48 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 0xD60A4C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 0xD60A50 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 0xD60A54 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 0xD60A58 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 0xD60A5C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 0xD60A60 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 0xD60A64 + +#define 
mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 0xD60A68 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 0xD60A6C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 0xD60A70 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 0xD60A74 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 0xD60A78 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 0xD60A7C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 0xD60A80 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 0xD60A84 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 0xD60A88 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 0xD60A8C + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 0xD60A90 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 0xD60A94 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 0xD60A98 + +#define mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 0xD60A9C + +#define mmNIC2_QM0_ARB_MST_CRED_INC 0xD60AA0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xD60AA4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xD60AA8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xD60AAC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xD60AB0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xD60AB4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xD60AB8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xD60ABC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xD60AC0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xD60AC4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xD60AC8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xD60ACC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xD60AD0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xD60AD4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xD60AD8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xD60ADC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xD60AE0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xD60AE4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xD60AE8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xD60AEC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xD60AF0 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xD60AF4 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xD60AF8 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xD60AFC + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xD60B00 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xD60B04 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xD60B08 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xD60B0C + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xD60B10 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xD60B14 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xD60B18 + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xD60B1C + +#define mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xD60B20 + +#define mmNIC2_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xD60B28 + +#define mmNIC2_QM0_ARB_MST_SLAVE_EN 0xD60B2C + +#define mmNIC2_QM0_ARB_MST_QUIET_PER 0xD60B34 + +#define mmNIC2_QM0_ARB_SLV_CHOISE_WDT 0xD60B38 + +#define mmNIC2_QM0_ARB_SLV_ID 0xD60B3C + +#define mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT 0xD60B44 + +#define mmNIC2_QM0_ARB_MSG_AWUSER_31_11 0xD60B48 + +#define mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP 0xD60B4C + +#define mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xD60B50 + +#define mmNIC2_QM0_ARB_BASE_LO 0xD60B54 + +#define mmNIC2_QM0_ARB_BASE_HI 0xD60B58 + +#define mmNIC2_QM0_ARB_STATE_STS 0xD60B80 + +#define mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS 0xD60B84 + +#define mmNIC2_QM0_ARB_MSG_STS 0xD60B88 + +#define mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD 0xD60B8C + +#define mmNIC2_QM0_ARB_ERR_CAUSE 0xD60B9C + +#define mmNIC2_QM0_ARB_ERR_MSG_EN 0xD60BA0 + +#define mmNIC2_QM0_ARB_ERR_STS_DRP 0xD60BA8 + +#define 
mmNIC2_QM0_ARB_MST_CRED_STS_0 0xD60BB0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_1 0xD60BB4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_2 0xD60BB8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_3 0xD60BBC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_4 0xD60BC0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_5 0xD60BC4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_6 0xD60BC8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_7 0xD60BCC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_8 0xD60BD0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_9 0xD60BD4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_10 0xD60BD8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_11 0xD60BDC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_12 0xD60BE0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_13 0xD60BE4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_14 0xD60BE8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_15 0xD60BEC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_16 0xD60BF0 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_17 0xD60BF4 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_18 0xD60BF8 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_19 0xD60BFC + +#define mmNIC2_QM0_ARB_MST_CRED_STS_20 0xD60C00 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_21 0xD60C04 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_22 0xD60C08 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_23 0xD60C0C + +#define mmNIC2_QM0_ARB_MST_CRED_STS_24 0xD60C10 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_25 0xD60C14 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_26 0xD60C18 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_27 0xD60C1C + +#define mmNIC2_QM0_ARB_MST_CRED_STS_28 0xD60C20 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_29 0xD60C24 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_30 0xD60C28 + +#define mmNIC2_QM0_ARB_MST_CRED_STS_31 0xD60C2C + +#define mmNIC2_QM0_CGM_CFG 0xD60C70 + +#define mmNIC2_QM0_CGM_STS 0xD60C74 + +#define mmNIC2_QM0_CGM_CFG1 0xD60C78 + +#define mmNIC2_QM0_LOCAL_RANGE_BASE 0xD60C80 + +#define mmNIC2_QM0_LOCAL_RANGE_SIZE 0xD60C84 + +#define mmNIC2_QM0_CSMR_STRICT_PRIO_CFG 0xD60C90 + +#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 0xD60C94 + +#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 0xD60C98 + +#define mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 0xD60C9C + +#define mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 0xD60CA0 + +#define mmNIC2_QM0_GLBL_AXCACHE 0xD60CA4 + +#define mmNIC2_QM0_IND_GW_APB_CFG 0xD60CB0 + +#define mmNIC2_QM0_IND_GW_APB_WDATA 0xD60CB4 + +#define mmNIC2_QM0_IND_GW_APB_RDATA 0xD60CB8 + +#define mmNIC2_QM0_IND_GW_APB_STATUS 0xD60CBC + +#define mmNIC2_QM0_GLBL_ERR_ADDR_LO 0xD60CD0 + +#define mmNIC2_QM0_GLBL_ERR_ADDR_HI 0xD60CD4 + +#define mmNIC2_QM0_GLBL_ERR_WDATA 0xD60CD8 + +#define mmNIC2_QM0_GLBL_MEM_INIT_BUSY 0xD60D00 + +#endif /* ASIC_REG_NIC2_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h new file mode 100644 index 000000000000..b7f091ddc89c --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC2_QM1_REGS_H_ +#define ASIC_REG_NIC2_QM1_REGS_H_ + +/* + ***************************************** + * NIC2_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC2_QM1_GLBL_CFG0 0xD62000 + +#define mmNIC2_QM1_GLBL_CFG1 0xD62004 + +#define mmNIC2_QM1_GLBL_PROT 0xD62008 + +#define mmNIC2_QM1_GLBL_ERR_CFG 0xD6200C + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_0 0xD62010 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_1 0xD62014 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_2 0xD62018 + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_3 0xD6201C + +#define mmNIC2_QM1_GLBL_SECURE_PROPS_4 0xD62020 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 0xD62024 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 0xD62028 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 0xD6202C + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 0xD62030 + +#define mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 0xD62034 + +#define mmNIC2_QM1_GLBL_STS0 0xD62038 + +#define mmNIC2_QM1_GLBL_STS1_0 0xD62040 + +#define mmNIC2_QM1_GLBL_STS1_1 0xD62044 + +#define mmNIC2_QM1_GLBL_STS1_2 0xD62048 + +#define mmNIC2_QM1_GLBL_STS1_3 0xD6204C + +#define mmNIC2_QM1_GLBL_STS1_4 0xD62050 + +#define mmNIC2_QM1_GLBL_MSG_EN_0 0xD62054 + +#define mmNIC2_QM1_GLBL_MSG_EN_1 0xD62058 + +#define mmNIC2_QM1_GLBL_MSG_EN_2 0xD6205C + +#define mmNIC2_QM1_GLBL_MSG_EN_3 0xD62060 + +#define mmNIC2_QM1_GLBL_MSG_EN_4 0xD62068 + +#define mmNIC2_QM1_PQ_BASE_LO_0 0xD62070 + +#define mmNIC2_QM1_PQ_BASE_LO_1 0xD62074 + +#define mmNIC2_QM1_PQ_BASE_LO_2 0xD62078 + +#define mmNIC2_QM1_PQ_BASE_LO_3 0xD6207C + +#define mmNIC2_QM1_PQ_BASE_HI_0 0xD62080 + +#define mmNIC2_QM1_PQ_BASE_HI_1 0xD62084 + +#define mmNIC2_QM1_PQ_BASE_HI_2 0xD62088 + +#define mmNIC2_QM1_PQ_BASE_HI_3 0xD6208C + +#define mmNIC2_QM1_PQ_SIZE_0 0xD62090 + +#define mmNIC2_QM1_PQ_SIZE_1 0xD62094 + +#define mmNIC2_QM1_PQ_SIZE_2 0xD62098 + +#define mmNIC2_QM1_PQ_SIZE_3 0xD6209C + +#define mmNIC2_QM1_PQ_PI_0 0xD620A0 + +#define mmNIC2_QM1_PQ_PI_1 0xD620A4 + +#define mmNIC2_QM1_PQ_PI_2 0xD620A8 + +#define mmNIC2_QM1_PQ_PI_3 0xD620AC + +#define mmNIC2_QM1_PQ_CI_0 0xD620B0 + +#define mmNIC2_QM1_PQ_CI_1 0xD620B4 + +#define mmNIC2_QM1_PQ_CI_2 0xD620B8 + +#define mmNIC2_QM1_PQ_CI_3 0xD620BC + +#define mmNIC2_QM1_PQ_CFG0_0 0xD620C0 + +#define mmNIC2_QM1_PQ_CFG0_1 0xD620C4 + +#define mmNIC2_QM1_PQ_CFG0_2 0xD620C8 + +#define mmNIC2_QM1_PQ_CFG0_3 0xD620CC + +#define mmNIC2_QM1_PQ_CFG1_0 0xD620D0 + +#define mmNIC2_QM1_PQ_CFG1_1 0xD620D4 + +#define mmNIC2_QM1_PQ_CFG1_2 0xD620D8 + +#define mmNIC2_QM1_PQ_CFG1_3 0xD620DC + +#define mmNIC2_QM1_PQ_ARUSER_31_11_0 0xD620E0 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_1 0xD620E4 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_2 0xD620E8 + +#define mmNIC2_QM1_PQ_ARUSER_31_11_3 0xD620EC + +#define mmNIC2_QM1_PQ_STS0_0 0xD620F0 + +#define mmNIC2_QM1_PQ_STS0_1 0xD620F4 + +#define mmNIC2_QM1_PQ_STS0_2 0xD620F8 + +#define mmNIC2_QM1_PQ_STS0_3 0xD620FC + +#define mmNIC2_QM1_PQ_STS1_0 0xD62100 + +#define mmNIC2_QM1_PQ_STS1_1 0xD62104 + +#define mmNIC2_QM1_PQ_STS1_2 0xD62108 + +#define mmNIC2_QM1_PQ_STS1_3 0xD6210C + +#define mmNIC2_QM1_CQ_CFG0_0 0xD62110 + +#define mmNIC2_QM1_CQ_CFG0_1 0xD62114 + +#define mmNIC2_QM1_CQ_CFG0_2 0xD62118 + +#define mmNIC2_QM1_CQ_CFG0_3 0xD6211C + +#define mmNIC2_QM1_CQ_CFG0_4 0xD62120 + +#define mmNIC2_QM1_CQ_CFG1_0 0xD62124 + +#define mmNIC2_QM1_CQ_CFG1_1 0xD62128 + +#define mmNIC2_QM1_CQ_CFG1_2 0xD6212C + +#define 
mmNIC2_QM1_CQ_CFG1_3 0xD62130 + +#define mmNIC2_QM1_CQ_CFG1_4 0xD62134 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_0 0xD62138 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_1 0xD6213C + +#define mmNIC2_QM1_CQ_ARUSER_31_11_2 0xD62140 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_3 0xD62144 + +#define mmNIC2_QM1_CQ_ARUSER_31_11_4 0xD62148 + +#define mmNIC2_QM1_CQ_STS0_0 0xD6214C + +#define mmNIC2_QM1_CQ_STS0_1 0xD62150 + +#define mmNIC2_QM1_CQ_STS0_2 0xD62154 + +#define mmNIC2_QM1_CQ_STS0_3 0xD62158 + +#define mmNIC2_QM1_CQ_STS0_4 0xD6215C + +#define mmNIC2_QM1_CQ_STS1_0 0xD62160 + +#define mmNIC2_QM1_CQ_STS1_1 0xD62164 + +#define mmNIC2_QM1_CQ_STS1_2 0xD62168 + +#define mmNIC2_QM1_CQ_STS1_3 0xD6216C + +#define mmNIC2_QM1_CQ_STS1_4 0xD62170 + +#define mmNIC2_QM1_CQ_PTR_LO_0 0xD62174 + +#define mmNIC2_QM1_CQ_PTR_HI_0 0xD62178 + +#define mmNIC2_QM1_CQ_TSIZE_0 0xD6217C + +#define mmNIC2_QM1_CQ_CTL_0 0xD62180 + +#define mmNIC2_QM1_CQ_PTR_LO_1 0xD62184 + +#define mmNIC2_QM1_CQ_PTR_HI_1 0xD62188 + +#define mmNIC2_QM1_CQ_TSIZE_1 0xD6218C + +#define mmNIC2_QM1_CQ_CTL_1 0xD62190 + +#define mmNIC2_QM1_CQ_PTR_LO_2 0xD62194 + +#define mmNIC2_QM1_CQ_PTR_HI_2 0xD62198 + +#define mmNIC2_QM1_CQ_TSIZE_2 0xD6219C + +#define mmNIC2_QM1_CQ_CTL_2 0xD621A0 + +#define mmNIC2_QM1_CQ_PTR_LO_3 0xD621A4 + +#define mmNIC2_QM1_CQ_PTR_HI_3 0xD621A8 + +#define mmNIC2_QM1_CQ_TSIZE_3 0xD621AC + +#define mmNIC2_QM1_CQ_CTL_3 0xD621B0 + +#define mmNIC2_QM1_CQ_PTR_LO_4 0xD621B4 + +#define mmNIC2_QM1_CQ_PTR_HI_4 0xD621B8 + +#define mmNIC2_QM1_CQ_TSIZE_4 0xD621BC + +#define mmNIC2_QM1_CQ_CTL_4 0xD621C0 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_0 0xD621C4 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_1 0xD621C8 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_2 0xD621CC + +#define mmNIC2_QM1_CQ_PTR_LO_STS_3 0xD621D0 + +#define mmNIC2_QM1_CQ_PTR_LO_STS_4 0xD621D4 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_0 0xD621D8 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_1 0xD621DC + +#define mmNIC2_QM1_CQ_PTR_HI_STS_2 0xD621E0 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_3 0xD621E4 + +#define mmNIC2_QM1_CQ_PTR_HI_STS_4 0xD621E8 + +#define mmNIC2_QM1_CQ_TSIZE_STS_0 0xD621EC + +#define mmNIC2_QM1_CQ_TSIZE_STS_1 0xD621F0 + +#define mmNIC2_QM1_CQ_TSIZE_STS_2 0xD621F4 + +#define mmNIC2_QM1_CQ_TSIZE_STS_3 0xD621F8 + +#define mmNIC2_QM1_CQ_TSIZE_STS_4 0xD621FC + +#define mmNIC2_QM1_CQ_CTL_STS_0 0xD62200 + +#define mmNIC2_QM1_CQ_CTL_STS_1 0xD62204 + +#define mmNIC2_QM1_CQ_CTL_STS_2 0xD62208 + +#define mmNIC2_QM1_CQ_CTL_STS_3 0xD6220C + +#define mmNIC2_QM1_CQ_CTL_STS_4 0xD62210 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_0 0xD62214 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_1 0xD62218 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_2 0xD6221C + +#define mmNIC2_QM1_CQ_IFIFO_CNT_3 0xD62220 + +#define mmNIC2_QM1_CQ_IFIFO_CNT_4 0xD62224 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 0xD62228 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 0xD6222C + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 0xD62230 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 0xD62234 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 0xD62238 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 0xD6223C + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 0xD62240 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 0xD62244 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 0xD62248 + +#define mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 0xD6224C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 0xD62250 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 0xD62254 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 0xD62258 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 0xD6225C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 0xD62260 + +#define 
mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 0xD62264 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 0xD62268 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 0xD6226C + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 0xD62270 + +#define mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 0xD62274 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 0xD62278 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 0xD6227C + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 0xD62280 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 0xD62284 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 0xD62288 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 0xD6228C + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 0xD62290 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 0xD62294 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 0xD62298 + +#define mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 0xD6229C + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 0xD622A0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 0xD622A4 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 0xD622A8 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 0xD622AC + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 0xD622B0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 0xD622B4 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 0xD622B8 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 0xD622BC + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 0xD622C0 + +#define mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 0xD622C4 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 0xD622C8 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 0xD622CC + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 0xD622D0 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 0xD622D4 + +#define mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 0xD622D8 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xD622E0 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xD622E4 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xD622E8 + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xD622EC + +#define mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xD622F0 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xD622F4 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xD622F8 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xD622FC + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xD62300 + +#define mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xD62304 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_0 0xD62308 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_1 0xD6230C + +#define mmNIC2_QM1_CP_FENCE0_RDATA_2 0xD62310 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_3 0xD62314 + +#define mmNIC2_QM1_CP_FENCE0_RDATA_4 0xD62318 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_0 0xD6231C + +#define mmNIC2_QM1_CP_FENCE1_RDATA_1 0xD62320 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_2 0xD62324 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_3 0xD62328 + +#define mmNIC2_QM1_CP_FENCE1_RDATA_4 0xD6232C + +#define mmNIC2_QM1_CP_FENCE2_RDATA_0 0xD62330 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_1 0xD62334 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_2 0xD62338 + +#define mmNIC2_QM1_CP_FENCE2_RDATA_3 0xD6233C + +#define mmNIC2_QM1_CP_FENCE2_RDATA_4 0xD62340 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_0 0xD62344 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_1 0xD62348 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_2 0xD6234C + +#define mmNIC2_QM1_CP_FENCE3_RDATA_3 0xD62350 + +#define mmNIC2_QM1_CP_FENCE3_RDATA_4 0xD62354 + +#define mmNIC2_QM1_CP_FENCE0_CNT_0 0xD62358 + +#define mmNIC2_QM1_CP_FENCE0_CNT_1 0xD6235C + +#define mmNIC2_QM1_CP_FENCE0_CNT_2 0xD62360 + +#define mmNIC2_QM1_CP_FENCE0_CNT_3 0xD62364 + +#define mmNIC2_QM1_CP_FENCE0_CNT_4 0xD62368 + +#define mmNIC2_QM1_CP_FENCE1_CNT_0 0xD6236C + +#define mmNIC2_QM1_CP_FENCE1_CNT_1 0xD62370 + +#define 
mmNIC2_QM1_CP_FENCE1_CNT_2 0xD62374 + +#define mmNIC2_QM1_CP_FENCE1_CNT_3 0xD62378 + +#define mmNIC2_QM1_CP_FENCE1_CNT_4 0xD6237C + +#define mmNIC2_QM1_CP_FENCE2_CNT_0 0xD62380 + +#define mmNIC2_QM1_CP_FENCE2_CNT_1 0xD62384 + +#define mmNIC2_QM1_CP_FENCE2_CNT_2 0xD62388 + +#define mmNIC2_QM1_CP_FENCE2_CNT_3 0xD6238C + +#define mmNIC2_QM1_CP_FENCE2_CNT_4 0xD62390 + +#define mmNIC2_QM1_CP_FENCE3_CNT_0 0xD62394 + +#define mmNIC2_QM1_CP_FENCE3_CNT_1 0xD62398 + +#define mmNIC2_QM1_CP_FENCE3_CNT_2 0xD6239C + +#define mmNIC2_QM1_CP_FENCE3_CNT_3 0xD623A0 + +#define mmNIC2_QM1_CP_FENCE3_CNT_4 0xD623A4 + +#define mmNIC2_QM1_CP_STS_0 0xD623A8 + +#define mmNIC2_QM1_CP_STS_1 0xD623AC + +#define mmNIC2_QM1_CP_STS_2 0xD623B0 + +#define mmNIC2_QM1_CP_STS_3 0xD623B4 + +#define mmNIC2_QM1_CP_STS_4 0xD623B8 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_0 0xD623BC + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_1 0xD623C0 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_2 0xD623C4 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_3 0xD623C8 + +#define mmNIC2_QM1_CP_CURRENT_INST_LO_4 0xD623CC + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_0 0xD623D0 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_1 0xD623D4 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_2 0xD623D8 + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_3 0xD623DC + +#define mmNIC2_QM1_CP_CURRENT_INST_HI_4 0xD623E0 + +#define mmNIC2_QM1_CP_BARRIER_CFG_0 0xD623F4 + +#define mmNIC2_QM1_CP_BARRIER_CFG_1 0xD623F8 + +#define mmNIC2_QM1_CP_BARRIER_CFG_2 0xD623FC + +#define mmNIC2_QM1_CP_BARRIER_CFG_3 0xD62400 + +#define mmNIC2_QM1_CP_BARRIER_CFG_4 0xD62404 + +#define mmNIC2_QM1_CP_DBG_0_0 0xD62408 + +#define mmNIC2_QM1_CP_DBG_0_1 0xD6240C + +#define mmNIC2_QM1_CP_DBG_0_2 0xD62410 + +#define mmNIC2_QM1_CP_DBG_0_3 0xD62414 + +#define mmNIC2_QM1_CP_DBG_0_4 0xD62418 + +#define mmNIC2_QM1_CP_ARUSER_31_11_0 0xD6241C + +#define mmNIC2_QM1_CP_ARUSER_31_11_1 0xD62420 + +#define mmNIC2_QM1_CP_ARUSER_31_11_2 0xD62424 + +#define mmNIC2_QM1_CP_ARUSER_31_11_3 0xD62428 + +#define mmNIC2_QM1_CP_ARUSER_31_11_4 0xD6242C + +#define mmNIC2_QM1_CP_AWUSER_31_11_0 0xD62430 + +#define mmNIC2_QM1_CP_AWUSER_31_11_1 0xD62434 + +#define mmNIC2_QM1_CP_AWUSER_31_11_2 0xD62438 + +#define mmNIC2_QM1_CP_AWUSER_31_11_3 0xD6243C + +#define mmNIC2_QM1_CP_AWUSER_31_11_4 0xD62440 + +#define mmNIC2_QM1_ARB_CFG_0 0xD62A00 + +#define mmNIC2_QM1_ARB_CHOISE_Q_PUSH 0xD62A04 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_0 0xD62A08 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_1 0xD62A0C + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_2 0xD62A10 + +#define mmNIC2_QM1_ARB_WRR_WEIGHT_3 0xD62A14 + +#define mmNIC2_QM1_ARB_CFG_1 0xD62A18 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 0xD62A20 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 0xD62A24 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 0xD62A28 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 0xD62A2C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 0xD62A30 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 0xD62A34 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 0xD62A38 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 0xD62A3C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 0xD62A40 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 0xD62A44 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 0xD62A48 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 0xD62A4C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 0xD62A50 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 0xD62A54 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 0xD62A58 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 0xD62A5C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 0xD62A60 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 0xD62A64 + +#define 
mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 0xD62A68 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 0xD62A6C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 0xD62A70 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 0xD62A74 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 0xD62A78 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 0xD62A7C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 0xD62A80 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 0xD62A84 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 0xD62A88 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 0xD62A8C + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 0xD62A90 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 0xD62A94 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 0xD62A98 + +#define mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 0xD62A9C + +#define mmNIC2_QM1_ARB_MST_CRED_INC 0xD62AA0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xD62AA4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xD62AA8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xD62AAC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xD62AB0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xD62AB4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xD62AB8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xD62ABC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xD62AC0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xD62AC4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xD62AC8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xD62ACC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xD62AD0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xD62AD4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xD62AD8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xD62ADC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xD62AE0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xD62AE4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xD62AE8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xD62AEC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xD62AF0 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xD62AF4 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xD62AF8 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xD62AFC + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xD62B00 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xD62B04 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xD62B08 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xD62B0C + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xD62B10 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xD62B14 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xD62B18 + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xD62B1C + +#define mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xD62B20 + +#define mmNIC2_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xD62B28 + +#define mmNIC2_QM1_ARB_MST_SLAVE_EN 0xD62B2C + +#define mmNIC2_QM1_ARB_MST_QUIET_PER 0xD62B34 + +#define mmNIC2_QM1_ARB_SLV_CHOISE_WDT 0xD62B38 + +#define mmNIC2_QM1_ARB_SLV_ID 0xD62B3C + +#define mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT 0xD62B44 + +#define mmNIC2_QM1_ARB_MSG_AWUSER_31_11 0xD62B48 + +#define mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP 0xD62B4C + +#define mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xD62B50 + +#define mmNIC2_QM1_ARB_BASE_LO 0xD62B54 + +#define mmNIC2_QM1_ARB_BASE_HI 0xD62B58 + +#define mmNIC2_QM1_ARB_STATE_STS 0xD62B80 + +#define mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS 0xD62B84 + +#define mmNIC2_QM1_ARB_MSG_STS 0xD62B88 + +#define mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD 0xD62B8C + +#define mmNIC2_QM1_ARB_ERR_CAUSE 0xD62B9C + +#define mmNIC2_QM1_ARB_ERR_MSG_EN 0xD62BA0 + +#define mmNIC2_QM1_ARB_ERR_STS_DRP 0xD62BA8 + +#define 
mmNIC2_QM1_ARB_MST_CRED_STS_0 0xD62BB0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_1 0xD62BB4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_2 0xD62BB8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_3 0xD62BBC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_4 0xD62BC0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_5 0xD62BC4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_6 0xD62BC8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_7 0xD62BCC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_8 0xD62BD0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_9 0xD62BD4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_10 0xD62BD8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_11 0xD62BDC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_12 0xD62BE0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_13 0xD62BE4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_14 0xD62BE8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_15 0xD62BEC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_16 0xD62BF0 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_17 0xD62BF4 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_18 0xD62BF8 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_19 0xD62BFC + +#define mmNIC2_QM1_ARB_MST_CRED_STS_20 0xD62C00 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_21 0xD62C04 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_22 0xD62C08 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_23 0xD62C0C + +#define mmNIC2_QM1_ARB_MST_CRED_STS_24 0xD62C10 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_25 0xD62C14 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_26 0xD62C18 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_27 0xD62C1C + +#define mmNIC2_QM1_ARB_MST_CRED_STS_28 0xD62C20 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_29 0xD62C24 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_30 0xD62C28 + +#define mmNIC2_QM1_ARB_MST_CRED_STS_31 0xD62C2C + +#define mmNIC2_QM1_CGM_CFG 0xD62C70 + +#define mmNIC2_QM1_CGM_STS 0xD62C74 + +#define mmNIC2_QM1_CGM_CFG1 0xD62C78 + +#define mmNIC2_QM1_LOCAL_RANGE_BASE 0xD62C80 + +#define mmNIC2_QM1_LOCAL_RANGE_SIZE 0xD62C84 + +#define mmNIC2_QM1_CSMR_STRICT_PRIO_CFG 0xD62C90 + +#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 0xD62C94 + +#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 0xD62C98 + +#define mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 0xD62C9C + +#define mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 0xD62CA0 + +#define mmNIC2_QM1_GLBL_AXCACHE 0xD62CA4 + +#define mmNIC2_QM1_IND_GW_APB_CFG 0xD62CB0 + +#define mmNIC2_QM1_IND_GW_APB_WDATA 0xD62CB4 + +#define mmNIC2_QM1_IND_GW_APB_RDATA 0xD62CB8 + +#define mmNIC2_QM1_IND_GW_APB_STATUS 0xD62CBC + +#define mmNIC2_QM1_GLBL_ERR_ADDR_LO 0xD62CD0 + +#define mmNIC2_QM1_GLBL_ERR_ADDR_HI 0xD62CD4 + +#define mmNIC2_QM1_GLBL_ERR_WDATA 0xD62CD8 + +#define mmNIC2_QM1_GLBL_MEM_INIT_BUSY 0xD62D00 + +#endif /* ASIC_REG_NIC2_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h new file mode 100644 index 000000000000..4712cc62b009 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC3_QM0_REGS_H_ +#define ASIC_REG_NIC3_QM0_REGS_H_ + +/* + ***************************************** + * NIC3_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC3_QM0_GLBL_CFG0 0xDA0000 + +#define mmNIC3_QM0_GLBL_CFG1 0xDA0004 + +#define mmNIC3_QM0_GLBL_PROT 0xDA0008 + +#define mmNIC3_QM0_GLBL_ERR_CFG 0xDA000C + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_0 0xDA0010 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_1 0xDA0014 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_2 0xDA0018 + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_3 0xDA001C + +#define mmNIC3_QM0_GLBL_SECURE_PROPS_4 0xDA0020 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 0xDA0024 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 0xDA0028 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 0xDA002C + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 0xDA0030 + +#define mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 0xDA0034 + +#define mmNIC3_QM0_GLBL_STS0 0xDA0038 + +#define mmNIC3_QM0_GLBL_STS1_0 0xDA0040 + +#define mmNIC3_QM0_GLBL_STS1_1 0xDA0044 + +#define mmNIC3_QM0_GLBL_STS1_2 0xDA0048 + +#define mmNIC3_QM0_GLBL_STS1_3 0xDA004C + +#define mmNIC3_QM0_GLBL_STS1_4 0xDA0050 + +#define mmNIC3_QM0_GLBL_MSG_EN_0 0xDA0054 + +#define mmNIC3_QM0_GLBL_MSG_EN_1 0xDA0058 + +#define mmNIC3_QM0_GLBL_MSG_EN_2 0xDA005C + +#define mmNIC3_QM0_GLBL_MSG_EN_3 0xDA0060 + +#define mmNIC3_QM0_GLBL_MSG_EN_4 0xDA0068 + +#define mmNIC3_QM0_PQ_BASE_LO_0 0xDA0070 + +#define mmNIC3_QM0_PQ_BASE_LO_1 0xDA0074 + +#define mmNIC3_QM0_PQ_BASE_LO_2 0xDA0078 + +#define mmNIC3_QM0_PQ_BASE_LO_3 0xDA007C + +#define mmNIC3_QM0_PQ_BASE_HI_0 0xDA0080 + +#define mmNIC3_QM0_PQ_BASE_HI_1 0xDA0084 + +#define mmNIC3_QM0_PQ_BASE_HI_2 0xDA0088 + +#define mmNIC3_QM0_PQ_BASE_HI_3 0xDA008C + +#define mmNIC3_QM0_PQ_SIZE_0 0xDA0090 + +#define mmNIC3_QM0_PQ_SIZE_1 0xDA0094 + +#define mmNIC3_QM0_PQ_SIZE_2 0xDA0098 + +#define mmNIC3_QM0_PQ_SIZE_3 0xDA009C + +#define mmNIC3_QM0_PQ_PI_0 0xDA00A0 + +#define mmNIC3_QM0_PQ_PI_1 0xDA00A4 + +#define mmNIC3_QM0_PQ_PI_2 0xDA00A8 + +#define mmNIC3_QM0_PQ_PI_3 0xDA00AC + +#define mmNIC3_QM0_PQ_CI_0 0xDA00B0 + +#define mmNIC3_QM0_PQ_CI_1 0xDA00B4 + +#define mmNIC3_QM0_PQ_CI_2 0xDA00B8 + +#define mmNIC3_QM0_PQ_CI_3 0xDA00BC + +#define mmNIC3_QM0_PQ_CFG0_0 0xDA00C0 + +#define mmNIC3_QM0_PQ_CFG0_1 0xDA00C4 + +#define mmNIC3_QM0_PQ_CFG0_2 0xDA00C8 + +#define mmNIC3_QM0_PQ_CFG0_3 0xDA00CC + +#define mmNIC3_QM0_PQ_CFG1_0 0xDA00D0 + +#define mmNIC3_QM0_PQ_CFG1_1 0xDA00D4 + +#define mmNIC3_QM0_PQ_CFG1_2 0xDA00D8 + +#define mmNIC3_QM0_PQ_CFG1_3 0xDA00DC + +#define mmNIC3_QM0_PQ_ARUSER_31_11_0 0xDA00E0 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_1 0xDA00E4 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_2 0xDA00E8 + +#define mmNIC3_QM0_PQ_ARUSER_31_11_3 0xDA00EC + +#define mmNIC3_QM0_PQ_STS0_0 0xDA00F0 + +#define mmNIC3_QM0_PQ_STS0_1 0xDA00F4 + +#define mmNIC3_QM0_PQ_STS0_2 0xDA00F8 + +#define mmNIC3_QM0_PQ_STS0_3 0xDA00FC + +#define mmNIC3_QM0_PQ_STS1_0 0xDA0100 + +#define mmNIC3_QM0_PQ_STS1_1 0xDA0104 + +#define mmNIC3_QM0_PQ_STS1_2 0xDA0108 + +#define mmNIC3_QM0_PQ_STS1_3 0xDA010C + +#define mmNIC3_QM0_CQ_CFG0_0 0xDA0110 + +#define mmNIC3_QM0_CQ_CFG0_1 0xDA0114 + +#define mmNIC3_QM0_CQ_CFG0_2 0xDA0118 + +#define mmNIC3_QM0_CQ_CFG0_3 0xDA011C + +#define mmNIC3_QM0_CQ_CFG0_4 0xDA0120 + +#define mmNIC3_QM0_CQ_CFG1_0 0xDA0124 + +#define mmNIC3_QM0_CQ_CFG1_1 0xDA0128 + +#define mmNIC3_QM0_CQ_CFG1_2 0xDA012C + +#define 
mmNIC3_QM0_CQ_CFG1_3 0xDA0130 + +#define mmNIC3_QM0_CQ_CFG1_4 0xDA0134 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_0 0xDA0138 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_1 0xDA013C + +#define mmNIC3_QM0_CQ_ARUSER_31_11_2 0xDA0140 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_3 0xDA0144 + +#define mmNIC3_QM0_CQ_ARUSER_31_11_4 0xDA0148 + +#define mmNIC3_QM0_CQ_STS0_0 0xDA014C + +#define mmNIC3_QM0_CQ_STS0_1 0xDA0150 + +#define mmNIC3_QM0_CQ_STS0_2 0xDA0154 + +#define mmNIC3_QM0_CQ_STS0_3 0xDA0158 + +#define mmNIC3_QM0_CQ_STS0_4 0xDA015C + +#define mmNIC3_QM0_CQ_STS1_0 0xDA0160 + +#define mmNIC3_QM0_CQ_STS1_1 0xDA0164 + +#define mmNIC3_QM0_CQ_STS1_2 0xDA0168 + +#define mmNIC3_QM0_CQ_STS1_3 0xDA016C + +#define mmNIC3_QM0_CQ_STS1_4 0xDA0170 + +#define mmNIC3_QM0_CQ_PTR_LO_0 0xDA0174 + +#define mmNIC3_QM0_CQ_PTR_HI_0 0xDA0178 + +#define mmNIC3_QM0_CQ_TSIZE_0 0xDA017C + +#define mmNIC3_QM0_CQ_CTL_0 0xDA0180 + +#define mmNIC3_QM0_CQ_PTR_LO_1 0xDA0184 + +#define mmNIC3_QM0_CQ_PTR_HI_1 0xDA0188 + +#define mmNIC3_QM0_CQ_TSIZE_1 0xDA018C + +#define mmNIC3_QM0_CQ_CTL_1 0xDA0190 + +#define mmNIC3_QM0_CQ_PTR_LO_2 0xDA0194 + +#define mmNIC3_QM0_CQ_PTR_HI_2 0xDA0198 + +#define mmNIC3_QM0_CQ_TSIZE_2 0xDA019C + +#define mmNIC3_QM0_CQ_CTL_2 0xDA01A0 + +#define mmNIC3_QM0_CQ_PTR_LO_3 0xDA01A4 + +#define mmNIC3_QM0_CQ_PTR_HI_3 0xDA01A8 + +#define mmNIC3_QM0_CQ_TSIZE_3 0xDA01AC + +#define mmNIC3_QM0_CQ_CTL_3 0xDA01B0 + +#define mmNIC3_QM0_CQ_PTR_LO_4 0xDA01B4 + +#define mmNIC3_QM0_CQ_PTR_HI_4 0xDA01B8 + +#define mmNIC3_QM0_CQ_TSIZE_4 0xDA01BC + +#define mmNIC3_QM0_CQ_CTL_4 0xDA01C0 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_0 0xDA01C4 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_1 0xDA01C8 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_2 0xDA01CC + +#define mmNIC3_QM0_CQ_PTR_LO_STS_3 0xDA01D0 + +#define mmNIC3_QM0_CQ_PTR_LO_STS_4 0xDA01D4 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_0 0xDA01D8 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_1 0xDA01DC + +#define mmNIC3_QM0_CQ_PTR_HI_STS_2 0xDA01E0 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_3 0xDA01E4 + +#define mmNIC3_QM0_CQ_PTR_HI_STS_4 0xDA01E8 + +#define mmNIC3_QM0_CQ_TSIZE_STS_0 0xDA01EC + +#define mmNIC3_QM0_CQ_TSIZE_STS_1 0xDA01F0 + +#define mmNIC3_QM0_CQ_TSIZE_STS_2 0xDA01F4 + +#define mmNIC3_QM0_CQ_TSIZE_STS_3 0xDA01F8 + +#define mmNIC3_QM0_CQ_TSIZE_STS_4 0xDA01FC + +#define mmNIC3_QM0_CQ_CTL_STS_0 0xDA0200 + +#define mmNIC3_QM0_CQ_CTL_STS_1 0xDA0204 + +#define mmNIC3_QM0_CQ_CTL_STS_2 0xDA0208 + +#define mmNIC3_QM0_CQ_CTL_STS_3 0xDA020C + +#define mmNIC3_QM0_CQ_CTL_STS_4 0xDA0210 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_0 0xDA0214 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_1 0xDA0218 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_2 0xDA021C + +#define mmNIC3_QM0_CQ_IFIFO_CNT_3 0xDA0220 + +#define mmNIC3_QM0_CQ_IFIFO_CNT_4 0xDA0224 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 0xDA0228 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 0xDA022C + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 0xDA0230 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 0xDA0234 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 0xDA0238 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 0xDA023C + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 0xDA0240 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 0xDA0244 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 0xDA0248 + +#define mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 0xDA024C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 0xDA0250 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 0xDA0254 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 0xDA0258 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 0xDA025C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 0xDA0260 + +#define 
mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 0xDA0264 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 0xDA0268 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 0xDA026C + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 0xDA0270 + +#define mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 0xDA0274 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 0xDA0278 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 0xDA027C + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 0xDA0280 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 0xDA0284 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 0xDA0288 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 0xDA028C + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 0xDA0290 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 0xDA0294 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 0xDA0298 + +#define mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 0xDA029C + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 0xDA02A0 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 0xDA02A4 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 0xDA02A8 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 0xDA02AC + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 0xDA02B0 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 0xDA02B4 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 0xDA02B8 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 0xDA02BC + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 0xDA02C0 + +#define mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 0xDA02C4 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 0xDA02C8 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 0xDA02CC + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 0xDA02D0 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 0xDA02D4 + +#define mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 0xDA02D8 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDA02E0 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDA02E4 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDA02E8 + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDA02EC + +#define mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDA02F0 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDA02F4 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDA02F8 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDA02FC + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDA0300 + +#define mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDA0304 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_0 0xDA0308 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_1 0xDA030C + +#define mmNIC3_QM0_CP_FENCE0_RDATA_2 0xDA0310 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_3 0xDA0314 + +#define mmNIC3_QM0_CP_FENCE0_RDATA_4 0xDA0318 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_0 0xDA031C + +#define mmNIC3_QM0_CP_FENCE1_RDATA_1 0xDA0320 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_2 0xDA0324 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_3 0xDA0328 + +#define mmNIC3_QM0_CP_FENCE1_RDATA_4 0xDA032C + +#define mmNIC3_QM0_CP_FENCE2_RDATA_0 0xDA0330 + +#define mmNIC3_QM0_CP_FENCE2_RDATA_1 0xDA0334 + +#define mmNIC3_QM0_CP_FENCE2_RDATA_2 0xDA0338 + +#define mmNIC3_QM0_CP_FENCE2_RDATA_3 0xDA033C + +#define mmNIC3_QM0_CP_FENCE2_RDATA_4 0xDA0340 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_0 0xDA0344 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_1 0xDA0348 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_2 0xDA034C + +#define mmNIC3_QM0_CP_FENCE3_RDATA_3 0xDA0350 + +#define mmNIC3_QM0_CP_FENCE3_RDATA_4 0xDA0354 + +#define mmNIC3_QM0_CP_FENCE0_CNT_0 0xDA0358 + +#define mmNIC3_QM0_CP_FENCE0_CNT_1 0xDA035C + +#define mmNIC3_QM0_CP_FENCE0_CNT_2 0xDA0360 + +#define mmNIC3_QM0_CP_FENCE0_CNT_3 0xDA0364 + +#define mmNIC3_QM0_CP_FENCE0_CNT_4 0xDA0368 + +#define mmNIC3_QM0_CP_FENCE1_CNT_0 0xDA036C + +#define mmNIC3_QM0_CP_FENCE1_CNT_1 0xDA0370 + +#define 
mmNIC3_QM0_CP_FENCE1_CNT_2 0xDA0374 + +#define mmNIC3_QM0_CP_FENCE1_CNT_3 0xDA0378 + +#define mmNIC3_QM0_CP_FENCE1_CNT_4 0xDA037C + +#define mmNIC3_QM0_CP_FENCE2_CNT_0 0xDA0380 + +#define mmNIC3_QM0_CP_FENCE2_CNT_1 0xDA0384 + +#define mmNIC3_QM0_CP_FENCE2_CNT_2 0xDA0388 + +#define mmNIC3_QM0_CP_FENCE2_CNT_3 0xDA038C + +#define mmNIC3_QM0_CP_FENCE2_CNT_4 0xDA0390 + +#define mmNIC3_QM0_CP_FENCE3_CNT_0 0xDA0394 + +#define mmNIC3_QM0_CP_FENCE3_CNT_1 0xDA0398 + +#define mmNIC3_QM0_CP_FENCE3_CNT_2 0xDA039C + +#define mmNIC3_QM0_CP_FENCE3_CNT_3 0xDA03A0 + +#define mmNIC3_QM0_CP_FENCE3_CNT_4 0xDA03A4 + +#define mmNIC3_QM0_CP_STS_0 0xDA03A8 + +#define mmNIC3_QM0_CP_STS_1 0xDA03AC + +#define mmNIC3_QM0_CP_STS_2 0xDA03B0 + +#define mmNIC3_QM0_CP_STS_3 0xDA03B4 + +#define mmNIC3_QM0_CP_STS_4 0xDA03B8 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_0 0xDA03BC + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_1 0xDA03C0 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_2 0xDA03C4 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_3 0xDA03C8 + +#define mmNIC3_QM0_CP_CURRENT_INST_LO_4 0xDA03CC + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_0 0xDA03D0 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_1 0xDA03D4 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_2 0xDA03D8 + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_3 0xDA03DC + +#define mmNIC3_QM0_CP_CURRENT_INST_HI_4 0xDA03E0 + +#define mmNIC3_QM0_CP_BARRIER_CFG_0 0xDA03F4 + +#define mmNIC3_QM0_CP_BARRIER_CFG_1 0xDA03F8 + +#define mmNIC3_QM0_CP_BARRIER_CFG_2 0xDA03FC + +#define mmNIC3_QM0_CP_BARRIER_CFG_3 0xDA0400 + +#define mmNIC3_QM0_CP_BARRIER_CFG_4 0xDA0404 + +#define mmNIC3_QM0_CP_DBG_0_0 0xDA0408 + +#define mmNIC3_QM0_CP_DBG_0_1 0xDA040C + +#define mmNIC3_QM0_CP_DBG_0_2 0xDA0410 + +#define mmNIC3_QM0_CP_DBG_0_3 0xDA0414 + +#define mmNIC3_QM0_CP_DBG_0_4 0xDA0418 + +#define mmNIC3_QM0_CP_ARUSER_31_11_0 0xDA041C + +#define mmNIC3_QM0_CP_ARUSER_31_11_1 0xDA0420 + +#define mmNIC3_QM0_CP_ARUSER_31_11_2 0xDA0424 + +#define mmNIC3_QM0_CP_ARUSER_31_11_3 0xDA0428 + +#define mmNIC3_QM0_CP_ARUSER_31_11_4 0xDA042C + +#define mmNIC3_QM0_CP_AWUSER_31_11_0 0xDA0430 + +#define mmNIC3_QM0_CP_AWUSER_31_11_1 0xDA0434 + +#define mmNIC3_QM0_CP_AWUSER_31_11_2 0xDA0438 + +#define mmNIC3_QM0_CP_AWUSER_31_11_3 0xDA043C + +#define mmNIC3_QM0_CP_AWUSER_31_11_4 0xDA0440 + +#define mmNIC3_QM0_ARB_CFG_0 0xDA0A00 + +#define mmNIC3_QM0_ARB_CHOISE_Q_PUSH 0xDA0A04 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_0 0xDA0A08 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_1 0xDA0A0C + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_2 0xDA0A10 + +#define mmNIC3_QM0_ARB_WRR_WEIGHT_3 0xDA0A14 + +#define mmNIC3_QM0_ARB_CFG_1 0xDA0A18 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 0xDA0A20 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 0xDA0A24 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 0xDA0A28 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 0xDA0A2C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 0xDA0A30 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 0xDA0A34 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 0xDA0A38 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 0xDA0A3C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 0xDA0A40 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 0xDA0A44 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 0xDA0A48 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 0xDA0A4C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 0xDA0A50 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 0xDA0A54 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 0xDA0A58 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 0xDA0A5C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 0xDA0A60 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 0xDA0A64 + +#define 
mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 0xDA0A68 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 0xDA0A6C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 0xDA0A70 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 0xDA0A74 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 0xDA0A78 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 0xDA0A7C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 0xDA0A80 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 0xDA0A84 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 0xDA0A88 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 0xDA0A8C + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 0xDA0A90 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 0xDA0A94 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 0xDA0A98 + +#define mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 0xDA0A9C + +#define mmNIC3_QM0_ARB_MST_CRED_INC 0xDA0AA0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xDA0AA4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xDA0AA8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xDA0AAC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xDA0AB0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xDA0AB4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xDA0AB8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xDA0ABC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xDA0AC0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xDA0AC4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xDA0AC8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xDA0ACC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xDA0AD0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xDA0AD4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xDA0AD8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xDA0ADC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xDA0AE0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xDA0AE4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xDA0AE8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xDA0AEC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xDA0AF0 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xDA0AF4 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xDA0AF8 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xDA0AFC + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xDA0B00 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xDA0B04 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xDA0B08 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xDA0B0C + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xDA0B10 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xDA0B14 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xDA0B18 + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xDA0B1C + +#define mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xDA0B20 + +#define mmNIC3_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xDA0B28 + +#define mmNIC3_QM0_ARB_MST_SLAVE_EN 0xDA0B2C + +#define mmNIC3_QM0_ARB_MST_QUIET_PER 0xDA0B34 + +#define mmNIC3_QM0_ARB_SLV_CHOISE_WDT 0xDA0B38 + +#define mmNIC3_QM0_ARB_SLV_ID 0xDA0B3C + +#define mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT 0xDA0B44 + +#define mmNIC3_QM0_ARB_MSG_AWUSER_31_11 0xDA0B48 + +#define mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP 0xDA0B4C + +#define mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xDA0B50 + +#define mmNIC3_QM0_ARB_BASE_LO 0xDA0B54 + +#define mmNIC3_QM0_ARB_BASE_HI 0xDA0B58 + +#define mmNIC3_QM0_ARB_STATE_STS 0xDA0B80 + +#define mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS 0xDA0B84 + +#define mmNIC3_QM0_ARB_MSG_STS 0xDA0B88 + +#define mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD 0xDA0B8C + +#define mmNIC3_QM0_ARB_ERR_CAUSE 0xDA0B9C + +#define mmNIC3_QM0_ARB_ERR_MSG_EN 0xDA0BA0 + +#define mmNIC3_QM0_ARB_ERR_STS_DRP 0xDA0BA8 + +#define 
mmNIC3_QM0_ARB_MST_CRED_STS_0 0xDA0BB0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_1 0xDA0BB4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_2 0xDA0BB8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_3 0xDA0BBC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_4 0xDA0BC0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_5 0xDA0BC4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_6 0xDA0BC8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_7 0xDA0BCC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_8 0xDA0BD0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_9 0xDA0BD4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_10 0xDA0BD8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_11 0xDA0BDC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_12 0xDA0BE0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_13 0xDA0BE4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_14 0xDA0BE8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_15 0xDA0BEC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_16 0xDA0BF0 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_17 0xDA0BF4 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_18 0xDA0BF8 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_19 0xDA0BFC + +#define mmNIC3_QM0_ARB_MST_CRED_STS_20 0xDA0C00 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_21 0xDA0C04 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_22 0xDA0C08 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_23 0xDA0C0C + +#define mmNIC3_QM0_ARB_MST_CRED_STS_24 0xDA0C10 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_25 0xDA0C14 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_26 0xDA0C18 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_27 0xDA0C1C + +#define mmNIC3_QM0_ARB_MST_CRED_STS_28 0xDA0C20 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_29 0xDA0C24 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_30 0xDA0C28 + +#define mmNIC3_QM0_ARB_MST_CRED_STS_31 0xDA0C2C + +#define mmNIC3_QM0_CGM_CFG 0xDA0C70 + +#define mmNIC3_QM0_CGM_STS 0xDA0C74 + +#define mmNIC3_QM0_CGM_CFG1 0xDA0C78 + +#define mmNIC3_QM0_LOCAL_RANGE_BASE 0xDA0C80 + +#define mmNIC3_QM0_LOCAL_RANGE_SIZE 0xDA0C84 + +#define mmNIC3_QM0_CSMR_STRICT_PRIO_CFG 0xDA0C90 + +#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 0xDA0C94 + +#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 0xDA0C98 + +#define mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 0xDA0C9C + +#define mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 0xDA0CA0 + +#define mmNIC3_QM0_GLBL_AXCACHE 0xDA0CA4 + +#define mmNIC3_QM0_IND_GW_APB_CFG 0xDA0CB0 + +#define mmNIC3_QM0_IND_GW_APB_WDATA 0xDA0CB4 + +#define mmNIC3_QM0_IND_GW_APB_RDATA 0xDA0CB8 + +#define mmNIC3_QM0_IND_GW_APB_STATUS 0xDA0CBC + +#define mmNIC3_QM0_GLBL_ERR_ADDR_LO 0xDA0CD0 + +#define mmNIC3_QM0_GLBL_ERR_ADDR_HI 0xDA0CD4 + +#define mmNIC3_QM0_GLBL_ERR_WDATA 0xDA0CD8 + +#define mmNIC3_QM0_GLBL_MEM_INIT_BUSY 0xDA0D00 + +#endif /* ASIC_REG_NIC3_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h new file mode 100644 index 000000000000..7fa040f65004 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC3_QM1_REGS_H_ +#define ASIC_REG_NIC3_QM1_REGS_H_ + +/* + ***************************************** + * NIC3_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC3_QM1_GLBL_CFG0 0xDA2000 + +#define mmNIC3_QM1_GLBL_CFG1 0xDA2004 + +#define mmNIC3_QM1_GLBL_PROT 0xDA2008 + +#define mmNIC3_QM1_GLBL_ERR_CFG 0xDA200C + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_0 0xDA2010 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_1 0xDA2014 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_2 0xDA2018 + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_3 0xDA201C + +#define mmNIC3_QM1_GLBL_SECURE_PROPS_4 0xDA2020 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 0xDA2024 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 0xDA2028 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 0xDA202C + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 0xDA2030 + +#define mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 0xDA2034 + +#define mmNIC3_QM1_GLBL_STS0 0xDA2038 + +#define mmNIC3_QM1_GLBL_STS1_0 0xDA2040 + +#define mmNIC3_QM1_GLBL_STS1_1 0xDA2044 + +#define mmNIC3_QM1_GLBL_STS1_2 0xDA2048 + +#define mmNIC3_QM1_GLBL_STS1_3 0xDA204C + +#define mmNIC3_QM1_GLBL_STS1_4 0xDA2050 + +#define mmNIC3_QM1_GLBL_MSG_EN_0 0xDA2054 + +#define mmNIC3_QM1_GLBL_MSG_EN_1 0xDA2058 + +#define mmNIC3_QM1_GLBL_MSG_EN_2 0xDA205C + +#define mmNIC3_QM1_GLBL_MSG_EN_3 0xDA2060 + +#define mmNIC3_QM1_GLBL_MSG_EN_4 0xDA2068 + +#define mmNIC3_QM1_PQ_BASE_LO_0 0xDA2070 + +#define mmNIC3_QM1_PQ_BASE_LO_1 0xDA2074 + +#define mmNIC3_QM1_PQ_BASE_LO_2 0xDA2078 + +#define mmNIC3_QM1_PQ_BASE_LO_3 0xDA207C + +#define mmNIC3_QM1_PQ_BASE_HI_0 0xDA2080 + +#define mmNIC3_QM1_PQ_BASE_HI_1 0xDA2084 + +#define mmNIC3_QM1_PQ_BASE_HI_2 0xDA2088 + +#define mmNIC3_QM1_PQ_BASE_HI_3 0xDA208C + +#define mmNIC3_QM1_PQ_SIZE_0 0xDA2090 + +#define mmNIC3_QM1_PQ_SIZE_1 0xDA2094 + +#define mmNIC3_QM1_PQ_SIZE_2 0xDA2098 + +#define mmNIC3_QM1_PQ_SIZE_3 0xDA209C + +#define mmNIC3_QM1_PQ_PI_0 0xDA20A0 + +#define mmNIC3_QM1_PQ_PI_1 0xDA20A4 + +#define mmNIC3_QM1_PQ_PI_2 0xDA20A8 + +#define mmNIC3_QM1_PQ_PI_3 0xDA20AC + +#define mmNIC3_QM1_PQ_CI_0 0xDA20B0 + +#define mmNIC3_QM1_PQ_CI_1 0xDA20B4 + +#define mmNIC3_QM1_PQ_CI_2 0xDA20B8 + +#define mmNIC3_QM1_PQ_CI_3 0xDA20BC + +#define mmNIC3_QM1_PQ_CFG0_0 0xDA20C0 + +#define mmNIC3_QM1_PQ_CFG0_1 0xDA20C4 + +#define mmNIC3_QM1_PQ_CFG0_2 0xDA20C8 + +#define mmNIC3_QM1_PQ_CFG0_3 0xDA20CC + +#define mmNIC3_QM1_PQ_CFG1_0 0xDA20D0 + +#define mmNIC3_QM1_PQ_CFG1_1 0xDA20D4 + +#define mmNIC3_QM1_PQ_CFG1_2 0xDA20D8 + +#define mmNIC3_QM1_PQ_CFG1_3 0xDA20DC + +#define mmNIC3_QM1_PQ_ARUSER_31_11_0 0xDA20E0 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_1 0xDA20E4 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_2 0xDA20E8 + +#define mmNIC3_QM1_PQ_ARUSER_31_11_3 0xDA20EC + +#define mmNIC3_QM1_PQ_STS0_0 0xDA20F0 + +#define mmNIC3_QM1_PQ_STS0_1 0xDA20F4 + +#define mmNIC3_QM1_PQ_STS0_2 0xDA20F8 + +#define mmNIC3_QM1_PQ_STS0_3 0xDA20FC + +#define mmNIC3_QM1_PQ_STS1_0 0xDA2100 + +#define mmNIC3_QM1_PQ_STS1_1 0xDA2104 + +#define mmNIC3_QM1_PQ_STS1_2 0xDA2108 + +#define mmNIC3_QM1_PQ_STS1_3 0xDA210C + +#define mmNIC3_QM1_CQ_CFG0_0 0xDA2110 + +#define mmNIC3_QM1_CQ_CFG0_1 0xDA2114 + +#define mmNIC3_QM1_CQ_CFG0_2 0xDA2118 + +#define mmNIC3_QM1_CQ_CFG0_3 0xDA211C + +#define mmNIC3_QM1_CQ_CFG0_4 0xDA2120 + +#define mmNIC3_QM1_CQ_CFG1_0 0xDA2124 + +#define mmNIC3_QM1_CQ_CFG1_1 0xDA2128 + +#define mmNIC3_QM1_CQ_CFG1_2 0xDA212C + +#define 
mmNIC3_QM1_CQ_CFG1_3 0xDA2130 + +#define mmNIC3_QM1_CQ_CFG1_4 0xDA2134 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_0 0xDA2138 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_1 0xDA213C + +#define mmNIC3_QM1_CQ_ARUSER_31_11_2 0xDA2140 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_3 0xDA2144 + +#define mmNIC3_QM1_CQ_ARUSER_31_11_4 0xDA2148 + +#define mmNIC3_QM1_CQ_STS0_0 0xDA214C + +#define mmNIC3_QM1_CQ_STS0_1 0xDA2150 + +#define mmNIC3_QM1_CQ_STS0_2 0xDA2154 + +#define mmNIC3_QM1_CQ_STS0_3 0xDA2158 + +#define mmNIC3_QM1_CQ_STS0_4 0xDA215C + +#define mmNIC3_QM1_CQ_STS1_0 0xDA2160 + +#define mmNIC3_QM1_CQ_STS1_1 0xDA2164 + +#define mmNIC3_QM1_CQ_STS1_2 0xDA2168 + +#define mmNIC3_QM1_CQ_STS1_3 0xDA216C + +#define mmNIC3_QM1_CQ_STS1_4 0xDA2170 + +#define mmNIC3_QM1_CQ_PTR_LO_0 0xDA2174 + +#define mmNIC3_QM1_CQ_PTR_HI_0 0xDA2178 + +#define mmNIC3_QM1_CQ_TSIZE_0 0xDA217C + +#define mmNIC3_QM1_CQ_CTL_0 0xDA2180 + +#define mmNIC3_QM1_CQ_PTR_LO_1 0xDA2184 + +#define mmNIC3_QM1_CQ_PTR_HI_1 0xDA2188 + +#define mmNIC3_QM1_CQ_TSIZE_1 0xDA218C + +#define mmNIC3_QM1_CQ_CTL_1 0xDA2190 + +#define mmNIC3_QM1_CQ_PTR_LO_2 0xDA2194 + +#define mmNIC3_QM1_CQ_PTR_HI_2 0xDA2198 + +#define mmNIC3_QM1_CQ_TSIZE_2 0xDA219C + +#define mmNIC3_QM1_CQ_CTL_2 0xDA21A0 + +#define mmNIC3_QM1_CQ_PTR_LO_3 0xDA21A4 + +#define mmNIC3_QM1_CQ_PTR_HI_3 0xDA21A8 + +#define mmNIC3_QM1_CQ_TSIZE_3 0xDA21AC + +#define mmNIC3_QM1_CQ_CTL_3 0xDA21B0 + +#define mmNIC3_QM1_CQ_PTR_LO_4 0xDA21B4 + +#define mmNIC3_QM1_CQ_PTR_HI_4 0xDA21B8 + +#define mmNIC3_QM1_CQ_TSIZE_4 0xDA21BC + +#define mmNIC3_QM1_CQ_CTL_4 0xDA21C0 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_0 0xDA21C4 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_1 0xDA21C8 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_2 0xDA21CC + +#define mmNIC3_QM1_CQ_PTR_LO_STS_3 0xDA21D0 + +#define mmNIC3_QM1_CQ_PTR_LO_STS_4 0xDA21D4 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_0 0xDA21D8 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_1 0xDA21DC + +#define mmNIC3_QM1_CQ_PTR_HI_STS_2 0xDA21E0 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_3 0xDA21E4 + +#define mmNIC3_QM1_CQ_PTR_HI_STS_4 0xDA21E8 + +#define mmNIC3_QM1_CQ_TSIZE_STS_0 0xDA21EC + +#define mmNIC3_QM1_CQ_TSIZE_STS_1 0xDA21F0 + +#define mmNIC3_QM1_CQ_TSIZE_STS_2 0xDA21F4 + +#define mmNIC3_QM1_CQ_TSIZE_STS_3 0xDA21F8 + +#define mmNIC3_QM1_CQ_TSIZE_STS_4 0xDA21FC + +#define mmNIC3_QM1_CQ_CTL_STS_0 0xDA2200 + +#define mmNIC3_QM1_CQ_CTL_STS_1 0xDA2204 + +#define mmNIC3_QM1_CQ_CTL_STS_2 0xDA2208 + +#define mmNIC3_QM1_CQ_CTL_STS_3 0xDA220C + +#define mmNIC3_QM1_CQ_CTL_STS_4 0xDA2210 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_0 0xDA2214 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_1 0xDA2218 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_2 0xDA221C + +#define mmNIC3_QM1_CQ_IFIFO_CNT_3 0xDA2220 + +#define mmNIC3_QM1_CQ_IFIFO_CNT_4 0xDA2224 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 0xDA2228 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 0xDA222C + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 0xDA2230 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 0xDA2234 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 0xDA2238 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 0xDA223C + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 0xDA2240 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 0xDA2244 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 0xDA2248 + +#define mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 0xDA224C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 0xDA2250 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 0xDA2254 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 0xDA2258 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 0xDA225C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 0xDA2260 + +#define 
mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 0xDA2264 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 0xDA2268 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 0xDA226C + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 0xDA2270 + +#define mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 0xDA2274 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 0xDA2278 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 0xDA227C + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 0xDA2280 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 0xDA2284 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 0xDA2288 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 0xDA228C + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 0xDA2290 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 0xDA2294 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 0xDA2298 + +#define mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 0xDA229C + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 0xDA22A0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 0xDA22A4 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 0xDA22A8 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 0xDA22AC + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 0xDA22B0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 0xDA22B4 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 0xDA22B8 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 0xDA22BC + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 0xDA22C0 + +#define mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 0xDA22C4 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 0xDA22C8 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 0xDA22CC + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 0xDA22D0 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 0xDA22D4 + +#define mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 0xDA22D8 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDA22E0 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDA22E4 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDA22E8 + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDA22EC + +#define mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDA22F0 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDA22F4 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDA22F8 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDA22FC + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDA2300 + +#define mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDA2304 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_0 0xDA2308 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_1 0xDA230C + +#define mmNIC3_QM1_CP_FENCE0_RDATA_2 0xDA2310 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_3 0xDA2314 + +#define mmNIC3_QM1_CP_FENCE0_RDATA_4 0xDA2318 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_0 0xDA231C + +#define mmNIC3_QM1_CP_FENCE1_RDATA_1 0xDA2320 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_2 0xDA2324 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_3 0xDA2328 + +#define mmNIC3_QM1_CP_FENCE1_RDATA_4 0xDA232C + +#define mmNIC3_QM1_CP_FENCE2_RDATA_0 0xDA2330 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_1 0xDA2334 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_2 0xDA2338 + +#define mmNIC3_QM1_CP_FENCE2_RDATA_3 0xDA233C + +#define mmNIC3_QM1_CP_FENCE2_RDATA_4 0xDA2340 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_0 0xDA2344 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_1 0xDA2348 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_2 0xDA234C + +#define mmNIC3_QM1_CP_FENCE3_RDATA_3 0xDA2350 + +#define mmNIC3_QM1_CP_FENCE3_RDATA_4 0xDA2354 + +#define mmNIC3_QM1_CP_FENCE0_CNT_0 0xDA2358 + +#define mmNIC3_QM1_CP_FENCE0_CNT_1 0xDA235C + +#define mmNIC3_QM1_CP_FENCE0_CNT_2 0xDA2360 + +#define mmNIC3_QM1_CP_FENCE0_CNT_3 0xDA2364 + +#define mmNIC3_QM1_CP_FENCE0_CNT_4 0xDA2368 + +#define mmNIC3_QM1_CP_FENCE1_CNT_0 0xDA236C + +#define mmNIC3_QM1_CP_FENCE1_CNT_1 0xDA2370 + +#define 
mmNIC3_QM1_CP_FENCE1_CNT_2 0xDA2374 + +#define mmNIC3_QM1_CP_FENCE1_CNT_3 0xDA2378 + +#define mmNIC3_QM1_CP_FENCE1_CNT_4 0xDA237C + +#define mmNIC3_QM1_CP_FENCE2_CNT_0 0xDA2380 + +#define mmNIC3_QM1_CP_FENCE2_CNT_1 0xDA2384 + +#define mmNIC3_QM1_CP_FENCE2_CNT_2 0xDA2388 + +#define mmNIC3_QM1_CP_FENCE2_CNT_3 0xDA238C + +#define mmNIC3_QM1_CP_FENCE2_CNT_4 0xDA2390 + +#define mmNIC3_QM1_CP_FENCE3_CNT_0 0xDA2394 + +#define mmNIC3_QM1_CP_FENCE3_CNT_1 0xDA2398 + +#define mmNIC3_QM1_CP_FENCE3_CNT_2 0xDA239C + +#define mmNIC3_QM1_CP_FENCE3_CNT_3 0xDA23A0 + +#define mmNIC3_QM1_CP_FENCE3_CNT_4 0xDA23A4 + +#define mmNIC3_QM1_CP_STS_0 0xDA23A8 + +#define mmNIC3_QM1_CP_STS_1 0xDA23AC + +#define mmNIC3_QM1_CP_STS_2 0xDA23B0 + +#define mmNIC3_QM1_CP_STS_3 0xDA23B4 + +#define mmNIC3_QM1_CP_STS_4 0xDA23B8 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_0 0xDA23BC + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_1 0xDA23C0 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_2 0xDA23C4 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_3 0xDA23C8 + +#define mmNIC3_QM1_CP_CURRENT_INST_LO_4 0xDA23CC + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_0 0xDA23D0 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_1 0xDA23D4 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_2 0xDA23D8 + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_3 0xDA23DC + +#define mmNIC3_QM1_CP_CURRENT_INST_HI_4 0xDA23E0 + +#define mmNIC3_QM1_CP_BARRIER_CFG_0 0xDA23F4 + +#define mmNIC3_QM1_CP_BARRIER_CFG_1 0xDA23F8 + +#define mmNIC3_QM1_CP_BARRIER_CFG_2 0xDA23FC + +#define mmNIC3_QM1_CP_BARRIER_CFG_3 0xDA2400 + +#define mmNIC3_QM1_CP_BARRIER_CFG_4 0xDA2404 + +#define mmNIC3_QM1_CP_DBG_0_0 0xDA2408 + +#define mmNIC3_QM1_CP_DBG_0_1 0xDA240C + +#define mmNIC3_QM1_CP_DBG_0_2 0xDA2410 + +#define mmNIC3_QM1_CP_DBG_0_3 0xDA2414 + +#define mmNIC3_QM1_CP_DBG_0_4 0xDA2418 + +#define mmNIC3_QM1_CP_ARUSER_31_11_0 0xDA241C + +#define mmNIC3_QM1_CP_ARUSER_31_11_1 0xDA2420 + +#define mmNIC3_QM1_CP_ARUSER_31_11_2 0xDA2424 + +#define mmNIC3_QM1_CP_ARUSER_31_11_3 0xDA2428 + +#define mmNIC3_QM1_CP_ARUSER_31_11_4 0xDA242C + +#define mmNIC3_QM1_CP_AWUSER_31_11_0 0xDA2430 + +#define mmNIC3_QM1_CP_AWUSER_31_11_1 0xDA2434 + +#define mmNIC3_QM1_CP_AWUSER_31_11_2 0xDA2438 + +#define mmNIC3_QM1_CP_AWUSER_31_11_3 0xDA243C + +#define mmNIC3_QM1_CP_AWUSER_31_11_4 0xDA2440 + +#define mmNIC3_QM1_ARB_CFG_0 0xDA2A00 + +#define mmNIC3_QM1_ARB_CHOISE_Q_PUSH 0xDA2A04 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_0 0xDA2A08 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_1 0xDA2A0C + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_2 0xDA2A10 + +#define mmNIC3_QM1_ARB_WRR_WEIGHT_3 0xDA2A14 + +#define mmNIC3_QM1_ARB_CFG_1 0xDA2A18 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 0xDA2A20 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 0xDA2A24 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 0xDA2A28 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 0xDA2A2C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 0xDA2A30 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 0xDA2A34 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 0xDA2A38 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 0xDA2A3C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 0xDA2A40 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 0xDA2A44 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 0xDA2A48 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 0xDA2A4C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 0xDA2A50 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 0xDA2A54 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 0xDA2A58 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 0xDA2A5C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 0xDA2A60 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 0xDA2A64 + +#define 
mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 0xDA2A68 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 0xDA2A6C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 0xDA2A70 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 0xDA2A74 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 0xDA2A78 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 0xDA2A7C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 0xDA2A80 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 0xDA2A84 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 0xDA2A88 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 0xDA2A8C + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 0xDA2A90 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 0xDA2A94 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 0xDA2A98 + +#define mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 0xDA2A9C + +#define mmNIC3_QM1_ARB_MST_CRED_INC 0xDA2AA0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xDA2AA4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xDA2AA8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xDA2AAC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xDA2AB0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xDA2AB4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xDA2AB8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xDA2ABC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xDA2AC0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xDA2AC4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xDA2AC8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xDA2ACC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xDA2AD0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xDA2AD4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xDA2AD8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xDA2ADC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xDA2AE0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xDA2AE4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xDA2AE8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xDA2AEC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xDA2AF0 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xDA2AF4 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xDA2AF8 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xDA2AFC + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xDA2B00 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xDA2B04 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xDA2B08 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xDA2B0C + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xDA2B10 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xDA2B14 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xDA2B18 + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xDA2B1C + +#define mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xDA2B20 + +#define mmNIC3_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xDA2B28 + +#define mmNIC3_QM1_ARB_MST_SLAVE_EN 0xDA2B2C + +#define mmNIC3_QM1_ARB_MST_QUIET_PER 0xDA2B34 + +#define mmNIC3_QM1_ARB_SLV_CHOISE_WDT 0xDA2B38 + +#define mmNIC3_QM1_ARB_SLV_ID 0xDA2B3C + +#define mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT 0xDA2B44 + +#define mmNIC3_QM1_ARB_MSG_AWUSER_31_11 0xDA2B48 + +#define mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP 0xDA2B4C + +#define mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xDA2B50 + +#define mmNIC3_QM1_ARB_BASE_LO 0xDA2B54 + +#define mmNIC3_QM1_ARB_BASE_HI 0xDA2B58 + +#define mmNIC3_QM1_ARB_STATE_STS 0xDA2B80 + +#define mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS 0xDA2B84 + +#define mmNIC3_QM1_ARB_MSG_STS 0xDA2B88 + +#define mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD 0xDA2B8C + +#define mmNIC3_QM1_ARB_ERR_CAUSE 0xDA2B9C + +#define mmNIC3_QM1_ARB_ERR_MSG_EN 0xDA2BA0 + +#define mmNIC3_QM1_ARB_ERR_STS_DRP 0xDA2BA8 + +#define 
mmNIC3_QM1_ARB_MST_CRED_STS_0 0xDA2BB0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_1 0xDA2BB4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_2 0xDA2BB8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_3 0xDA2BBC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_4 0xDA2BC0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_5 0xDA2BC4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_6 0xDA2BC8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_7 0xDA2BCC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_8 0xDA2BD0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_9 0xDA2BD4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_10 0xDA2BD8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_11 0xDA2BDC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_12 0xDA2BE0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_13 0xDA2BE4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_14 0xDA2BE8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_15 0xDA2BEC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_16 0xDA2BF0 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_17 0xDA2BF4 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_18 0xDA2BF8 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_19 0xDA2BFC + +#define mmNIC3_QM1_ARB_MST_CRED_STS_20 0xDA2C00 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_21 0xDA2C04 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_22 0xDA2C08 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_23 0xDA2C0C + +#define mmNIC3_QM1_ARB_MST_CRED_STS_24 0xDA2C10 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_25 0xDA2C14 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_26 0xDA2C18 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_27 0xDA2C1C + +#define mmNIC3_QM1_ARB_MST_CRED_STS_28 0xDA2C20 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_29 0xDA2C24 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_30 0xDA2C28 + +#define mmNIC3_QM1_ARB_MST_CRED_STS_31 0xDA2C2C + +#define mmNIC3_QM1_CGM_CFG 0xDA2C70 + +#define mmNIC3_QM1_CGM_STS 0xDA2C74 + +#define mmNIC3_QM1_CGM_CFG1 0xDA2C78 + +#define mmNIC3_QM1_LOCAL_RANGE_BASE 0xDA2C80 + +#define mmNIC3_QM1_LOCAL_RANGE_SIZE 0xDA2C84 + +#define mmNIC3_QM1_CSMR_STRICT_PRIO_CFG 0xDA2C90 + +#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 0xDA2C94 + +#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 0xDA2C98 + +#define mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 0xDA2C9C + +#define mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 0xDA2CA0 + +#define mmNIC3_QM1_GLBL_AXCACHE 0xDA2CA4 + +#define mmNIC3_QM1_IND_GW_APB_CFG 0xDA2CB0 + +#define mmNIC3_QM1_IND_GW_APB_WDATA 0xDA2CB4 + +#define mmNIC3_QM1_IND_GW_APB_RDATA 0xDA2CB8 + +#define mmNIC3_QM1_IND_GW_APB_STATUS 0xDA2CBC + +#define mmNIC3_QM1_GLBL_ERR_ADDR_LO 0xDA2CD0 + +#define mmNIC3_QM1_GLBL_ERR_ADDR_HI 0xDA2CD4 + +#define mmNIC3_QM1_GLBL_ERR_WDATA 0xDA2CD8 + +#define mmNIC3_QM1_GLBL_MEM_INIT_BUSY 0xDA2D00 + +#endif /* ASIC_REG_NIC3_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h new file mode 100644 index 000000000000..99d5319672dd --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC4_QM0_REGS_H_ +#define ASIC_REG_NIC4_QM0_REGS_H_ + +/* + ***************************************** + * NIC4_QM0 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC4_QM0_GLBL_CFG0 0xDE0000 + +#define mmNIC4_QM0_GLBL_CFG1 0xDE0004 + +#define mmNIC4_QM0_GLBL_PROT 0xDE0008 + +#define mmNIC4_QM0_GLBL_ERR_CFG 0xDE000C + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_0 0xDE0010 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_1 0xDE0014 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_2 0xDE0018 + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_3 0xDE001C + +#define mmNIC4_QM0_GLBL_SECURE_PROPS_4 0xDE0020 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 0xDE0024 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 0xDE0028 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 0xDE002C + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 0xDE0030 + +#define mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 0xDE0034 + +#define mmNIC4_QM0_GLBL_STS0 0xDE0038 + +#define mmNIC4_QM0_GLBL_STS1_0 0xDE0040 + +#define mmNIC4_QM0_GLBL_STS1_1 0xDE0044 + +#define mmNIC4_QM0_GLBL_STS1_2 0xDE0048 + +#define mmNIC4_QM0_GLBL_STS1_3 0xDE004C + +#define mmNIC4_QM0_GLBL_STS1_4 0xDE0050 + +#define mmNIC4_QM0_GLBL_MSG_EN_0 0xDE0054 + +#define mmNIC4_QM0_GLBL_MSG_EN_1 0xDE0058 + +#define mmNIC4_QM0_GLBL_MSG_EN_2 0xDE005C + +#define mmNIC4_QM0_GLBL_MSG_EN_3 0xDE0060 + +#define mmNIC4_QM0_GLBL_MSG_EN_4 0xDE0068 + +#define mmNIC4_QM0_PQ_BASE_LO_0 0xDE0070 + +#define mmNIC4_QM0_PQ_BASE_LO_1 0xDE0074 + +#define mmNIC4_QM0_PQ_BASE_LO_2 0xDE0078 + +#define mmNIC4_QM0_PQ_BASE_LO_3 0xDE007C + +#define mmNIC4_QM0_PQ_BASE_HI_0 0xDE0080 + +#define mmNIC4_QM0_PQ_BASE_HI_1 0xDE0084 + +#define mmNIC4_QM0_PQ_BASE_HI_2 0xDE0088 + +#define mmNIC4_QM0_PQ_BASE_HI_3 0xDE008C + +#define mmNIC4_QM0_PQ_SIZE_0 0xDE0090 + +#define mmNIC4_QM0_PQ_SIZE_1 0xDE0094 + +#define mmNIC4_QM0_PQ_SIZE_2 0xDE0098 + +#define mmNIC4_QM0_PQ_SIZE_3 0xDE009C + +#define mmNIC4_QM0_PQ_PI_0 0xDE00A0 + +#define mmNIC4_QM0_PQ_PI_1 0xDE00A4 + +#define mmNIC4_QM0_PQ_PI_2 0xDE00A8 + +#define mmNIC4_QM0_PQ_PI_3 0xDE00AC + +#define mmNIC4_QM0_PQ_CI_0 0xDE00B0 + +#define mmNIC4_QM0_PQ_CI_1 0xDE00B4 + +#define mmNIC4_QM0_PQ_CI_2 0xDE00B8 + +#define mmNIC4_QM0_PQ_CI_3 0xDE00BC + +#define mmNIC4_QM0_PQ_CFG0_0 0xDE00C0 + +#define mmNIC4_QM0_PQ_CFG0_1 0xDE00C4 + +#define mmNIC4_QM0_PQ_CFG0_2 0xDE00C8 + +#define mmNIC4_QM0_PQ_CFG0_3 0xDE00CC + +#define mmNIC4_QM0_PQ_CFG1_0 0xDE00D0 + +#define mmNIC4_QM0_PQ_CFG1_1 0xDE00D4 + +#define mmNIC4_QM0_PQ_CFG1_2 0xDE00D8 + +#define mmNIC4_QM0_PQ_CFG1_3 0xDE00DC + +#define mmNIC4_QM0_PQ_ARUSER_31_11_0 0xDE00E0 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_1 0xDE00E4 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_2 0xDE00E8 + +#define mmNIC4_QM0_PQ_ARUSER_31_11_3 0xDE00EC + +#define mmNIC4_QM0_PQ_STS0_0 0xDE00F0 + +#define mmNIC4_QM0_PQ_STS0_1 0xDE00F4 + +#define mmNIC4_QM0_PQ_STS0_2 0xDE00F8 + +#define mmNIC4_QM0_PQ_STS0_3 0xDE00FC + +#define mmNIC4_QM0_PQ_STS1_0 0xDE0100 + +#define mmNIC4_QM0_PQ_STS1_1 0xDE0104 + +#define mmNIC4_QM0_PQ_STS1_2 0xDE0108 + +#define mmNIC4_QM0_PQ_STS1_3 0xDE010C + +#define mmNIC4_QM0_CQ_CFG0_0 0xDE0110 + +#define mmNIC4_QM0_CQ_CFG0_1 0xDE0114 + +#define mmNIC4_QM0_CQ_CFG0_2 0xDE0118 + +#define mmNIC4_QM0_CQ_CFG0_3 0xDE011C + +#define mmNIC4_QM0_CQ_CFG0_4 0xDE0120 + +#define mmNIC4_QM0_CQ_CFG1_0 0xDE0124 + +#define mmNIC4_QM0_CQ_CFG1_1 0xDE0128 + +#define mmNIC4_QM0_CQ_CFG1_2 0xDE012C + +#define 
mmNIC4_QM0_CQ_CFG1_3 0xDE0130 + +#define mmNIC4_QM0_CQ_CFG1_4 0xDE0134 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_0 0xDE0138 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_1 0xDE013C + +#define mmNIC4_QM0_CQ_ARUSER_31_11_2 0xDE0140 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_3 0xDE0144 + +#define mmNIC4_QM0_CQ_ARUSER_31_11_4 0xDE0148 + +#define mmNIC4_QM0_CQ_STS0_0 0xDE014C + +#define mmNIC4_QM0_CQ_STS0_1 0xDE0150 + +#define mmNIC4_QM0_CQ_STS0_2 0xDE0154 + +#define mmNIC4_QM0_CQ_STS0_3 0xDE0158 + +#define mmNIC4_QM0_CQ_STS0_4 0xDE015C + +#define mmNIC4_QM0_CQ_STS1_0 0xDE0160 + +#define mmNIC4_QM0_CQ_STS1_1 0xDE0164 + +#define mmNIC4_QM0_CQ_STS1_2 0xDE0168 + +#define mmNIC4_QM0_CQ_STS1_3 0xDE016C + +#define mmNIC4_QM0_CQ_STS1_4 0xDE0170 + +#define mmNIC4_QM0_CQ_PTR_LO_0 0xDE0174 + +#define mmNIC4_QM0_CQ_PTR_HI_0 0xDE0178 + +#define mmNIC4_QM0_CQ_TSIZE_0 0xDE017C + +#define mmNIC4_QM0_CQ_CTL_0 0xDE0180 + +#define mmNIC4_QM0_CQ_PTR_LO_1 0xDE0184 + +#define mmNIC4_QM0_CQ_PTR_HI_1 0xDE0188 + +#define mmNIC4_QM0_CQ_TSIZE_1 0xDE018C + +#define mmNIC4_QM0_CQ_CTL_1 0xDE0190 + +#define mmNIC4_QM0_CQ_PTR_LO_2 0xDE0194 + +#define mmNIC4_QM0_CQ_PTR_HI_2 0xDE0198 + +#define mmNIC4_QM0_CQ_TSIZE_2 0xDE019C + +#define mmNIC4_QM0_CQ_CTL_2 0xDE01A0 + +#define mmNIC4_QM0_CQ_PTR_LO_3 0xDE01A4 + +#define mmNIC4_QM0_CQ_PTR_HI_3 0xDE01A8 + +#define mmNIC4_QM0_CQ_TSIZE_3 0xDE01AC + +#define mmNIC4_QM0_CQ_CTL_3 0xDE01B0 + +#define mmNIC4_QM0_CQ_PTR_LO_4 0xDE01B4 + +#define mmNIC4_QM0_CQ_PTR_HI_4 0xDE01B8 + +#define mmNIC4_QM0_CQ_TSIZE_4 0xDE01BC + +#define mmNIC4_QM0_CQ_CTL_4 0xDE01C0 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_0 0xDE01C4 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_1 0xDE01C8 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_2 0xDE01CC + +#define mmNIC4_QM0_CQ_PTR_LO_STS_3 0xDE01D0 + +#define mmNIC4_QM0_CQ_PTR_LO_STS_4 0xDE01D4 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_0 0xDE01D8 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_1 0xDE01DC + +#define mmNIC4_QM0_CQ_PTR_HI_STS_2 0xDE01E0 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_3 0xDE01E4 + +#define mmNIC4_QM0_CQ_PTR_HI_STS_4 0xDE01E8 + +#define mmNIC4_QM0_CQ_TSIZE_STS_0 0xDE01EC + +#define mmNIC4_QM0_CQ_TSIZE_STS_1 0xDE01F0 + +#define mmNIC4_QM0_CQ_TSIZE_STS_2 0xDE01F4 + +#define mmNIC4_QM0_CQ_TSIZE_STS_3 0xDE01F8 + +#define mmNIC4_QM0_CQ_TSIZE_STS_4 0xDE01FC + +#define mmNIC4_QM0_CQ_CTL_STS_0 0xDE0200 + +#define mmNIC4_QM0_CQ_CTL_STS_1 0xDE0204 + +#define mmNIC4_QM0_CQ_CTL_STS_2 0xDE0208 + +#define mmNIC4_QM0_CQ_CTL_STS_3 0xDE020C + +#define mmNIC4_QM0_CQ_CTL_STS_4 0xDE0210 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_0 0xDE0214 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_1 0xDE0218 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_2 0xDE021C + +#define mmNIC4_QM0_CQ_IFIFO_CNT_3 0xDE0220 + +#define mmNIC4_QM0_CQ_IFIFO_CNT_4 0xDE0224 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 0xDE0228 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 0xDE022C + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 0xDE0230 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 0xDE0234 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 0xDE0238 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 0xDE023C + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 0xDE0240 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 0xDE0244 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 0xDE0248 + +#define mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 0xDE024C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 0xDE0250 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 0xDE0254 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 0xDE0258 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 0xDE025C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 0xDE0260 + +#define 
mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 0xDE0264 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 0xDE0268 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 0xDE026C + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 0xDE0270 + +#define mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 0xDE0274 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 0xDE0278 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 0xDE027C + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 0xDE0280 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 0xDE0284 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 0xDE0288 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 0xDE028C + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 0xDE0290 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 0xDE0294 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 0xDE0298 + +#define mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 0xDE029C + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 0xDE02A0 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 0xDE02A4 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 0xDE02A8 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 0xDE02AC + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 0xDE02B0 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 0xDE02B4 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 0xDE02B8 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 0xDE02BC + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 0xDE02C0 + +#define mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 0xDE02C4 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 0xDE02C8 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 0xDE02CC + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 0xDE02D0 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 0xDE02D4 + +#define mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 0xDE02D8 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDE02E0 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDE02E4 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDE02E8 + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDE02EC + +#define mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDE02F0 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDE02F4 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDE02F8 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDE02FC + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDE0300 + +#define mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDE0304 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_0 0xDE0308 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_1 0xDE030C + +#define mmNIC4_QM0_CP_FENCE0_RDATA_2 0xDE0310 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_3 0xDE0314 + +#define mmNIC4_QM0_CP_FENCE0_RDATA_4 0xDE0318 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_0 0xDE031C + +#define mmNIC4_QM0_CP_FENCE1_RDATA_1 0xDE0320 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_2 0xDE0324 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_3 0xDE0328 + +#define mmNIC4_QM0_CP_FENCE1_RDATA_4 0xDE032C + +#define mmNIC4_QM0_CP_FENCE2_RDATA_0 0xDE0330 + +#define mmNIC4_QM0_CP_FENCE2_RDATA_1 0xDE0334 + +#define mmNIC4_QM0_CP_FENCE2_RDATA_2 0xDE0338 + +#define mmNIC4_QM0_CP_FENCE2_RDATA_3 0xDE033C + +#define mmNIC4_QM0_CP_FENCE2_RDATA_4 0xDE0340 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_0 0xDE0344 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_1 0xDE0348 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_2 0xDE034C + +#define mmNIC4_QM0_CP_FENCE3_RDATA_3 0xDE0350 + +#define mmNIC4_QM0_CP_FENCE3_RDATA_4 0xDE0354 + +#define mmNIC4_QM0_CP_FENCE0_CNT_0 0xDE0358 + +#define mmNIC4_QM0_CP_FENCE0_CNT_1 0xDE035C + +#define mmNIC4_QM0_CP_FENCE0_CNT_2 0xDE0360 + +#define mmNIC4_QM0_CP_FENCE0_CNT_3 0xDE0364 + +#define mmNIC4_QM0_CP_FENCE0_CNT_4 0xDE0368 + +#define mmNIC4_QM0_CP_FENCE1_CNT_0 0xDE036C + +#define mmNIC4_QM0_CP_FENCE1_CNT_1 0xDE0370 + +#define 
mmNIC4_QM0_CP_FENCE1_CNT_2 0xDE0374 + +#define mmNIC4_QM0_CP_FENCE1_CNT_3 0xDE0378 + +#define mmNIC4_QM0_CP_FENCE1_CNT_4 0xDE037C + +#define mmNIC4_QM0_CP_FENCE2_CNT_0 0xDE0380 + +#define mmNIC4_QM0_CP_FENCE2_CNT_1 0xDE0384 + +#define mmNIC4_QM0_CP_FENCE2_CNT_2 0xDE0388 + +#define mmNIC4_QM0_CP_FENCE2_CNT_3 0xDE038C + +#define mmNIC4_QM0_CP_FENCE2_CNT_4 0xDE0390 + +#define mmNIC4_QM0_CP_FENCE3_CNT_0 0xDE0394 + +#define mmNIC4_QM0_CP_FENCE3_CNT_1 0xDE0398 + +#define mmNIC4_QM0_CP_FENCE3_CNT_2 0xDE039C + +#define mmNIC4_QM0_CP_FENCE3_CNT_3 0xDE03A0 + +#define mmNIC4_QM0_CP_FENCE3_CNT_4 0xDE03A4 + +#define mmNIC4_QM0_CP_STS_0 0xDE03A8 + +#define mmNIC4_QM0_CP_STS_1 0xDE03AC + +#define mmNIC4_QM0_CP_STS_2 0xDE03B0 + +#define mmNIC4_QM0_CP_STS_3 0xDE03B4 + +#define mmNIC4_QM0_CP_STS_4 0xDE03B8 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_0 0xDE03BC + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_1 0xDE03C0 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_2 0xDE03C4 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_3 0xDE03C8 + +#define mmNIC4_QM0_CP_CURRENT_INST_LO_4 0xDE03CC + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_0 0xDE03D0 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_1 0xDE03D4 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_2 0xDE03D8 + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_3 0xDE03DC + +#define mmNIC4_QM0_CP_CURRENT_INST_HI_4 0xDE03E0 + +#define mmNIC4_QM0_CP_BARRIER_CFG_0 0xDE03F4 + +#define mmNIC4_QM0_CP_BARRIER_CFG_1 0xDE03F8 + +#define mmNIC4_QM0_CP_BARRIER_CFG_2 0xDE03FC + +#define mmNIC4_QM0_CP_BARRIER_CFG_3 0xDE0400 + +#define mmNIC4_QM0_CP_BARRIER_CFG_4 0xDE0404 + +#define mmNIC4_QM0_CP_DBG_0_0 0xDE0408 + +#define mmNIC4_QM0_CP_DBG_0_1 0xDE040C + +#define mmNIC4_QM0_CP_DBG_0_2 0xDE0410 + +#define mmNIC4_QM0_CP_DBG_0_3 0xDE0414 + +#define mmNIC4_QM0_CP_DBG_0_4 0xDE0418 + +#define mmNIC4_QM0_CP_ARUSER_31_11_0 0xDE041C + +#define mmNIC4_QM0_CP_ARUSER_31_11_1 0xDE0420 + +#define mmNIC4_QM0_CP_ARUSER_31_11_2 0xDE0424 + +#define mmNIC4_QM0_CP_ARUSER_31_11_3 0xDE0428 + +#define mmNIC4_QM0_CP_ARUSER_31_11_4 0xDE042C + +#define mmNIC4_QM0_CP_AWUSER_31_11_0 0xDE0430 + +#define mmNIC4_QM0_CP_AWUSER_31_11_1 0xDE0434 + +#define mmNIC4_QM0_CP_AWUSER_31_11_2 0xDE0438 + +#define mmNIC4_QM0_CP_AWUSER_31_11_3 0xDE043C + +#define mmNIC4_QM0_CP_AWUSER_31_11_4 0xDE0440 + +#define mmNIC4_QM0_ARB_CFG_0 0xDE0A00 + +#define mmNIC4_QM0_ARB_CHOISE_Q_PUSH 0xDE0A04 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_0 0xDE0A08 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_1 0xDE0A0C + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_2 0xDE0A10 + +#define mmNIC4_QM0_ARB_WRR_WEIGHT_3 0xDE0A14 + +#define mmNIC4_QM0_ARB_CFG_1 0xDE0A18 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 0xDE0A20 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 0xDE0A24 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 0xDE0A28 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 0xDE0A2C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 0xDE0A30 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 0xDE0A34 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 0xDE0A38 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 0xDE0A3C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 0xDE0A40 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 0xDE0A44 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 0xDE0A48 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 0xDE0A4C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 0xDE0A50 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 0xDE0A54 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 0xDE0A58 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 0xDE0A5C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 0xDE0A60 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 0xDE0A64 + +#define 
mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 0xDE0A68 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 0xDE0A6C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 0xDE0A70 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 0xDE0A74 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 0xDE0A78 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 0xDE0A7C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 0xDE0A80 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 0xDE0A84 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 0xDE0A88 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 0xDE0A8C + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 0xDE0A90 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 0xDE0A94 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 0xDE0A98 + +#define mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 0xDE0A9C + +#define mmNIC4_QM0_ARB_MST_CRED_INC 0xDE0AA0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_0 0xDE0AA4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_1 0xDE0AA8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_2 0xDE0AAC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_3 0xDE0AB0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_4 0xDE0AB4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_5 0xDE0AB8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_6 0xDE0ABC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_7 0xDE0AC0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_8 0xDE0AC4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_9 0xDE0AC8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_10 0xDE0ACC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_11 0xDE0AD0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_12 0xDE0AD4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_13 0xDE0AD8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_14 0xDE0ADC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_15 0xDE0AE0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_16 0xDE0AE4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_17 0xDE0AE8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_18 0xDE0AEC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_19 0xDE0AF0 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_20 0xDE0AF4 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_21 0xDE0AF8 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_22 0xDE0AFC + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 0xDE0B00 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_24 0xDE0B04 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_25 0xDE0B08 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_26 0xDE0B0C + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_27 0xDE0B10 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_28 0xDE0B14 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_29 0xDE0B18 + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_30 0xDE0B1C + +#define mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_31 0xDE0B20 + +#define mmNIC4_QM0_ARB_SLV_MASTER_INC_CRED_OFST 0xDE0B28 + +#define mmNIC4_QM0_ARB_MST_SLAVE_EN 0xDE0B2C + +#define mmNIC4_QM0_ARB_MST_QUIET_PER 0xDE0B34 + +#define mmNIC4_QM0_ARB_SLV_CHOISE_WDT 0xDE0B38 + +#define mmNIC4_QM0_ARB_SLV_ID 0xDE0B3C + +#define mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT 0xDE0B44 + +#define mmNIC4_QM0_ARB_MSG_AWUSER_31_11 0xDE0B48 + +#define mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP 0xDE0B4C + +#define mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP 0xDE0B50 + +#define mmNIC4_QM0_ARB_BASE_LO 0xDE0B54 + +#define mmNIC4_QM0_ARB_BASE_HI 0xDE0B58 + +#define mmNIC4_QM0_ARB_STATE_STS 0xDE0B80 + +#define mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS 0xDE0B84 + +#define mmNIC4_QM0_ARB_MSG_STS 0xDE0B88 + +#define mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD 0xDE0B8C + +#define mmNIC4_QM0_ARB_ERR_CAUSE 0xDE0B9C + +#define mmNIC4_QM0_ARB_ERR_MSG_EN 0xDE0BA0 + +#define mmNIC4_QM0_ARB_ERR_STS_DRP 0xDE0BA8 + +#define 
mmNIC4_QM0_ARB_MST_CRED_STS_0 0xDE0BB0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_1 0xDE0BB4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_2 0xDE0BB8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_3 0xDE0BBC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_4 0xDE0BC0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_5 0xDE0BC4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_6 0xDE0BC8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_7 0xDE0BCC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_8 0xDE0BD0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_9 0xDE0BD4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_10 0xDE0BD8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_11 0xDE0BDC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_12 0xDE0BE0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_13 0xDE0BE4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_14 0xDE0BE8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_15 0xDE0BEC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_16 0xDE0BF0 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_17 0xDE0BF4 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_18 0xDE0BF8 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_19 0xDE0BFC + +#define mmNIC4_QM0_ARB_MST_CRED_STS_20 0xDE0C00 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_21 0xDE0C04 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_22 0xDE0C08 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_23 0xDE0C0C + +#define mmNIC4_QM0_ARB_MST_CRED_STS_24 0xDE0C10 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_25 0xDE0C14 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_26 0xDE0C18 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_27 0xDE0C1C + +#define mmNIC4_QM0_ARB_MST_CRED_STS_28 0xDE0C20 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_29 0xDE0C24 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_30 0xDE0C28 + +#define mmNIC4_QM0_ARB_MST_CRED_STS_31 0xDE0C2C + +#define mmNIC4_QM0_CGM_CFG 0xDE0C70 + +#define mmNIC4_QM0_CGM_STS 0xDE0C74 + +#define mmNIC4_QM0_CGM_CFG1 0xDE0C78 + +#define mmNIC4_QM0_LOCAL_RANGE_BASE 0xDE0C80 + +#define mmNIC4_QM0_LOCAL_RANGE_SIZE 0xDE0C84 + +#define mmNIC4_QM0_CSMR_STRICT_PRIO_CFG 0xDE0C90 + +#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 0xDE0C94 + +#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 0xDE0C98 + +#define mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 0xDE0C9C + +#define mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 0xDE0CA0 + +#define mmNIC4_QM0_GLBL_AXCACHE 0xDE0CA4 + +#define mmNIC4_QM0_IND_GW_APB_CFG 0xDE0CB0 + +#define mmNIC4_QM0_IND_GW_APB_WDATA 0xDE0CB4 + +#define mmNIC4_QM0_IND_GW_APB_RDATA 0xDE0CB8 + +#define mmNIC4_QM0_IND_GW_APB_STATUS 0xDE0CBC + +#define mmNIC4_QM0_GLBL_ERR_ADDR_LO 0xDE0CD0 + +#define mmNIC4_QM0_GLBL_ERR_ADDR_HI 0xDE0CD4 + +#define mmNIC4_QM0_GLBL_ERR_WDATA 0xDE0CD8 + +#define mmNIC4_QM0_GLBL_MEM_INIT_BUSY 0xDE0D00 + +#endif /* ASIC_REG_NIC4_QM0_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h new file mode 100644 index 000000000000..34b21b21da52 --- /dev/null +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h @@ -0,0 +1,834 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_NIC4_QM1_REGS_H_ +#define ASIC_REG_NIC4_QM1_REGS_H_ + +/* + ***************************************** + * NIC4_QM1 (Prototype: QMAN) + ***************************************** + */ + +#define mmNIC4_QM1_GLBL_CFG0 0xDE2000 + +#define mmNIC4_QM1_GLBL_CFG1 0xDE2004 + +#define mmNIC4_QM1_GLBL_PROT 0xDE2008 + +#define mmNIC4_QM1_GLBL_ERR_CFG 0xDE200C + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_0 0xDE2010 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_1 0xDE2014 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_2 0xDE2018 + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_3 0xDE201C + +#define mmNIC4_QM1_GLBL_SECURE_PROPS_4 0xDE2020 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 0xDE2024 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 0xDE2028 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 0xDE202C + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 0xDE2030 + +#define mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 0xDE2034 + +#define mmNIC4_QM1_GLBL_STS0 0xDE2038 + +#define mmNIC4_QM1_GLBL_STS1_0 0xDE2040 + +#define mmNIC4_QM1_GLBL_STS1_1 0xDE2044 + +#define mmNIC4_QM1_GLBL_STS1_2 0xDE2048 + +#define mmNIC4_QM1_GLBL_STS1_3 0xDE204C + +#define mmNIC4_QM1_GLBL_STS1_4 0xDE2050 + +#define mmNIC4_QM1_GLBL_MSG_EN_0 0xDE2054 + +#define mmNIC4_QM1_GLBL_MSG_EN_1 0xDE2058 + +#define mmNIC4_QM1_GLBL_MSG_EN_2 0xDE205C + +#define mmNIC4_QM1_GLBL_MSG_EN_3 0xDE2060 + +#define mmNIC4_QM1_GLBL_MSG_EN_4 0xDE2068 + +#define mmNIC4_QM1_PQ_BASE_LO_0 0xDE2070 + +#define mmNIC4_QM1_PQ_BASE_LO_1 0xDE2074 + +#define mmNIC4_QM1_PQ_BASE_LO_2 0xDE2078 + +#define mmNIC4_QM1_PQ_BASE_LO_3 0xDE207C + +#define mmNIC4_QM1_PQ_BASE_HI_0 0xDE2080 + +#define mmNIC4_QM1_PQ_BASE_HI_1 0xDE2084 + +#define mmNIC4_QM1_PQ_BASE_HI_2 0xDE2088 + +#define mmNIC4_QM1_PQ_BASE_HI_3 0xDE208C + +#define mmNIC4_QM1_PQ_SIZE_0 0xDE2090 + +#define mmNIC4_QM1_PQ_SIZE_1 0xDE2094 + +#define mmNIC4_QM1_PQ_SIZE_2 0xDE2098 + +#define mmNIC4_QM1_PQ_SIZE_3 0xDE209C + +#define mmNIC4_QM1_PQ_PI_0 0xDE20A0 + +#define mmNIC4_QM1_PQ_PI_1 0xDE20A4 + +#define mmNIC4_QM1_PQ_PI_2 0xDE20A8 + +#define mmNIC4_QM1_PQ_PI_3 0xDE20AC + +#define mmNIC4_QM1_PQ_CI_0 0xDE20B0 + +#define mmNIC4_QM1_PQ_CI_1 0xDE20B4 + +#define mmNIC4_QM1_PQ_CI_2 0xDE20B8 + +#define mmNIC4_QM1_PQ_CI_3 0xDE20BC + +#define mmNIC4_QM1_PQ_CFG0_0 0xDE20C0 + +#define mmNIC4_QM1_PQ_CFG0_1 0xDE20C4 + +#define mmNIC4_QM1_PQ_CFG0_2 0xDE20C8 + +#define mmNIC4_QM1_PQ_CFG0_3 0xDE20CC + +#define mmNIC4_QM1_PQ_CFG1_0 0xDE20D0 + +#define mmNIC4_QM1_PQ_CFG1_1 0xDE20D4 + +#define mmNIC4_QM1_PQ_CFG1_2 0xDE20D8 + +#define mmNIC4_QM1_PQ_CFG1_3 0xDE20DC + +#define mmNIC4_QM1_PQ_ARUSER_31_11_0 0xDE20E0 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_1 0xDE20E4 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_2 0xDE20E8 + +#define mmNIC4_QM1_PQ_ARUSER_31_11_3 0xDE20EC + +#define mmNIC4_QM1_PQ_STS0_0 0xDE20F0 + +#define mmNIC4_QM1_PQ_STS0_1 0xDE20F4 + +#define mmNIC4_QM1_PQ_STS0_2 0xDE20F8 + +#define mmNIC4_QM1_PQ_STS0_3 0xDE20FC + +#define mmNIC4_QM1_PQ_STS1_0 0xDE2100 + +#define mmNIC4_QM1_PQ_STS1_1 0xDE2104 + +#define mmNIC4_QM1_PQ_STS1_2 0xDE2108 + +#define mmNIC4_QM1_PQ_STS1_3 0xDE210C + +#define mmNIC4_QM1_CQ_CFG0_0 0xDE2110 + +#define mmNIC4_QM1_CQ_CFG0_1 0xDE2114 + +#define mmNIC4_QM1_CQ_CFG0_2 0xDE2118 + +#define mmNIC4_QM1_CQ_CFG0_3 0xDE211C + +#define mmNIC4_QM1_CQ_CFG0_4 0xDE2120 + +#define mmNIC4_QM1_CQ_CFG1_0 0xDE2124 + +#define mmNIC4_QM1_CQ_CFG1_1 0xDE2128 + +#define mmNIC4_QM1_CQ_CFG1_2 0xDE212C + +#define 
mmNIC4_QM1_CQ_CFG1_3 0xDE2130 + +#define mmNIC4_QM1_CQ_CFG1_4 0xDE2134 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_0 0xDE2138 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_1 0xDE213C + +#define mmNIC4_QM1_CQ_ARUSER_31_11_2 0xDE2140 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_3 0xDE2144 + +#define mmNIC4_QM1_CQ_ARUSER_31_11_4 0xDE2148 + +#define mmNIC4_QM1_CQ_STS0_0 0xDE214C + +#define mmNIC4_QM1_CQ_STS0_1 0xDE2150 + +#define mmNIC4_QM1_CQ_STS0_2 0xDE2154 + +#define mmNIC4_QM1_CQ_STS0_3 0xDE2158 + +#define mmNIC4_QM1_CQ_STS0_4 0xDE215C + +#define mmNIC4_QM1_CQ_STS1_0 0xDE2160 + +#define mmNIC4_QM1_CQ_STS1_1 0xDE2164 + +#define mmNIC4_QM1_CQ_STS1_2 0xDE2168 + +#define mmNIC4_QM1_CQ_STS1_3 0xDE216C + +#define mmNIC4_QM1_CQ_STS1_4 0xDE2170 + +#define mmNIC4_QM1_CQ_PTR_LO_0 0xDE2174 + +#define mmNIC4_QM1_CQ_PTR_HI_0 0xDE2178 + +#define mmNIC4_QM1_CQ_TSIZE_0 0xDE217C + +#define mmNIC4_QM1_CQ_CTL_0 0xDE2180 + +#define mmNIC4_QM1_CQ_PTR_LO_1 0xDE2184 + +#define mmNIC4_QM1_CQ_PTR_HI_1 0xDE2188 + +#define mmNIC4_QM1_CQ_TSIZE_1 0xDE218C + +#define mmNIC4_QM1_CQ_CTL_1 0xDE2190 + +#define mmNIC4_QM1_CQ_PTR_LO_2 0xDE2194 + +#define mmNIC4_QM1_CQ_PTR_HI_2 0xDE2198 + +#define mmNIC4_QM1_CQ_TSIZE_2 0xDE219C + +#define mmNIC4_QM1_CQ_CTL_2 0xDE21A0 + +#define mmNIC4_QM1_CQ_PTR_LO_3 0xDE21A4 + +#define mmNIC4_QM1_CQ_PTR_HI_3 0xDE21A8 + +#define mmNIC4_QM1_CQ_TSIZE_3 0xDE21AC + +#define mmNIC4_QM1_CQ_CTL_3 0xDE21B0 + +#define mmNIC4_QM1_CQ_PTR_LO_4 0xDE21B4 + +#define mmNIC4_QM1_CQ_PTR_HI_4 0xDE21B8 + +#define mmNIC4_QM1_CQ_TSIZE_4 0xDE21BC + +#define mmNIC4_QM1_CQ_CTL_4 0xDE21C0 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_0 0xDE21C4 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_1 0xDE21C8 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_2 0xDE21CC + +#define mmNIC4_QM1_CQ_PTR_LO_STS_3 0xDE21D0 + +#define mmNIC4_QM1_CQ_PTR_LO_STS_4 0xDE21D4 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_0 0xDE21D8 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_1 0xDE21DC + +#define mmNIC4_QM1_CQ_PTR_HI_STS_2 0xDE21E0 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_3 0xDE21E4 + +#define mmNIC4_QM1_CQ_PTR_HI_STS_4 0xDE21E8 + +#define mmNIC4_QM1_CQ_TSIZE_STS_0 0xDE21EC + +#define mmNIC4_QM1_CQ_TSIZE_STS_1 0xDE21F0 + +#define mmNIC4_QM1_CQ_TSIZE_STS_2 0xDE21F4 + +#define mmNIC4_QM1_CQ_TSIZE_STS_3 0xDE21F8 + +#define mmNIC4_QM1_CQ_TSIZE_STS_4 0xDE21FC + +#define mmNIC4_QM1_CQ_CTL_STS_0 0xDE2200 + +#define mmNIC4_QM1_CQ_CTL_STS_1 0xDE2204 + +#define mmNIC4_QM1_CQ_CTL_STS_2 0xDE2208 + +#define mmNIC4_QM1_CQ_CTL_STS_3 0xDE220C + +#define mmNIC4_QM1_CQ_CTL_STS_4 0xDE2210 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_0 0xDE2214 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_1 0xDE2218 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_2 0xDE221C + +#define mmNIC4_QM1_CQ_IFIFO_CNT_3 0xDE2220 + +#define mmNIC4_QM1_CQ_IFIFO_CNT_4 0xDE2224 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 0xDE2228 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 0xDE222C + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 0xDE2230 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 0xDE2234 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 0xDE2238 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 0xDE223C + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 0xDE2240 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 0xDE2244 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 0xDE2248 + +#define mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 0xDE224C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 0xDE2250 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 0xDE2254 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 0xDE2258 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 0xDE225C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 0xDE2260 + +#define 
mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 0xDE2264 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 0xDE2268 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 0xDE226C + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 0xDE2270 + +#define mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 0xDE2274 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 0xDE2278 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 0xDE227C + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 0xDE2280 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 0xDE2284 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 0xDE2288 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 0xDE228C + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 0xDE2290 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 0xDE2294 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 0xDE2298 + +#define mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 0xDE229C + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 0xDE22A0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 0xDE22A4 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 0xDE22A8 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 0xDE22AC + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 0xDE22B0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 0xDE22B4 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 0xDE22B8 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 0xDE22BC + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 0xDE22C0 + +#define mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 0xDE22C4 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 0xDE22C8 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 0xDE22CC + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 0xDE22D0 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 0xDE22D4 + +#define mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 0xDE22D8 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 0xDE22E0 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 0xDE22E4 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 0xDE22E8 + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 0xDE22EC + +#define mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 0xDE22F0 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 0xDE22F4 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 0xDE22F8 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 0xDE22FC + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 0xDE2300 + +#define mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 0xDE2304 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_0 0xDE2308 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_1 0xDE230C + +#define mmNIC4_QM1_CP_FENCE0_RDATA_2 0xDE2310 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_3 0xDE2314 + +#define mmNIC4_QM1_CP_FENCE0_RDATA_4 0xDE2318 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_0 0xDE231C + +#define mmNIC4_QM1_CP_FENCE1_RDATA_1 0xDE2320 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_2 0xDE2324 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_3 0xDE2328 + +#define mmNIC4_QM1_CP_FENCE1_RDATA_4 0xDE232C + +#define mmNIC4_QM1_CP_FENCE2_RDATA_0 0xDE2330 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_1 0xDE2334 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_2 0xDE2338 + +#define mmNIC4_QM1_CP_FENCE2_RDATA_3 0xDE233C + +#define mmNIC4_QM1_CP_FENCE2_RDATA_4 0xDE2340 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_0 0xDE2344 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_1 0xDE2348 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_2 0xDE234C + +#define mmNIC4_QM1_CP_FENCE3_RDATA_3 0xDE2350 + +#define mmNIC4_QM1_CP_FENCE3_RDATA_4 0xDE2354 + +#define mmNIC4_QM1_CP_FENCE0_CNT_0 0xDE2358 + +#define mmNIC4_QM1_CP_FENCE0_CNT_1 0xDE235C + +#define mmNIC4_QM1_CP_FENCE0_CNT_2 0xDE2360 + +#define mmNIC4_QM1_CP_FENCE0_CNT_3 0xDE2364 + +#define mmNIC4_QM1_CP_FENCE0_CNT_4 0xDE2368 + +#define mmNIC4_QM1_CP_FENCE1_CNT_0 0xDE236C + +#define mmNIC4_QM1_CP_FENCE1_CNT_1 0xDE2370 + +#define 
mmNIC4_QM1_CP_FENCE1_CNT_2 0xDE2374 + +#define mmNIC4_QM1_CP_FENCE1_CNT_3 0xDE2378 + +#define mmNIC4_QM1_CP_FENCE1_CNT_4 0xDE237C + +#define mmNIC4_QM1_CP_FENCE2_CNT_0 0xDE2380 + +#define mmNIC4_QM1_CP_FENCE2_CNT_1 0xDE2384 + +#define mmNIC4_QM1_CP_FENCE2_CNT_2 0xDE2388 + +#define mmNIC4_QM1_CP_FENCE2_CNT_3 0xDE238C + +#define mmNIC4_QM1_CP_FENCE2_CNT_4 0xDE2390 + +#define mmNIC4_QM1_CP_FENCE3_CNT_0 0xDE2394 + +#define mmNIC4_QM1_CP_FENCE3_CNT_1 0xDE2398 + +#define mmNIC4_QM1_CP_FENCE3_CNT_2 0xDE239C + +#define mmNIC4_QM1_CP_FENCE3_CNT_3 0xDE23A0 + +#define mmNIC4_QM1_CP_FENCE3_CNT_4 0xDE23A4 + +#define mmNIC4_QM1_CP_STS_0 0xDE23A8 + +#define mmNIC4_QM1_CP_STS_1 0xDE23AC + +#define mmNIC4_QM1_CP_STS_2 0xDE23B0 + +#define mmNIC4_QM1_CP_STS_3 0xDE23B4 + +#define mmNIC4_QM1_CP_STS_4 0xDE23B8 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_0 0xDE23BC + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_1 0xDE23C0 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_2 0xDE23C4 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_3 0xDE23C8 + +#define mmNIC4_QM1_CP_CURRENT_INST_LO_4 0xDE23CC + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_0 0xDE23D0 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_1 0xDE23D4 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_2 0xDE23D8 + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_3 0xDE23DC + +#define mmNIC4_QM1_CP_CURRENT_INST_HI_4 0xDE23E0 + +#define mmNIC4_QM1_CP_BARRIER_CFG_0 0xDE23F4 + +#define mmNIC4_QM1_CP_BARRIER_CFG_1 0xDE23F8 + +#define mmNIC4_QM1_CP_BARRIER_CFG_2 0xDE23FC + +#define mmNIC4_QM1_CP_BARRIER_CFG_3 0xDE2400 + +#define mmNIC4_QM1_CP_BARRIER_CFG_4 0xDE2404 + +#define mmNIC4_QM1_CP_DBG_0_0 0xDE2408 + +#define mmNIC4_QM1_CP_DBG_0_1 0xDE240C + +#define mmNIC4_QM1_CP_DBG_0_2 0xDE2410 + +#define mmNIC4_QM1_CP_DBG_0_3 0xDE2414 + +#define mmNIC4_QM1_CP_DBG_0_4 0xDE2418 + +#define mmNIC4_QM1_CP_ARUSER_31_11_0 0xDE241C + +#define mmNIC4_QM1_CP_ARUSER_31_11_1 0xDE2420 + +#define mmNIC4_QM1_CP_ARUSER_31_11_2 0xDE2424 + +#define mmNIC4_QM1_CP_ARUSER_31_11_3 0xDE2428 + +#define mmNIC4_QM1_CP_ARUSER_31_11_4 0xDE242C + +#define mmNIC4_QM1_CP_AWUSER_31_11_0 0xDE2430 + +#define mmNIC4_QM1_CP_AWUSER_31_11_1 0xDE2434 + +#define mmNIC4_QM1_CP_AWUSER_31_11_2 0xDE2438 + +#define mmNIC4_QM1_CP_AWUSER_31_11_3 0xDE243C + +#define mmNIC4_QM1_CP_AWUSER_31_11_4 0xDE2440 + +#define mmNIC4_QM1_ARB_CFG_0 0xDE2A00 + +#define mmNIC4_QM1_ARB_CHOISE_Q_PUSH 0xDE2A04 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_0 0xDE2A08 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_1 0xDE2A0C + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_2 0xDE2A10 + +#define mmNIC4_QM1_ARB_WRR_WEIGHT_3 0xDE2A14 + +#define mmNIC4_QM1_ARB_CFG_1 0xDE2A18 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 0xDE2A20 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 0xDE2A24 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 0xDE2A28 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 0xDE2A2C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 0xDE2A30 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 0xDE2A34 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 0xDE2A38 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 0xDE2A3C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 0xDE2A40 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 0xDE2A44 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 0xDE2A48 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 0xDE2A4C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 0xDE2A50 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 0xDE2A54 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 0xDE2A58 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 0xDE2A5C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 0xDE2A60 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 0xDE2A64 + +#define 
mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 0xDE2A68 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 0xDE2A6C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 0xDE2A70 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 0xDE2A74 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 0xDE2A78 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 0xDE2A7C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 0xDE2A80 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 0xDE2A84 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 0xDE2A88 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 0xDE2A8C + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 0xDE2A90 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 0xDE2A94 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 0xDE2A98 + +#define mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 0xDE2A9C + +#define mmNIC4_QM1_ARB_MST_CRED_INC 0xDE2AA0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_0 0xDE2AA4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_1 0xDE2AA8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_2 0xDE2AAC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_3 0xDE2AB0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_4 0xDE2AB4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_5 0xDE2AB8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_6 0xDE2ABC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_7 0xDE2AC0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_8 0xDE2AC4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_9 0xDE2AC8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_10 0xDE2ACC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_11 0xDE2AD0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_12 0xDE2AD4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_13 0xDE2AD8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_14 0xDE2ADC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_15 0xDE2AE0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_16 0xDE2AE4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_17 0xDE2AE8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_18 0xDE2AEC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_19 0xDE2AF0 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_20 0xDE2AF4 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_21 0xDE2AF8 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_22 0xDE2AFC + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 0xDE2B00 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_24 0xDE2B04 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_25 0xDE2B08 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_26 0xDE2B0C + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_27 0xDE2B10 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_28 0xDE2B14 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_29 0xDE2B18 + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_30 0xDE2B1C + +#define mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_31 0xDE2B20 + +#define mmNIC4_QM1_ARB_SLV_MASTER_INC_CRED_OFST 0xDE2B28 + +#define mmNIC4_QM1_ARB_MST_SLAVE_EN 0xDE2B2C + +#define mmNIC4_QM1_ARB_MST_QUIET_PER 0xDE2B34 + +#define mmNIC4_QM1_ARB_SLV_CHOISE_WDT 0xDE2B38 + +#define mmNIC4_QM1_ARB_SLV_ID 0xDE2B3C + +#define mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT 0xDE2B44 + +#define mmNIC4_QM1_ARB_MSG_AWUSER_31_11 0xDE2B48 + +#define mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP 0xDE2B4C + +#define mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP 0xDE2B50 + +#define mmNIC4_QM1_ARB_BASE_LO 0xDE2B54 + +#define mmNIC4_QM1_ARB_BASE_HI 0xDE2B58 + +#define mmNIC4_QM1_ARB_STATE_STS 0xDE2B80 + +#define mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS 0xDE2B84 + +#define mmNIC4_QM1_ARB_MSG_STS 0xDE2B88 + +#define mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD 0xDE2B8C + +#define mmNIC4_QM1_ARB_ERR_CAUSE 0xDE2B9C + +#define mmNIC4_QM1_ARB_ERR_MSG_EN 0xDE2BA0 + +#define mmNIC4_QM1_ARB_ERR_STS_DRP 0xDE2BA8 + +#define 
mmNIC4_QM1_ARB_MST_CRED_STS_0 0xDE2BB0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_1 0xDE2BB4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_2 0xDE2BB8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_3 0xDE2BBC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_4 0xDE2BC0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_5 0xDE2BC4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_6 0xDE2BC8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_7 0xDE2BCC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_8 0xDE2BD0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_9 0xDE2BD4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_10 0xDE2BD8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_11 0xDE2BDC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_12 0xDE2BE0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_13 0xDE2BE4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_14 0xDE2BE8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_15 0xDE2BEC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_16 0xDE2BF0 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_17 0xDE2BF4 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_18 0xDE2BF8 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_19 0xDE2BFC + +#define mmNIC4_QM1_ARB_MST_CRED_STS_20 0xDE2C00 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_21 0xDE2C04 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_22 0xDE2C08 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_23 0xDE2C0C + +#define mmNIC4_QM1_ARB_MST_CRED_STS_24 0xDE2C10 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_25 0xDE2C14 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_26 0xDE2C18 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_27 0xDE2C1C + +#define mmNIC4_QM1_ARB_MST_CRED_STS_28 0xDE2C20 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_29 0xDE2C24 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_30 0xDE2C28 + +#define mmNIC4_QM1_ARB_MST_CRED_STS_31 0xDE2C2C + +#define mmNIC4_QM1_CGM_CFG 0xDE2C70 + +#define mmNIC4_QM1_CGM_STS 0xDE2C74 + +#define mmNIC4_QM1_CGM_CFG1 0xDE2C78 + +#define mmNIC4_QM1_LOCAL_RANGE_BASE 0xDE2C80 + +#define mmNIC4_QM1_LOCAL_RANGE_SIZE 0xDE2C84 + +#define mmNIC4_QM1_CSMR_STRICT_PRIO_CFG 0xDE2C90 + +#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 0xDE2C94 + +#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 0xDE2C98 + +#define mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 0xDE2C9C + +#define mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 0xDE2CA0 + +#define mmNIC4_QM1_GLBL_AXCACHE 0xDE2CA4 + +#define mmNIC4_QM1_IND_GW_APB_CFG 0xDE2CB0 + +#define mmNIC4_QM1_IND_GW_APB_WDATA 0xDE2CB4 + +#define mmNIC4_QM1_IND_GW_APB_RDATA 0xDE2CB8 + +#define mmNIC4_QM1_IND_GW_APB_STATUS 0xDE2CBC + +#define mmNIC4_QM1_GLBL_ERR_ADDR_LO 0xDE2CD0 + +#define mmNIC4_QM1_GLBL_ERR_ADDR_HI 0xDE2CD4 + +#define mmNIC4_QM1_GLBL_ERR_WDATA 0xDE2CD8 + +#define mmNIC4_QM1_GLBL_MEM_INIT_BUSY 0xDE2D00 + +#endif /* ASIC_REG_NIC4_QM1_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h index 46aed13f16b1..b9b90d079e23 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h @@ -41,6 +41,11 @@ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CQF_EN_MASK, 0x1F)) | \ (FIELD_PREP(TPC0_QM_GLBL_CFG0_CP_EN_MASK, 0x1F))) +#define NIC_QMAN_ENABLE (\ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_PQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_CQF_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_CFG0_CP_EN_MASK, 0xF))) + #define QMAN_UPPER_CP_CGM_PWR_GATE_EN (\ (FIELD_PREP(DMA0_QM_CGM_CFG_IDLE_TH_MASK, 0x20)) | \ (FIELD_PREP(DMA0_QM_CGM_CFG_G2F_TH_MASK, 0xA)) | \ @@ -93,6 +98,16 @@ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0x1F)) | \ (FIELD_PREP(MME0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0x1F))) +#define NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK (\ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK, 0xF)) | \ 
+ (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK, 0xF))) + +#define NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK (\ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK, 0xF)) | \ + (FIELD_PREP(NIC0_QM0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK, 0xF))) + #define QMAN_CGM1_PWR_GATE_EN (FIELD_PREP(DMA0_QM_CGM_CFG1_MASK_TH_MASK, 0xA)) /* RESET registers configuration */ -- cgit v1.2.3 From b3a9c0bd2f1847e637147b7175fe7d99d28bef33 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 2 Nov 2020 21:07:51 +0200 Subject: habanalabs/gaudi: add NIC firmware-related definitions Add new structures and messages that the driver use to interact with the firmware to receive information and events (errors) about GAUDI's NIC. Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/include/common/cpucp_if.h | 34 +++++++++++++++++++--- .../misc/habanalabs/include/gaudi/gaudi_fw_if.h | 24 +++++++++++++++ 2 files changed, 54 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 2a5c9cb3d505..782b8b8636be 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -9,6 +9,7 @@ #define CPUCP_IF_H #include +#include /* * EVENT QUEUE @@ -199,6 +200,11 @@ enum pq_init_status { * CpuCP to write to the structure, to prevent data corruption in case of * mismatched driver/FW versions. * + * CPUCP_PACKET_NIC_INFO_GET - + * Fetch information from the device regarding the NIC. the host's driver + * passes the max size it allows the CpuCP to write to the structure, to + * prevent data corruption in case of mismatched driver/FW versions. + * * CPUCP_PACKET_TEMPERATURE_SET - * Set the value of the offset property of a specified thermal sensor. 
* The packet's arguments specify the desired sensor and the field to @@ -244,12 +250,12 @@ enum cpucp_packet_id { CPUCP_PACKET_MAX_POWER_GET, /* sysfs */ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */ - CPUCP_RESERVED, + CPUCP_PACKET_NIC_INFO_GET, /* internal */ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */ CPUCP_PACKET_CURRENT_SET, /* sysfs */ - CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ - CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ + CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ + CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ CPUCP_PACKET_PLL_REG_GET, /* internal */ }; @@ -300,7 +306,7 @@ struct cpucp_packet { /* For led set */ __le32 led_index; - /* For get CpuCP info/EEPROM data */ + /* For get CpuCP info/EEPROM data/NIC info */ __le32 data_max_size; }; @@ -392,6 +398,12 @@ struct eq_generic_event { #define CARD_NAME_MAX_LEN 16 #define VERSION_MAX_LEN 128 #define CPUCP_MAX_SENSORS 128 +#define CPUCP_MAX_NICS 128 +#define CPUCP_LANES_PER_NIC 4 +#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024 +#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC) +#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64) +#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64) struct cpucp_sensor { __le32 type; @@ -440,4 +452,18 @@ struct cpucp_info { char card_name[CARD_NAME_MAX_LEN]; }; +struct cpucp_mac_addr { + __u8 mac_addr[ETH_ALEN]; +}; + +struct cpucp_nic_info { + struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS]; + __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN]; + __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN]; + __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN]; + __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN]; + __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN]; +}; + #endif /* CPUCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h index 8aadc6357da1..d61a4c87b765 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h @@ -8,6 +8,8 @@ #ifndef GAUDI_FW_IF_H #define GAUDI_FW_IF_H +#include + #define GAUDI_EVENT_QUEUE_MSI_IDX 8 #define GAUDI_NIC_PORT1_MSI_IDX 10 #define GAUDI_NIC_PORT3_MSI_IDX 12 @@ -31,6 +33,28 @@ enum gaudi_pll_index { IF_PLL }; +enum gaudi_nic_axi_error { + RXB, + RXE, + TXS, + TXE, + QPC_RESP, + NON_AXI_ERR, +}; + +/* + * struct eq_nic_sei_event - describes an AXI error cause. + * @axi_error_cause: one of the events defined in enum gaudi_nic_axi_error. + * @id: can be either 0 or 1, to further describe unit with interrupt cause + * (i.e. TXE0 or TXE1). + * @pad[6]: padding structure to 64bit. + */ +struct eq_nic_sei_event { + __u8 axi_error_cause; + __u8 id; + __u8 pad[6]; +}; + #define GAUDI_PLL_FREQ_LOW 200000000 /* 200 MHz */ #endif /* GAUDI_FW_IF_H */ -- cgit v1.2.3 From 11dcb8c712354709b9d9aa6d6d0880217d008e99 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 2 Nov 2020 21:09:33 +0200 Subject: habanalabs/gaudi: add NIC security configuration Configure the security properties of the NIC IP. This is to prevent the user process from doing something with the NIC that he shouldn't do. e.g. crash the server, steal data, etc. 
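The security hunk below repeats one addressing pattern over and over: for every register it touches, the driver computes the address of a 32-bit protection word inside the owning 4 KB register block and accumulates one bit per register into a mask. The following stand-alone C sketch only mirrors that arithmetic as it appears in the diff; it is an illustration, not driver code, and it assumes PROT_BITS_OFFS is 0xF80 (the per-block offset of the protection-bits table implied by the (reg & PROT_BITS_OFFS) >> 7 expression), with the example register value taken from the NIC4 header above.

#include <stdio.h>

/* Assumed per-block offset of the protection-bits table (see note above). */
#define PROT_BITS_OFFS 0xF80

static void prot_bit_for_reg(unsigned int reg)
{
	/* The protection table lives at PROT_BITS_OFFS inside the 4 KB block
	 * that contains 'reg'.
	 */
	unsigned int pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;
	/* One 32-bit protection word covers 128 bytes, i.e. 32 registers. */
	unsigned int word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;
	/* One bit per 4-byte register within that 128-byte group. */
	unsigned int mask = 1u << ((reg & 0x7Fu) >> 2);

	printf("reg 0x%06X -> protection word 0x%06X, bit mask 0x%08X\n",
	       reg, pb_addr + word_offset, mask);
}

int main(void)
{
	/* mmNIC4_QM0_ARB_MST_AVAIL_CRED_18, value taken from the header above. */
	prot_bit_for_reg(0xDE0A68);
	return 0;
}

For 0xDE0A68 this yields protection word 0xDE0FD0 and mask 0x04000000; in the patch the accumulated mask is then applied with WREG32(pb_addr + word_offset, ~mask), i.e. the complement of the collected bits is written to the protection word.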
Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi_security.c | 3973 ++++++++++++++++++++++++ 1 file changed, 3973 insertions(+) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c index 2d7add0e5bcc..8a921ab56557 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_security.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c @@ -5157,6 +5157,3977 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev) WREG32(pb_addr + word_offset, ~mask); } +static void gaudi_init_nic_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + WREG32(mmNIC0_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC0_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC0_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_PI_3 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC0_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = 
((mmNIC0_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + 
PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + 
mask = 1U << ((mmNIC0_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_0 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << 
((mmNIC0_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC0_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_ARB_MST_CRED_STS_31 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC0_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC0_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC0_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC1_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC1_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC1_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + 
PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC1_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC1_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC1_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC1_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC1_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC1_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC1_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC2_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC2_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC2_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) 
>> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC2_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + 
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CGM_CFG1 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_AXCACHE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_WDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_RDATA & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_IND_GW_APB_STATUS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM0_GLBL_ERR_WDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS)
+			>> 7) << 2;
+	mask = 1U << ((mmNIC2_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC2_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC2_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC2_QM1_GLBL_CFG0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_CFG1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_PROT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_CFG & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2);
+	mask |= 1U <<
((mmNIC2_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_LO_4 & 
0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC2_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC2_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC2_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC2_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC3_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC3_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC3_QM0_GLBL_CFG0 & ~0xFFF) + 
PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + 
mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + 
+	word_offset = ((mmNIC3_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_CFG_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) +
+			PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_MST_CHOISE_PUSH_OFST_23 &
+			PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+
+	pb_addr = (mmNIC3_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS;
+	word_offset = ((mmNIC3_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2;
+	mask = 1U << ((mmNIC3_QM0_ARB_STATE_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_MSG_STS & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_ERR_CAUSE & 0x7F) >> 2);
+	mask |= 1U << ((mmNIC3_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2);
+	mask |= 1U <<
((mmNIC3_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM0_GLBL_ERR_WDATA & 0x7F) >> 
2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CQ_CTL_STS_0 & 0x7F) 
>> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask 
|= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC3_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_STATE_STS & 0x7F) >> 2); 
+ mask |= 1U << ((mmNIC3_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << 
((mmNIC3_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC3_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC3_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC3_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + WREG32(mmNIC4_QM0_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + WREG32(mmNIC4_QM1_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); + + pb_addr = (mmNIC4_QM0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_0 
& 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 
2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 
1U << ((mmNIC4_QM0_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM0_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LOCAL_RANGE_SIZE & 
0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM0_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM0_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM0_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_PROT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_STS1_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_MSG_EN_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_LO_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_PQ_BASE_HI_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_PQ_BASE_HI_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_PQ_BASE_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_BASE_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_2 & 0x7F) >> 2); 
+ mask |= 1U << ((mmNIC4_QM1_PQ_SIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_PI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_CFG1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS0_3 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_PQ_STS1_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_PQ_STS1_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_PQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_PQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_STS1_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_0 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CQ_CTL_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CQ_CTL_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CQ_CTL_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_LO_STS_4 & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC4_QM1_CQ_PTR_HI_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_PTR_HI_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_TSIZE_STS_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CQ_CTL_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CQ_CTL_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CQ_CTL_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_CTL_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CQ_IFIFO_CNT_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE0_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE1_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE2_ADDR_HI_4 & 0x7F) >> 2); 
+ mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_MSG_BASE3_ADDR_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_TSIZE_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_SRC_BASE_LO_OFFSET_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_LDMA_DST_BASE_LO_OFFSET_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_STS_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_STS_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_LO_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_CURRENT_INST_HI_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_BARRIER_CFG_3 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_BARRIER_CFG_3 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_BARRIER_CFG_4 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_CP_DBG_0_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_CP_DBG_0_2 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_CP_DBG_0_2 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_CP_DBG_0_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_DBG_0_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_ARUSER_31_11_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CP_AWUSER_31_11_4 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_CFG_0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_CFG_0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_19 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_23 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_AVAIL_CRED_30 & 0x7F) >> 2); + mask |= 1U << 
((mmNIC4_QM1_ARB_MST_AVAIL_CRED_31 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & ~0xFFF) + + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_CHOISE_PUSH_OFST_23 & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_WDT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_MAX_INFLIGHT & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_31_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_SEC_PROP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_AWUSER_NON_SEC_PROP & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_STATE_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_STATE_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_STATE_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_CHOISE_FULLNESS_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MSG_STS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_SLV_CHOISE_Q_HEAD & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_CAUSE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_MSG_EN & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_ERR_STS_DRP & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_2 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_3 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_4 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_5 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_6 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_7 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_8 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_9 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_10 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_11 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_12 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_13 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_14 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_15 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_16 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_17 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_18 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_19 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_ARB_MST_CRED_STS_20 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_20 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_21 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_22 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_23 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_24 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_25 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_26 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_27 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_28 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_29 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_30 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_ARB_MST_CRED_STS_31 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CGM_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CGM_STS & 0x7F) >> 2); + mask |= 1U 
<< ((mmNIC4_QM1_CGM_CFG1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_LOCAL_RANGE_BASE & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_LOCAL_RANGE_BASE & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_LOCAL_RANGE_BASE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LOCAL_RANGE_SIZE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_CSMR_STRICT_PRIO_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_LBW_WR_RATE_LIM_CFG_1 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_HBW_RD_RATE_LIM_CFG_0 & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_AXCACHE & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_CFG & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_WDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_RDATA & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_IND_GW_APB_STATUS & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1U << ((mmNIC4_QM1_GLBL_ERR_WDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmNIC4_QM1_GLBL_MEM_INIT_BUSY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & PROT_BITS_OFFS) + >> 7) << 2; + mask = 1U << ((mmNIC4_QM1_GLBL_MEM_INIT_BUSY & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); +} + static void gaudi_init_tpc_protection_bits(struct hl_device *hdev) { u32 pb_addr, mask; @@ -8861,6 +12832,8 @@ static void gaudi_init_protection_bits(struct hl_device *hdev) gaudi_init_mme_protection_bits(hdev); + gaudi_init_nic_protection_bits(hdev); + gaudi_init_tpc_protection_bits(hdev); } -- cgit v1.2.3 From 3c68157fb829a1b1c9615a6a78179a664602f96a Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 2 Nov 2020 21:10:39 +0200 Subject: habanalabs/gaudi: add support for NIC QMANs Initialize the QMANs that are responsible to submit doorbells to the NIC engines. Add support for stopping and disabling them, and reset them as part of the hard-reset procedure of GAUDI. This will allow the user to submit work to the NICs. Add support for receiving events on QMAN errors from the firmware. However, the nic_ports_mask is still initialized to 0. That means this code won't initialize the QMANs just yet. That will be in a later patch. Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 3 +- drivers/misc/habanalabs/gaudi/gaudi.c | 735 ++++++++++++++++++++++++++-- drivers/misc/habanalabs/gaudi/gaudiP.h | 32 ++ 3 files changed, 723 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index c61967213f89..b5a34936e22d 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1633,8 +1633,6 @@ struct hl_mmu_funcs { * @pmmu_huge_range: is a different virtual addresses range used for PMMU with * huge pages. * @init_done: is the initialization of the device done. - * @mmu_enable: is MMU enabled. - * @mmu_huge_page_opt: is MMU huge pages optimization enabled. * @device_cpu_disabled: is the device CPU disabled (due to timeouts) * @dma_mask: the dma mask that was set for this device * @in_debug: is device under debug. 
This, together with fpriv_list, enforces @@ -1750,6 +1748,7 @@ struct hl_device { u8 supports_cb_mapping; /* Parameters for bring-up */ + u64 nic_ports_mask; u64 fw_loading; u8 mmu_enable; u8 mmu_huge_page_opt; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 40cd561e57f4..2dd9b732299a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -301,46 +301,46 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_0_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_1_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_2_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_3_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_4_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_5_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_6_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_7_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_8_3 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_0 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_1 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_2 */ - QUEUE_TYPE_NA, /* GAUDI_QUEUE_ID_NIC_9_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_1 */ + QUEUE_TYPE_INT, /* 
GAUDI_QUEUE_ID_NIC_5_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */ }; struct ecc_info_extract_params { @@ -799,6 +799,27 @@ static int gaudi_late_init(struct hl_device *hdev) return rc; } + if ((hdev->card_type == cpucp_card_type_pci) && + (hdev->nic_ports_mask & 0x3)) { + dev_info(hdev->dev, + "PCI card detected, only 8 ports are enabled\n"); + hdev->nic_ports_mask &= ~0x3; + + /* Stop and disable unused NIC QMANs */ + WREG32(mmNIC0_QM0_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + WREG32(mmNIC0_QM1_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + WREG32(mmNIC0_QM0_GLBL_CFG0, 0); + WREG32(mmNIC0_QM1_GLBL_CFG0, 0); + + gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1); + } + rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS); if (rc) { dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); @@ -945,6 +966,9 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev) case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3: q->pq_size = TPC_QMAN_SIZE_IN_BYTES; break; + case GAUDI_QUEUE_ID_NIC_0_0 ... 
GAUDI_QUEUE_ID_NIC_9_3: + q->pq_size = NIC_QMAN_SIZE_IN_BYTES; + break; default: dev_err(hdev->dev, "Bad internal queue index %d", i); rc = -EINVAL; @@ -2362,6 +2386,125 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev) } } +static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset, + int qman_id, u64 qman_base_addr, int nic_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 q_off; + u32 nic_qm_err_cfg; + + mtr_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + + q_off = nic_offset + qman_id * 4; + + WREG32(mmNIC0_QM0_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr)); + WREG32(mmNIC0_QM0_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_base_addr)); + + WREG32(mmNIC0_QM0_PQ_SIZE_0 + q_off, ilog2(NIC_QMAN_LENGTH)); + WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0); + WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0); + + WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74); + WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14); + WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C); + + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + + if (qman_id == 0) { + /* Configure RAZWI IRQ */ + nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; + if (hdev->stop_on_err) { + nic_qm_err_cfg |= + NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK; + } + + WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg); + WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset, + lower_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset, + upper_32_bits(CFG_BASE + + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR)); + WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset, + gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id + + nic_id); + + WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset, + QM_ARB_ERR_MSG_EN_MASK); + + /* Increase ARB WDT to support streams architecture */ + WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset, + GAUDI_ARB_WDT_TIMEOUT); + + WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0); + WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset, + QMAN_INTERNAL_MAKE_TRUSTED); + } +} + +static void gaudi_init_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_internal_qman_info *q; + u64 qman_base_addr; + u32 nic_offset = 0; + u32 nic_delta_between_qmans = + mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + u32 nic_delta_between_nics = + mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + int i, nic_id, internal_q_index; + + if (!hdev->nic_ports_mask) + return; + + if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK) + return; + + dev_dbg(hdev->dev, "Initializing NIC QMANs\n"); + + for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) { + if (!(hdev->nic_ports_mask & (1 << nic_id))) { + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + continue; + } + + for (i = 0 ; i < QMAN_STREAMS ; i++) { + internal_q_index = GAUDI_QUEUE_ID_NIC_0_0 + + nic_id * QMAN_STREAMS + i; + q = 
&gaudi->internal_qmans[internal_q_index]; + qman_base_addr = (u64) q->pq_dma_addr; + gaudi_init_nic_qman(hdev, nic_offset, (i & 0x3), + qman_base_addr, nic_id); + } + + /* Enable the QMAN */ + WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, NIC_QMAN_ENABLE); + + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + + gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id); + } +} + static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2414,6 +2557,30 @@ static void gaudi_disable_tpc_qmans(struct hl_device *hdev) } } +static void gaudi_disable_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + u32 nic_mask, nic_offset = 0; + u32 nic_delta_between_qmans = + mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + u32 nic_delta_between_nics = + mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0; + int nic_id; + + for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) { + nic_mask = 1 << (HW_CAP_NIC_SHIFT + nic_id); + + if (gaudi->hw_cap_initialized & nic_mask) + WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, 0); + + nic_offset += nic_delta_between_qmans; + if (nic_id & 1) { + nic_offset -= (nic_delta_between_qmans * 2); + nic_offset += nic_delta_between_nics; + } + } +} + static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2472,6 +2639,73 @@ static void gaudi_stop_tpc_qmans(struct hl_device *hdev) WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); } +static void gaudi_stop_nic_qmans(struct hl_device *hdev) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + + /* Stop upper CPs of QMANs */ + + if (gaudi->hw_cap_initialized & HW_CAP_NIC0) + WREG32(mmNIC0_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC1) + WREG32(mmNIC0_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC2) + WREG32(mmNIC1_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC3) + WREG32(mmNIC1_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC4) + WREG32(mmNIC2_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC5) + WREG32(mmNIC2_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC6) + WREG32(mmNIC3_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC7) + WREG32(mmNIC3_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC8) + WREG32(mmNIC4_QM0_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); + + if (gaudi->hw_cap_initialized & HW_CAP_NIC9) + 
WREG32(mmNIC4_QM1_GLBL_CFG1, + NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK | + NIC0_QM0_GLBL_CFG1_CP_STOP_MASK); +} + static void gaudi_pci_dma_stall(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -2661,6 +2895,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) else wait_timeout_ms = GAUDI_RESET_WAIT_MSEC; + gaudi_stop_nic_qmans(hdev); gaudi_stop_mme_qmans(hdev); gaudi_stop_tpc_qmans(hdev); @@ -2678,6 +2913,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) msleep(wait_timeout_ms); + gaudi_disable_nic_qmans(hdev); gaudi_disable_mme_qmans(hdev); gaudi_disable_tpc_qmans(hdev); gaudi_disable_hbm_dma_qmans(hdev); @@ -2978,11 +3214,13 @@ static int gaudi_hw_init(struct hl_device *hdev) gaudi_init_tpc_qmans(hdev); + gaudi_init_nic_qmans(hdev); + hdev->asic_funcs->set_clock_gating(hdev); gaudi_enable_timestamp(hdev); - /* MSI must be enabled before CPU queues are initialized */ + /* MSI must be enabled before CPU queues and NIC are initialized */ rc = gaudi_enable_msi(hdev); if (rc) goto disable_queues; @@ -3081,7 +3319,7 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) HW_CAP_HBM | HW_CAP_PCI_DMA | HW_CAP_MME | HW_CAP_TPC_MASK | HW_CAP_HBM_DMA | HW_CAP_PLL | - HW_CAP_MMU | + HW_CAP_NIC_MASK | HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER | HW_CAP_HBM_SCRAMBLER | HW_CAP_CLK_GATE); @@ -3351,6 +3589,166 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) db_reg_offset = mmTPC7_QM_PQ_PI_3; break; + case GAUDI_QUEUE_ID_NIC_0_0: + db_reg_offset = mmNIC0_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_0_1: + db_reg_offset = mmNIC0_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_0_2: + db_reg_offset = mmNIC0_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_0_3: + db_reg_offset = mmNIC0_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_1_0: + db_reg_offset = mmNIC0_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_1_1: + db_reg_offset = mmNIC0_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_1_2: + db_reg_offset = mmNIC0_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_1_3: + db_reg_offset = mmNIC0_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_2_0: + db_reg_offset = mmNIC1_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_2_1: + db_reg_offset = mmNIC1_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_2_2: + db_reg_offset = mmNIC1_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_2_3: + db_reg_offset = mmNIC1_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_3_0: + db_reg_offset = mmNIC1_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_3_1: + db_reg_offset = mmNIC1_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_3_2: + db_reg_offset = mmNIC1_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_3_3: + db_reg_offset = mmNIC1_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_4_0: + db_reg_offset = mmNIC2_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_4_1: + db_reg_offset = mmNIC2_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_4_2: + db_reg_offset = mmNIC2_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_4_3: + db_reg_offset = mmNIC2_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_5_0: + db_reg_offset = mmNIC2_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_5_1: + db_reg_offset = mmNIC2_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_5_2: + db_reg_offset = mmNIC2_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_5_3: + db_reg_offset = mmNIC2_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_6_0: + db_reg_offset = mmNIC3_QM0_PQ_PI_0; + 
break; + + case GAUDI_QUEUE_ID_NIC_6_1: + db_reg_offset = mmNIC3_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_6_2: + db_reg_offset = mmNIC3_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_6_3: + db_reg_offset = mmNIC3_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_7_0: + db_reg_offset = mmNIC3_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_7_1: + db_reg_offset = mmNIC3_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_7_2: + db_reg_offset = mmNIC3_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_7_3: + db_reg_offset = mmNIC3_QM1_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_8_0: + db_reg_offset = mmNIC4_QM0_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_8_1: + db_reg_offset = mmNIC4_QM0_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_8_2: + db_reg_offset = mmNIC4_QM0_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_8_3: + db_reg_offset = mmNIC4_QM0_PQ_PI_3; + break; + + case GAUDI_QUEUE_ID_NIC_9_0: + db_reg_offset = mmNIC4_QM1_PQ_PI_0; + break; + + case GAUDI_QUEUE_ID_NIC_9_1: + db_reg_offset = mmNIC4_QM1_PQ_PI_1; + break; + + case GAUDI_QUEUE_ID_NIC_9_2: + db_reg_offset = mmNIC4_QM1_PQ_PI_2; + break; + + case GAUDI_QUEUE_ID_NIC_9_3: + db_reg_offset = mmNIC4_QM1_PQ_PI_3; + break; + default: invalid_queue = true; } @@ -4244,6 +4642,17 @@ static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev, struct hl_cs_parser *parser) { struct asic_fixed_properties *asic_prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT + + ((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2)); + + if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) && + (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) && + (!(gaudi->hw_cap_initialized & nic_mask_q_id))) { + dev_err(hdev->dev, "h/w queue %d is disabled\n", + parser->hw_queue_id); + return -EINVAL; + } /* For internal queue jobs just check if CB address is valid */ if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, @@ -4476,6 +4885,12 @@ static void gaudi_restore_qm_registers(struct hl_device *hdev) qman_offset = i * TPC_QMAN_OFFSET; WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0); } + + for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { + qman_offset = (i >> 1) * NIC_MACRO_QMAN_OFFSET + + (i & 0x1) * NIC_ENGINE_QMAN_OFFSET; + WREG32(mmNIC0_QM0_ARB_CFG_0 + qman_offset, 0); + } } static void gaudi_restore_user_registers(struct hl_device *hdev) @@ -4908,6 +5323,136 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid) gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid); gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid); + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC0) { + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC1) { + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC2) { + gaudi_mmu_prepare_reg(hdev, 
mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC3) { + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC4) { + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC5) { + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC6) { + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC7) { + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC8) { + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4, + asid); + } + + if (hdev->nic_ports_mask & GAUDI_NIC_MASK_NIC9) { + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3, + asid); + gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4, + asid); + } + hdev->asic_funcs->set_clock_gating(hdev); mutex_unlock(&gaudi->clk_gate_mutex); @@ -5487,6 +6032,56 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 
event_type) mmDMA0_QM_ARB_ERR_CAUSE + index * DMA_QMAN_OFFSET; snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index); break; + case GAUDI_EVENT_NIC0_QM0: + glbl_sts_addr = mmNIC0_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC0_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0"); + break; + case GAUDI_EVENT_NIC0_QM1: + glbl_sts_addr = mmNIC0_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC0_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1"); + break; + case GAUDI_EVENT_NIC1_QM0: + glbl_sts_addr = mmNIC1_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC1_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0"); + break; + case GAUDI_EVENT_NIC1_QM1: + glbl_sts_addr = mmNIC1_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC1_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1"); + break; + case GAUDI_EVENT_NIC2_QM0: + glbl_sts_addr = mmNIC2_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC2_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM0"); + break; + case GAUDI_EVENT_NIC2_QM1: + glbl_sts_addr = mmNIC2_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC2_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1"); + break; + case GAUDI_EVENT_NIC3_QM0: + glbl_sts_addr = mmNIC3_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC3_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0"); + break; + case GAUDI_EVENT_NIC3_QM1: + glbl_sts_addr = mmNIC3_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC3_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1"); + break; + case GAUDI_EVENT_NIC4_QM0: + glbl_sts_addr = mmNIC4_QM0_GLBL_STS1_0; + arb_err_addr = mmNIC4_QM0_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0"); + break; + case GAUDI_EVENT_NIC4_QM1: + glbl_sts_addr = mmNIC4_QM1_GLBL_STS1_0; + arb_err_addr = mmNIC4_QM1_ARB_ERR_CAUSE; + snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1"); + break; default: return; } @@ -5864,6 +6459,16 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM: case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM: fallthrough; + case GAUDI_EVENT_NIC0_QM0: + case GAUDI_EVENT_NIC0_QM1: + case GAUDI_EVENT_NIC1_QM0: + case GAUDI_EVENT_NIC1_QM1: + case GAUDI_EVENT_NIC2_QM0: + case GAUDI_EVENT_NIC2_QM1: + case GAUDI_EVENT_NIC3_QM0: + case GAUDI_EVENT_NIC3_QM1: + case GAUDI_EVENT_NIC4_QM0: + case GAUDI_EVENT_NIC4_QM1: case GAUDI_EVENT_DMA0_CORE ... 
GAUDI_EVENT_DMA7_CORE: gaudi_print_irq_info(hdev, event_type, true); gaudi_handle_qman_err(hdev, event_type); @@ -6097,10 +6702,11 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, struct gaudi_device *gaudi = hdev->asic_specific; const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; + const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts; bool is_idle = true, is_eng_idle, is_slave; u64 offset; - int i, dma_id; + int i, dma_id, port; mutex_lock(&gaudi->clk_gate_mutex); @@ -6189,6 +6795,45 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask, } } + if (s) + seq_puts(s, "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n" + "--- ------- ------------ ----------\n"); + + for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) { + offset = i * NIC_MACRO_QMAN_OFFSET; + port = 2 * i; + if (hdev->nic_ports_mask & BIT(port)) { + qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_NIC_0 + port); + if (s) + seq_printf(s, nic_fmt, port, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts); + } + + port = 2 * i + 1; + if (hdev->nic_ports_mask & BIT(port)) { + qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset); + qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset); + is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts); + is_idle &= is_eng_idle; + + if (mask) + *mask |= ((u64) !is_eng_idle) << + (GAUDI_ENGINE_ID_NIC_0 + port); + if (s) + seq_printf(s, nic_fmt, port, + is_eng_idle ? "Y" : "N", + qm_glbl_sts0, qm_cgm_sts); + } + } + if (s) seq_puts(s, "\n"); @@ -6583,8 +7228,8 @@ static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr) return size; } -u32 gaudi_gen_wait_cb(struct hl_device *hdev, - struct hl_gen_wait_properties *prop) +static u32 gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop) { struct hl_cb *cb = (struct hl_cb *) prop->data; void *buf = cb->kernel_address; diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 8eb598db81b2..277c391272ac 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -79,6 +79,7 @@ #define TPC_QMAN_OFFSET (mmTPC1_QM_BASE - mmTPC0_QM_BASE) #define MME_QMAN_OFFSET (mmMME1_QM_BASE - mmMME0_QM_BASE) #define NIC_MACRO_QMAN_OFFSET (mmNIC1_QM0_BASE - mmNIC0_QM0_BASE) +#define NIC_ENGINE_QMAN_OFFSET (mmNIC0_QM1_BASE - mmNIC0_QM0_BASE) #define TPC_CFG_OFFSET (mmTPC1_CFG_BASE - mmTPC0_CFG_BASE) @@ -140,6 +141,10 @@ #define TPC_QMAN_LENGTH 1024 #define TPC_QMAN_SIZE_IN_BYTES (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) +#define NIC_QMAN_LENGTH 1024 +#define NIC_QMAN_SIZE_IN_BYTES (NIC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE) + + #define SRAM_USER_BASE_OFFSET GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START /* Virtual address space */ @@ -161,6 +166,19 @@ #define HW_CAP_SRAM_SCRAMBLER BIT(10) #define HW_CAP_HBM_SCRAMBLER BIT(11) +#define HW_CAP_NIC0 BIT(14) +#define HW_CAP_NIC1 BIT(15) +#define HW_CAP_NIC2 BIT(16) +#define HW_CAP_NIC3 BIT(17) +#define HW_CAP_NIC4 BIT(18) +#define HW_CAP_NIC5 BIT(19) +#define HW_CAP_NIC6 BIT(20) +#define HW_CAP_NIC7 BIT(21) +#define HW_CAP_NIC8 BIT(22) +#define HW_CAP_NIC9 BIT(23) +#define HW_CAP_NIC_MASK GENMASK(23, 14) +#define HW_CAP_NIC_SHIFT 14 + #define HW_CAP_TPC0 BIT(24) #define HW_CAP_TPC1 
BIT(25) #define HW_CAP_TPC2 BIT(26) @@ -208,6 +226,20 @@ enum gaudi_tpc_mask { GAUDI_TPC_MASK_ALL = 0xFF }; +enum gaudi_nic_mask { + GAUDI_NIC_MASK_NIC0 = 0x01, + GAUDI_NIC_MASK_NIC1 = 0x02, + GAUDI_NIC_MASK_NIC2 = 0x04, + GAUDI_NIC_MASK_NIC3 = 0x08, + GAUDI_NIC_MASK_NIC4 = 0x10, + GAUDI_NIC_MASK_NIC5 = 0x20, + GAUDI_NIC_MASK_NIC6 = 0x40, + GAUDI_NIC_MASK_NIC7 = 0x80, + GAUDI_NIC_MASK_NIC8 = 0x100, + GAUDI_NIC_MASK_NIC9 = 0x200, + GAUDI_NIC_MASK_ALL = 0x3FF +}; + /** * struct gaudi_internal_qman_info - Internal QMAN information. * @pq_kernel_addr: Kernel address of the PQ memory area in the host. -- cgit v1.2.3 From 4bb1f2f3fb31ed60a23064a8fc4d5ecde5d1002d Mon Sep 17 00:00:00 2001 From: Tal Cohen Date: Wed, 3 Jun 2020 09:25:27 +0300 Subject: habanalabs: use enum for CB allocation options In the future there will be situations where queues can accept either kernel allocated CBs or user allocated CBs, depending on different states. Therefore, instead of using a boolean variable of kernel/user allocated CB, we need to use a bitmask to indicate that, which will allow to combine the two options. Add a flag to the uapi so the user will be able to indicate whether the CB was allocated by kernel or by user. Of course the driver validates that. Signed-off-by: Tal Cohen Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 31 ++++++++++++++++++++-- drivers/misc/habanalabs/common/habanalabs.h | 19 ++++++++++--- drivers/misc/habanalabs/gaudi/gaudi.c | 13 ++++++--- drivers/misc/habanalabs/goya/goya.c | 6 ++--- include/uapi/misc/habanalabs.h | 16 +++++++++++ 5 files changed, 73 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 0e37aad85930..cd3422bfe6f8 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -568,9 +568,36 @@ static int validate_queue_index(struct hl_device *hdev, return -EINVAL; } - *queue_type = hw_queue_prop->type; - *is_kernel_allocated_cb = !!hw_queue_prop->requires_kernel_cb; + /* When hw queue type isn't QUEUE_TYPE_HW, + * USER_ALLOC_CB flag shall be referred as "don't care". 
+ */ + if (hw_queue_prop->type == QUEUE_TYPE_HW) { + if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) { + if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) { + dev_err(hdev->dev, + "Queue index %d doesn't support user CB\n", + chunk->queue_index); + return -EINVAL; + } + *is_kernel_allocated_cb = false; + } else { + if (!(hw_queue_prop->cb_alloc_flags & + CB_ALLOC_KERNEL)) { + dev_err(hdev->dev, + "Queue index %d doesn't support kernel CB\n", + chunk->queue_index); + return -EINVAL; + } + + *is_kernel_allocated_cb = true; + } + } else { + *is_kernel_allocated_cb = !!(hw_queue_prop->cb_alloc_flags + & CB_ALLOC_KERNEL); + } + + *queue_type = hw_queue_prop->type; return 0; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index b5a34936e22d..0823798f292e 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -206,6 +206,17 @@ struct hl_outbound_pci_region { u64 size; }; +/* + * enum queue_cb_alloc_flags - Indicates queue support for CBs that + * allocated by Kernel or by User + * @CB_ALLOC_KERNEL: support only CBs that allocated by Kernel + * @CB_ALLOC_USER: support only CBs that allocated by User + */ +enum queue_cb_alloc_flags { + CB_ALLOC_KERNEL = 0x1, + CB_ALLOC_USER = 0x2 +}; + /* * struct hl_hw_sob - H/W SOB info. * @hdev: habanalabs device structure. @@ -223,16 +234,18 @@ struct hl_hw_sob { /** * struct hw_queue_properties - queue information. * @type: queue type. + * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CB + * that allocated by the Kernel driver and therefore, + * a CB handle can be provided for jobs on this queue. + * Otherwise, a CB address must be provided. * @driver_only: true if only the driver is allowed to send a job to this queue, * false otherwise. - * @requires_kernel_cb: true if a CB handle must be provided for jobs on this - * queue, false otherwise (a CB address must be provided). 
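/*
 * Illustration only, not part of this patch: because cb_alloc_flags is a
 * bitmask rather than a boolean, the two allocation options can now be
 * combined. An ASIC that wants a queue to accept either kind of CB could
 * simply advertise both bits, e.g.:
 *
 *	prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL |
 *						CB_ALLOC_USER;
 */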
* @supports_sync_stream: True if queue supports sync stream */ struct hw_queue_properties { enum hl_queue_type type; + enum queue_cb_alloc_flags cb_alloc_flags; u8 driver_only; - u8 requires_kernel_cb; u8 supports_sync_stream; }; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 2dd9b732299a..9393e34b9719 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -381,23 +381,28 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) { prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 1; prop->hw_queues_props[i].supports_sync_stream = 1; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_KERNEL; num_sync_stream_queues++; } else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) { prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; prop->hw_queues_props[i].driver_only = 1; - prop->hw_queues_props[i].requires_kernel_cb = 0; prop->hw_queues_props[i].supports_sync_stream = 0; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_KERNEL; } else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) { prop->hw_queues_props[i].type = QUEUE_TYPE_INT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].supports_sync_stream = 0; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_USER; } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) { prop->hw_queues_props[i].type = QUEUE_TYPE_NA; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; prop->hw_queues_props[i].supports_sync_stream = 0; + prop->hw_queues_props[i].cb_alloc_flags = + CB_ALLOC_USER; } } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index d873f613acb0..74c44278166b 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -373,20 +373,20 @@ int goya_get_fixed_properties(struct hl_device *hdev) for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 1; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL; } for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; prop->hw_queues_props[i].driver_only = 1; - prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL; } for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES + NUMBER_OF_INT_HW_QUEUES; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_INT; prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].requires_kernel_cb = 0; + prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER; } prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 9705b8adb60c..5753157e71b3 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -490,6 +490,22 @@ union hl_cb_args { struct hl_cb_out out; }; +/* HL_CS_CHUNK_FLAGS_ values + * + * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB: + * Indicates if the CB was allocated and mapped by userspace. + * User allocated CB is a command buffer allocated by the user, via malloc + * (or similar). After allocating the CB, the user invokes “memory ioctl” + * to map the user memory into a device virtual address. 
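A minimal userspace-side sketch of this flow (illustrative only, not part of
this patch; queue_idx, device_va and cb_size are placeholders, and device_va
is assumed to be the device virtual address returned by the memory ioctl
mentioned above):

	struct hl_cs_chunk chunk = {0};

	chunk.queue_index = queue_idx;	/* internal/HW queue to submit to */
	chunk.cb_handle = device_va;	/* device VA of the user buffer */
	chunk.cb_size = cb_size;	/* size of the mapped buffer */
	chunk.cs_chunk_flags = HL_CS_CHUNK_FLAGS_USER_ALLOC_CB;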
The user provides + * this address via the cb_handle field. The interface provides the + * ability to create a large CBs, Which aren’t limited to + * “HL_MAX_CB_SIZE”. Therefore, it increases the PCI-DMA queues + * throughput. This CB allocation method also reduces the use of Linux + * DMA-able memory pool. Which are limited and used by other Linux + * sub-systems. + */ +#define HL_CS_CHUNK_FLAGS_USER_ALLOC_CB 0x1 + /* * This structure size must always be fixed to 64-bytes for backward * compatibility -- cgit v1.2.3 From 5fe1c17ddf2e5e5d05e983b56ebbc0d1c702b16a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 10 Sep 2020 10:10:55 +0300 Subject: habanalabs: sync stream collective infrastructure Define new API for collective wait support and modify sync stream common flow. In addition add kernel CB allocation support for internal queues. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 38 ++++++++++++--- drivers/misc/habanalabs/common/habanalabs.h | 55 +++++++++++++++++++--- drivers/misc/habanalabs/common/hw_queue.c | 44 +++++++++++++++-- drivers/misc/habanalabs/gaudi/gaudi.c | 22 ++++++++- drivers/misc/habanalabs/goya/goya.c | 22 ++++++++- include/uapi/misc/habanalabs.h | 17 +++++-- 6 files changed, 176 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index cd3422bfe6f8..2dbd42b6ad0c 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -85,7 +85,8 @@ static void hl_fence_release(struct kref *kref) goto free; if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) || - (hl_cs_cmpl->type == CS_TYPE_WAIT)) { + (hl_cs_cmpl->type == CS_TYPE_WAIT) || + (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)) { dev_dbg(hdev->dev, "CS 0x%llx type %d finished, sob_id: %d, sob_val: 0x%x\n", @@ -112,6 +113,10 @@ static void hl_fence_release(struct kref *kref) * hence the above scenario is avoided. */ kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset); + + if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) + hdev->asic_funcs->reset_sob_group(hdev, + hl_cs_cmpl->sob_group); } free: @@ -247,9 +252,11 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) /* For H/W queue jobs, if a user CB was allocated by driver and MMU is * enabled, the user CB isn't released in cs_parser() and thus should be * released here. 
+ * This is also true for INT queues jobs which were allocated by driver */ - if (job->queue_type == QUEUE_TYPE_HW && - job->is_kernel_allocated_cb && hdev->mmu_enable) { + if (job->is_kernel_allocated_cb && + ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) || + job->queue_type == QUEUE_TYPE_INT)) { spin_lock(&job->user_cb->lock); job->user_cb->cs_cnt--; spin_unlock(&job->user_cb->lock); @@ -931,7 +938,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, struct hl_cs_compl *sig_waitcs_cmpl; struct hl_cs *cs; enum hl_queue_type q_type; - u32 size_to_copy, q_idx; + u32 size_to_copy, q_idx, collective_engine_id; u64 signal_seq; int rc; @@ -981,7 +988,18 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, goto free_cs_chunk_array; } - if (cs_type == CS_TYPE_WAIT) { + if (cs_type == CS_TYPE_COLLECTIVE_WAIT) { + if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { + dev_err(hdev->dev, + "Queue index %d is invalid\n", q_idx); + rc = -EINVAL; + goto free_cs_chunk_array; + } + + collective_engine_id = chunk->collective_engine_id; + } + + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) { rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq); if (rc) goto free_cs_chunk_array; @@ -1026,7 +1044,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, rc = allocate_cs(hdev, ctx, cs_type, &cs); if (rc) { - if (cs_type == CS_TYPE_WAIT) + if (cs_type == CS_TYPE_WAIT || + cs_type == CS_TYPE_COLLECTIVE_WAIT) hl_fence_put(sig_fence); hl_ctx_put(ctx); goto free_cs_chunk_array; @@ -1036,7 +1055,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, * Save the signal CS fence for later initialization right before * hanging the wait CS on the queue. 
*/ - if (cs_type == CS_TYPE_WAIT) + if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) cs->signal_fence = sig_fence; hl_debugfs_add_cs(cs); @@ -1046,6 +1065,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, q_idx); + else + rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx, + cs, q_idx, collective_engine_id); if (rc) goto put_cs; @@ -1120,6 +1142,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) cs_type = CS_TYPE_SIGNAL; else if (args->in.cs_flags & HL_CS_FLAGS_WAIT) cs_type = CS_TYPE_WAIT; + else if (args->in.cs_flags & HL_CS_FLAGS_COLLECTIVE_WAIT) + cs_type = CS_TYPE_COLLECTIVE_WAIT; else cs_type = CS_TYPE_DEFAULT; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 0823798f292e..98249a2c97e7 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -68,6 +68,11 @@ #define HL_RSVD_SOBS 4 #define HL_RSVD_MONS 2 +/* + * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream + */ +#define HL_COLLECTIVE_RSVD_MSTR_MONS 2 + #define HL_MAX_SOB_VAL (1 << 15) #define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0)) @@ -177,7 +182,8 @@ enum hl_queue_type { enum hl_cs_type { CS_TYPE_DEFAULT, CS_TYPE_SIGNAL, - CS_TYPE_WAIT + CS_TYPE_WAIT, + CS_TYPE_COLLECTIVE_WAIT }; /* @@ -231,6 +237,12 @@ struct hl_hw_sob { u32 q_idx; }; +enum hl_collective_mode { + HL_COLLECTIVE_NOT_SUPPORTED = 0x0, + HL_COLLECTIVE_MASTER = 0x1, + HL_COLLECTIVE_SLAVE = 0x2 +}; + /** * struct hw_queue_properties - queue information. * @type: queue type. @@ -238,6 +250,7 @@ struct hl_hw_sob { * that allocated by the Kernel driver and therefore, * a CB handle can be provided for jobs on this queue. * Otherwise, a CB address must be provided. + * @collective_mode: collective mode of current queue * @driver_only: true if only the driver is allowed to send a job to this queue, * false otherwise. * @supports_sync_stream: True if queue supports sync stream @@ -245,6 +258,7 @@ struct hl_hw_sob { struct hw_queue_properties { enum hl_queue_type type; enum queue_cb_alloc_flags cb_alloc_flags; + enum hl_collective_mode collective_mode; u8 driver_only; u8 supports_sync_stream; }; @@ -358,6 +372,8 @@ struct hl_mmu_properties { * @cb_pool_cb_size: size of each CB in the CB pool. * @max_pending_cs: maximum of concurrent pending command submissions * @max_queues: maximum amount of queues in the system + * @collective_first_sob: first sync object available for collective use + * @collective_first_mon: first monitor available for collective use * @sync_stream_first_sob: first sync object available for sync stream use * @sync_stream_first_mon: first monitor available for sync stream use * @first_available_user_sob: first sob available for the user @@ -410,6 +426,8 @@ struct asic_fixed_properties { u32 cb_pool_cb_size; u32 max_pending_cs; u32 max_queues; + u16 collective_first_sob; + u16 collective_first_mon; u16 sync_stream_first_sob; u16 sync_stream_first_mon; u16 first_available_user_sob[HL_MAX_DCORES]; @@ -441,6 +459,7 @@ struct hl_fence { * @cs_seq: command submission sequence number. * @type: type of the CS - signal/wait. * @sob_val: the SOB value that is used in this signal/wait CS. + * @sob_group: the SOB group that is used in this collective wait CS. 
*/ struct hl_cs_compl { struct hl_fence base_fence; @@ -450,6 +469,7 @@ struct hl_cs_compl { u64 cs_seq; enum hl_cs_type type; u16 sob_val; + u16 sob_group; }; /* @@ -512,6 +532,7 @@ struct hl_cb { * QUEUES */ +struct hl_cs; struct hl_cs_job; /* Queue length of external and HW queues */ @@ -540,15 +561,24 @@ struct hl_cs_job; * @next_sob_val: the next value to use for the currently used SOB. * @base_sob_id: the base SOB id of the SOBs used by this queue. * @base_mon_id: the base MON id of the MONs used by this queue. + * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue + * in order to sync with all slave queues. + * @collective_slave_mon_id: the MON id used by this slave queue in order to + * sync with its master queue. + * @collective_sob_id: current SOB id used by this collective slave queue + * to signal its collective master queue upon completion. * @curr_sob_offset: the id offset to the currently used SOB from the * HL_RSVD_SOBS that are being used by this queue. */ struct hl_sync_stream_properties { - struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; - u16 next_sob_val; - u16 base_sob_id; - u16 base_mon_id; - u8 curr_sob_offset; + struct hl_hw_sob hw_sob[HL_RSVD_SOBS]; + u16 next_sob_val; + u16 base_sob_id; + u16 base_mon_id; + u16 collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS]; + u16 collective_slave_mon_id; + u16 collective_sob_id; + u8 curr_sob_offset; }; /** @@ -556,6 +586,7 @@ struct hl_sync_stream_properties { * @shadow_queue: pointer to a shadow queue that holds pointers to jobs. * @sync_stream_prop: sync stream queue properties * @queue_type: type of queue. + * @collective_mode: collective mode of current queue * @kernel_address: holds the queue's kernel virtual address. * @bus_address: holds the queue's DMA address. * @pi: holds the queue's pi value. @@ -572,6 +603,7 @@ struct hl_hw_queue { struct hl_cs_job **shadow_queue; struct hl_sync_stream_properties sync_stream_prop; enum hl_queue_type queue_type; + enum hl_collective_mode collective_mode; void *kernel_address; dma_addr_t bus_address; u32 pi; @@ -764,9 +796,13 @@ enum div_select_defs { * @gen_signal_cb: Generate a signal CB. * @gen_wait_cb: Generate a wait CB. * @reset_sob: Reset a SOB. + * @reset_sob_group: Reset SOB group * @set_dma_mask_from_fw: set the DMA mask in the driver according to the * firmware configuration * @get_device_time: Get the device time. + * @collective_wait_init_cs: Generate collective master/slave packets + * and place them in the relevant cs jobs + * @collective_wait_create_jobs: allocate collective wait cs jobs */ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -868,8 +904,13 @@ struct hl_asic_funcs { u32 (*gen_wait_cb)(struct hl_device *hdev, struct hl_gen_wait_properties *prop); void (*reset_sob)(struct hl_device *hdev, void *data); + void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group); void (*set_dma_mask_from_fw)(struct hl_device *hdev); u64 (*get_device_time)(struct hl_device *hdev); + void (*collective_wait_init_cs)(struct hl_cs *cs); + int (*collective_wait_create_jobs)(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id); }; @@ -1656,6 +1697,7 @@ struct hl_mmu_funcs { * @stop_on_err: true if engines should stop on error. * @supports_sync_stream: is sync stream supported. * @sync_stream_queue_idx: helper index for sync stream queues initialization. + * @collective_mon_idx: helper index for collective initialization * @supports_coresight: is CoreSight supported. 
* @supports_soft_reset: is soft reset supported. * @supports_cb_mapping: is mapping a CB to the device's MMU supported. @@ -1756,6 +1798,7 @@ struct hl_device { u8 stop_on_err; u8 supports_sync_stream; u8 sync_stream_queue_idx; + u8 collective_mon_idx; u8 supports_coresight; u8 supports_soft_reset; u8 supports_cb_mapping; diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index be1d0e2c99d8..d9448375beac 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -333,7 +333,14 @@ static void int_queue_schedule_job(struct hl_cs_job *job) bd.ctl = 0; bd.len = cpu_to_le32(job->job_cb_size); - bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); + + if (job->is_kernel_allocated_cb) + /* bus_address is actually a mmu mapped address + * allocated from an internal pool + */ + bd.ptr = cpu_to_le64(job->user_cb->bus_address); + else + bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb); pi = q->kernel_address + (q->pi & (q->int_queue_len - 1)) * sizeof(bd); @@ -562,6 +569,8 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) if ((cs->type == CS_TYPE_SIGNAL) || (cs->type == CS_TYPE_WAIT)) init_signal_wait_cs(cs); + else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) + hdev->asic_funcs->collective_wait_init_cs(cs); spin_lock(&hdev->hw_queues_mirror_lock); list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); @@ -741,12 +750,40 @@ static void sync_stream_queue_init(struct hl_device *hdev, u32 q_idx) struct hl_sync_stream_properties *sync_stream_prop; struct asic_fixed_properties *prop = &hdev->asic_prop; struct hl_hw_sob *hw_sob; - int sob, queue_idx; + int sob, reserved_mon_idx, queue_idx; + + sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop; + + /* We use 'collective_mon_idx' as a running index in order to reserve + * monitors for collective master/slave queues. 
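/*
 * Worked illustration of the reservation scheme described below (the numbers
 * are invented; only collective_first_mon and HL_COLLECTIVE_RSVD_MSTR_MONS
 * come from this patch). With prop->collective_first_mon == 300 and queues
 * initialized in the order master, slave, slave:
 *
 *	master:	collective_mstr_mon_id[0] = 300, [1] = 301 (index advances by 2)
 *	slave:	collective_slave_mon_id = 302 (index advances by 1)
 *	slave:	collective_slave_mon_id = 303
 */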
+ * collective master queue gets 2 reserved monitors + * collective slave queue gets 1 reserved monitor + */ + if (hdev->kernel_queues[q_idx].collective_mode == + HL_COLLECTIVE_MASTER) { + reserved_mon_idx = hdev->collective_mon_idx; + + /* reserve the first monitor for collective master queue */ + sync_stream_prop->collective_mstr_mon_id[0] = + prop->collective_first_mon + reserved_mon_idx; + + /* reserve the second monitor for collective master queue */ + sync_stream_prop->collective_mstr_mon_id[1] = + prop->collective_first_mon + reserved_mon_idx + 1; + + hdev->collective_mon_idx += HL_COLLECTIVE_RSVD_MSTR_MONS; + } else if (hdev->kernel_queues[q_idx].collective_mode == + HL_COLLECTIVE_SLAVE) { + reserved_mon_idx = hdev->collective_mon_idx++; + + /* reserve a monitor for collective slave queue */ + sync_stream_prop->collective_slave_mon_id = + prop->collective_first_mon + reserved_mon_idx; + } if (!hdev->kernel_queues[q_idx].supports_sync_stream) return; - sync_stream_prop = &hdev->kernel_queues[q_idx].sync_stream_prop; queue_idx = hdev->sync_stream_queue_idx++; sync_stream_prop->base_sob_id = prop->sync_stream_first_sob + @@ -897,6 +934,7 @@ int hl_hw_queues_create(struct hl_device *hdev) q->queue_type = asic->hw_queues_props[i].type; q->supports_sync_stream = asic->hw_queues_props[i].supports_sync_stream; + q->collective_mode = asic->hw_queues_props[i].collective_mode; rc = queue_init(hdev, q, i); if (rc) { dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 9393e34b9719..d4218c4b4887 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -793,6 +793,23 @@ out: return rc; } +static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_groupt) +{ + +} + +static void gaudi_collective_wait_init_cs(struct hl_cs *cs) +{ + +} + +static int gaudi_collective_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id) +{ + return -EINVAL; +} + static int gaudi_late_init(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; @@ -7358,8 +7375,11 @@ static const struct hl_asic_funcs gaudi_funcs = { .gen_signal_cb = gaudi_gen_signal_cb, .gen_wait_cb = gaudi_gen_wait_cb, .reset_sob = gaudi_reset_sob, + .reset_sob_group = gaudi_reset_sob_group, .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw, - .get_device_time = gaudi_get_device_time + .get_device_time = gaudi_get_device_time, + .collective_wait_init_cs = gaudi_collective_wait_init_cs, + .collective_wait_create_jobs = gaudi_collective_wait_create_jobs }; /** diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 74c44278166b..9332580b038d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5305,6 +5305,11 @@ static void goya_reset_sob(struct hl_device *hdev, void *data) } +void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group) +{ + +} + static void goya_set_dma_mask_from_fw(struct hl_device *hdev) { if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) == @@ -5326,6 +5331,18 @@ u64 goya_get_device_time(struct hl_device *hdev) return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); } +void goya_collective_wait_init_cs(struct hl_cs *cs) +{ + +} + +int goya_collective_wait_create_jobs(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, + u32 collective_engine_id) +{ + return -EINVAL; +} + static const struct hl_asic_funcs goya_funcs = { 
.early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5397,8 +5414,11 @@ static const struct hl_asic_funcs goya_funcs = { .gen_signal_cb = goya_gen_signal_cb, .gen_wait_cb = goya_gen_wait_cb, .reset_sob = goya_reset_sob, + .reset_sob_group = goya_reset_sob_group, .set_dma_mask_from_fw = goya_set_dma_mask_from_fw, - .get_device_time = goya_get_device_time + .get_device_time = goya_get_device_time, + .collective_wait_init_cs = goya_collective_wait_init_cs, + .collective_wait_create_jobs = goya_collective_wait_create_jobs }; /* diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 5753157e71b3..2b244d0bdc26 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -523,7 +523,8 @@ struct hl_cs_chunk { */ __u64 cb_handle; - /* Relevant only when HL_CS_FLAGS_WAIT is set. + /* Relevant only when HL_CS_FLAGS_WAIT or + * HL_CS_FLAGS_COLLECTIVE_WAIT is set. * This holds address of array of u64 values that contain * signal CS sequence numbers. The wait described by this job * will listen on all those signals (wait event per signal) @@ -541,7 +542,8 @@ struct hl_cs_chunk { */ __u32 cb_size; - /* Relevant only when HL_CS_FLAGS_WAIT is set. + /* Relevant only when HL_CS_FLAGS_WAIT or + * HL_CS_FLAGS_COLLECTIVE_WAIT is set. * Number of entries in signal_seq_arr */ __u32 num_signal_seq_arr; @@ -550,14 +552,21 @@ struct hl_cs_chunk { /* HL_CS_CHUNK_FLAGS_* */ __u32 cs_chunk_flags; + /* Relevant only when HL_CS_FLAGS_COLLECTIVE_WAIT is set. + * This holds the collective engine ID. The wait described by this job + * will sync with this engine and with all NICs before completion. + */ + __u32 collective_engine_id; + /* Align structure to 64 bytes */ - __u32 pad[11]; + __u32 pad[10]; }; -/* SIGNAL and WAIT flags are mutually exclusive */ +/* SIGNAL and WAIT/COLLECTIVE_WAIT flags are mutually exclusive */ #define HL_CS_FLAGS_FORCE_RESTORE 0x1 #define HL_CS_FLAGS_SIGNAL 0x2 #define HL_CS_FLAGS_WAIT 0x4 +#define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 #define HL_CS_STATUS_SUCCESS 0 -- cgit v1.2.3 From 0940cabafde98466bec8ba32dd567f0ec060478d Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 31 Aug 2020 08:52:56 +0300 Subject: habanalabs/gaudi: Set DMA5 QMAN internal DMA5 QMAN is designated to be used for reduction process, hence it will be no longer configured as external queue. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 31 +++++++++++++------------------ drivers/misc/habanalabs/gaudi/gaudiP.h | 8 ++++---- include/uapi/misc/habanalabs.h | 12 ++++++------ 3 files changed, 23 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index d4218c4b4887..7e1557f8a73f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -38,7 +38,7 @@ * * MMU is always enabled. * - * QMAN DMA channels 0,1,5 (PCI DMAN): + * QMAN DMA channels 0,1 (PCI DMAN): * - DMA is not secured. * - PQ and CQ are secured. 
* - CP is secured: The driver needs to parse CB but WREG should be allowed @@ -55,7 +55,7 @@ * idle) * - MMU page tables area clear (happens on init) * - * QMAN DMA 2-4,6,7, TPC, MME, NIC: + * QMAN DMA 2-7, TPC, MME, NIC: * PQ is secured and is located on the Host (HBM CON TPC3 bug) * CQ, CP and the engine are not secured * @@ -113,12 +113,12 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = { [GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0, [GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1, - [GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5, [GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2, [GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3, [GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4, - [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6, - [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7 + [GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_5, + [GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_6, + [GAUDI_HBM_DMA_6] = GAUDI_ENGINE_ID_DMA_7 }; static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { @@ -130,10 +130,6 @@ static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = { [5] = GAUDI_QUEUE_ID_DMA_1_1, [6] = GAUDI_QUEUE_ID_DMA_1_2, [7] = GAUDI_QUEUE_ID_DMA_1_3, - [8] = GAUDI_QUEUE_ID_DMA_5_0, - [9] = GAUDI_QUEUE_ID_DMA_5_1, - [10] = GAUDI_QUEUE_ID_DMA_5_2, - [11] = GAUDI_QUEUE_ID_DMA_5_3 }; static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { @@ -249,10 +245,10 @@ static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = { QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_0 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_1 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_2 */ - QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_5_3 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_0 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_1 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_2 */ + QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_3 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */ QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */ @@ -978,8 +974,7 @@ static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev) q = &gaudi->internal_qmans[i]; switch (i) { - case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_4_3: - case GAUDI_QUEUE_ID_DMA_6_0 ... GAUDI_QUEUE_ID_DMA_7_3: + case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_7_3: q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES; break; case GAUDI_QUEUE_ID_MME_0_0 ... 
GAUDI_QUEUE_ID_MME_1_3: @@ -3424,21 +3419,21 @@ static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) break; case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3: - dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_3]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; break; case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3: - dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; break; case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3: - dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5]; + dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_6]; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off; diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 277c391272ac..16871d9ff88b 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -15,7 +15,7 @@ #include "../include/gaudi/gaudi.h" #include "../include/gaudi/gaudi_async_events.h" -#define NUMBER_OF_EXT_HW_QUEUES 12 +#define NUMBER_OF_EXT_HW_QUEUES 8 #define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES #define NUMBER_OF_CPU_HW_QUEUES 1 #define NUMBER_OF_INT_HW_QUEUES 100 @@ -62,8 +62,8 @@ #error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1" #endif -#define PCI_DMA_NUMBER_OF_CHNLS 3 -#define HBM_DMA_NUMBER_OF_CHNLS 5 +#define PCI_DMA_NUMBER_OF_CHNLS 2 +#define HBM_DMA_NUMBER_OF_CHNLS 6 #define DMA_NUMBER_OF_CHNLS (PCI_DMA_NUMBER_OF_CHNLS + \ HBM_DMA_NUMBER_OF_CHNLS) @@ -205,12 +205,12 @@ enum gaudi_dma_channels { GAUDI_PCI_DMA_1, GAUDI_PCI_DMA_2, - GAUDI_PCI_DMA_3, GAUDI_HBM_DMA_1, GAUDI_HBM_DMA_2, GAUDI_HBM_DMA_3, GAUDI_HBM_DMA_4, GAUDI_HBM_DMA_5, + GAUDI_HBM_DMA_6, GAUDI_DMA_MAX }; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 2b244d0bdc26..4661a74f0425 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -18,8 +18,8 @@ #define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ #define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */ -#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 48 -#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 24 +#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 32 +#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 16 /* * Goya queue Numbering * @@ -76,10 +76,10 @@ enum gaudi_queue_id { GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */ GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */ GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */ - GAUDI_QUEUE_ID_DMA_5_0 = 21, /* external */ - GAUDI_QUEUE_ID_DMA_5_1 = 22, /* external */ - GAUDI_QUEUE_ID_DMA_5_2 = 23, /* external */ - GAUDI_QUEUE_ID_DMA_5_3 = 24, /* external */ + GAUDI_QUEUE_ID_DMA_5_0 = 21, /* internal */ + GAUDI_QUEUE_ID_DMA_5_1 = 22, /* internal */ + GAUDI_QUEUE_ID_DMA_5_2 = 23, /* internal */ + GAUDI_QUEUE_ID_DMA_5_3 = 24, /* internal */ GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */ GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */ GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */ -- cgit v1.2.3 From 5de406c0b5c747c40277861ecf204ebfa095caa5 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 10 Sep 2020 10:56:26 +0300 Subject: habanalabs: sync stream collective support Implement sync stream 
collective for GAUDI. Need to allocate additional resources for that and add ctx_fini() to clean up those resources. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 11 +- drivers/misc/habanalabs/common/context.c | 1 + drivers/misc/habanalabs/common/habanalabs.h | 7 +- drivers/misc/habanalabs/gaudi/gaudi.c | 846 +++++++++++++++++++-- drivers/misc/habanalabs/gaudi/gaudiP.h | 40 +- drivers/misc/habanalabs/goya/goya.c | 6 + include/uapi/misc/habanalabs.h | 14 +- 7 files changed, 862 insertions(+), 63 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 2dbd42b6ad0c..5ece52588ec6 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -142,7 +142,7 @@ static void hl_fence_init(struct hl_fence *fence) init_completion(&fence->completion); } -static void cs_get(struct hl_cs *cs) +void cs_get(struct hl_cs *cs) { kref_get(&cs->refcount); } @@ -917,6 +917,9 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, job->job_cb_size = job->user_cb_size; hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + /* increment refcount as for external queues we get completion */ + cs_get(cs); + cs->jobs_in_queue_cnt[job->hw_queue_id]++; list_add_tail(&job->cs_node, &cs->job_list); @@ -1070,11 +1073,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, cs, q_idx, collective_engine_id); if (rc) - goto put_cs; - - - /* increment refcount as for external queues we get completion */ - cs_get(cs); + goto free_cs_object; rc = hl_hw_queue_schedule_cs(cs); if (rc) { diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 7a59dd7c6450..2077bbe3606a 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -40,6 +40,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx) if ((hdev->in_debug) && (hdev->compute_ctx == ctx)) hl_device_set_debug_mode(hdev, false); + hdev->asic_funcs->ctx_fini(ctx); hl_cb_va_pool_fini(ctx); hl_vm_ctx_fini(ctx); hl_asid_free(hdev, ctx->asid); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 98249a2c97e7..d6eb5c6a2873 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -65,8 +65,8 @@ * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream */ -#define HL_RSVD_SOBS 4 -#define HL_RSVD_MONS 2 +#define HL_RSVD_SOBS 2 +#define HL_RSVD_MONS 1 /* * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream @@ -785,6 +785,7 @@ enum div_select_defs { * @wreg: Write a register. Needed for simulator support. * @halt_coresight: stop the ETF and ETR traces. * @ctx_init: context dependent initialization. + * @ctx_fini: context dependent cleanup. * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index. 
* @read_device_fw_version: read the device's firmware versions that are @@ -891,6 +892,7 @@ struct hl_asic_funcs { void (*wreg)(struct hl_device *hdev, u32 reg, u32 val); void (*halt_coresight)(struct hl_device *hdev); int (*ctx_init)(struct hl_ctx *ctx); + void (*ctx_fini)(struct hl_ctx *ctx); int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx); void (*read_device_fw_version)(struct hl_device *hdev, @@ -1992,6 +1994,7 @@ void hl_sob_reset_error(struct kref *ref); int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask); void hl_fence_put(struct hl_fence *fence); void hl_fence_get(struct hl_fence *fence); +void cs_get(struct hl_cs *cs); void goya_set_asic_funcs(struct hl_device *hdev); void gaudi_set_asic_funcs(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 7e1557f8a73f..c8e59a8338fb 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -358,6 +358,31 @@ static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev); static int gaudi_cpucp_info_get(struct hl_device *hdev); static void gaudi_disable_clock_gating(struct hl_device *hdev); static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid); +static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, + u32 size); +static u32 gaudi_gen_wait_cb(struct hl_device *hdev, + struct hl_gen_wait_properties *prop); + +static inline enum hl_collective_mode +get_collective_mode(struct hl_device *hdev, u32 queue_id) +{ + if (gaudi_queue_type[queue_id] == QUEUE_TYPE_EXT) + return HL_COLLECTIVE_MASTER; + + if (queue_id >= GAUDI_QUEUE_ID_DMA_5_0 && + queue_id <= GAUDI_QUEUE_ID_DMA_5_3) + return HL_COLLECTIVE_SLAVE; + + if (queue_id >= GAUDI_QUEUE_ID_TPC_7_0 && + queue_id <= GAUDI_QUEUE_ID_TPC_7_3) + return HL_COLLECTIVE_SLAVE; + + if (queue_id >= GAUDI_QUEUE_ID_NIC_0_0 && + queue_id <= GAUDI_QUEUE_ID_NIC_9_3) + return HL_COLLECTIVE_SLAVE; + + return HL_COLLECTIVE_NOT_SUPPORTED; +} static int gaudi_get_fixed_properties(struct hl_device *hdev) { @@ -393,18 +418,28 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->hw_queues_props[i].supports_sync_stream = 0; prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER; - } else if (gaudi_queue_type[i] == QUEUE_TYPE_NA) { - prop->hw_queues_props[i].type = QUEUE_TYPE_NA; - prop->hw_queues_props[i].driver_only = 0; - prop->hw_queues_props[i].supports_sync_stream = 0; - prop->hw_queues_props[i].cb_alloc_flags = - CB_ALLOC_USER; + } + prop->hw_queues_props[i].collective_mode = + get_collective_mode(hdev, i); } prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; - prop->sync_stream_first_sob = 0; - prop->sync_stream_first_mon = 0; + prop->collective_first_sob = 0; + prop->collective_first_mon = 0; + + /* 2 SOBs per internal queue stream are reserved for collective */ + prop->sync_stream_first_sob = + ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR) + * QMAN_STREAMS * HL_RSVD_SOBS; + + /* 1 monitor per internal queue stream are reserved for collective + * 2 monitors per external queue stream are reserved for collective + */ + prop->sync_stream_first_mon = + (NUMBER_OF_COLLECTIVE_QUEUES * QMAN_STREAMS) + + (NUMBER_OF_EXT_HW_QUEUES * 2); + prop->dram_base_address = DRAM_PHYS_BASE; prop->dram_size = GAUDI_HBM_SIZE_32GB; prop->dram_end_address = prop->dram_base_address + @@ -789,21 +824,451 @@ out: return rc; } -static void gaudi_reset_sob_group(struct hl_device *hdev, 
u16 sob_groupt) +static void gaudi_collective_map_sobs(struct hl_device *hdev, u32 stream) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_collective_properties *prop = &gaudi->collective_props; + struct hl_hw_queue *q; + u32 i, sob_id, sob_group_id, queue_id; + + /* Iterate through SOB groups and assign a SOB for each slave queue */ + sob_group_id = + stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream]; + sob_id = prop->hw_sob_group[sob_group_id].base_sob_id; + + queue_id = GAUDI_QUEUE_ID_NIC_0_0 + stream; + for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { + q = &hdev->kernel_queues[queue_id + (4 * i)]; + q->sync_stream_prop.collective_sob_id = sob_id + i; + } + + /* Both DMA5 and TPC7 use the same resources since only a single + * engine need to participate in the reduction process + */ + queue_id = GAUDI_QUEUE_ID_DMA_5_0 + stream; + q = &hdev->kernel_queues[queue_id]; + q->sync_stream_prop.collective_sob_id = + sob_id + NIC_NUMBER_OF_ENGINES; + + queue_id = GAUDI_QUEUE_ID_TPC_7_0 + stream; + q = &hdev->kernel_queues[queue_id]; + q->sync_stream_prop.collective_sob_id = + sob_id + NIC_NUMBER_OF_ENGINES; +} + +static void gaudi_sob_group_hw_reset(struct kref *ref) +{ + struct gaudi_hw_sob_group *hw_sob_group = + container_of(ref, struct gaudi_hw_sob_group, kref); + struct hl_device *hdev = hw_sob_group->hdev; + int i; + + for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++) + WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + + (hw_sob_group->base_sob_id + i) * 4, 0); + + kref_init(&hw_sob_group->kref); +} + +static void gaudi_sob_group_reset_error(struct kref *ref) +{ + struct gaudi_hw_sob_group *hw_sob_group = + container_of(ref, struct gaudi_hw_sob_group, kref); + struct hl_device *hdev = hw_sob_group->hdev; + + dev_crit(hdev->dev, + "SOB release shouldn't be called here, base_sob_id: %d\n", + hw_sob_group->base_sob_id); +} + +static int gaudi_collective_init(struct hl_device *hdev) +{ + u32 i, master_monitor_sobs, sob_id, reserved_sobs_per_group; + struct gaudi_collective_properties *prop; + struct gaudi_device *gaudi; + + gaudi = hdev->asic_specific; + prop = &gaudi->collective_props; + sob_id = hdev->asic_prop.collective_first_sob; + + /* First sob in group must be aligned to HL_MAX_SOBS_PER_MONITOR */ + reserved_sobs_per_group = + ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR); + + /* Init SOB groups */ + for (i = 0 ; i < NUM_SOB_GROUPS; i++) { + prop->hw_sob_group[i].hdev = hdev; + prop->hw_sob_group[i].base_sob_id = sob_id; + sob_id += reserved_sobs_per_group; + gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref); + } + + for (i = 0 ; i < QMAN_STREAMS; i++) { + prop->next_sob_group_val[i] = 1; + prop->curr_sob_group_idx[i] = 0; + gaudi_collective_map_sobs(hdev, i); + } + + prop->mstr_sob_mask[0] = 0; + master_monitor_sobs = HL_MAX_SOBS_PER_MONITOR; + for (i = 0 ; i < master_monitor_sobs ; i++) + if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) + prop->mstr_sob_mask[0] |= BIT(i); + + prop->mstr_sob_mask[1] = 0; + master_monitor_sobs = + NIC_NUMBER_OF_ENGINES - HL_MAX_SOBS_PER_MONITOR; + for (i = 0 ; i < master_monitor_sobs; i++) { + if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i)) + prop->mstr_sob_mask[1] |= BIT(i); + } + + /* Set collective engine bit */ + prop->mstr_sob_mask[1] |= BIT(i); + + return 0; +} + +static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_group) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + struct gaudi_collective_properties *cprop = &gaudi->collective_props; + + 
kref_put(&cprop->hw_sob_group[sob_group].kref, + gaudi_sob_group_hw_reset); +} + +static void gaudi_collective_master_init_job(struct hl_device *hdev, + struct hl_cs_job *job, u32 stream, u32 sob_group_offset) +{ + u32 master_sob_base, master_monitor, queue_id, cb_size = 0; + struct gaudi_collective_properties *cprop; + struct hl_gen_wait_properties wait_prop; + struct hl_sync_stream_properties *prop; + struct gaudi_device *gaudi; + + gaudi = hdev->asic_specific; + cprop = &gaudi->collective_props; + queue_id = job->hw_queue_id; + prop = &hdev->kernel_queues[queue_id].sync_stream_prop; + + master_sob_base = + cprop->hw_sob_group[sob_group_offset].base_sob_id; + master_monitor = prop->collective_mstr_mon_id[0]; + + dev_dbg(hdev->dev, + "Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n", + master_sob_base, cprop->mstr_sob_mask[0], + cprop->next_sob_group_val[stream], + master_monitor, queue_id); + + wait_prop.data = (void *) job->patched_cb; + wait_prop.sob_base = master_sob_base; + wait_prop.sob_mask = cprop->mstr_sob_mask[0]; + wait_prop.sob_val = cprop->next_sob_group_val[stream]; + wait_prop.mon_id = master_monitor; + wait_prop.q_idx = queue_id; + wait_prop.size = cb_size; + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); + + master_sob_base += HL_MAX_SOBS_PER_MONITOR; + master_monitor = prop->collective_mstr_mon_id[1]; + + dev_dbg(hdev->dev, + "Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n", + master_sob_base, cprop->mstr_sob_mask[1], + cprop->next_sob_group_val[stream], + master_monitor, queue_id); + + wait_prop.sob_base = master_sob_base; + wait_prop.sob_mask = cprop->mstr_sob_mask[1]; + wait_prop.mon_id = master_monitor; + wait_prop.size = cb_size; + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); +} + +static void gaudi_collective_slave_init_job(struct hl_device *hdev, + struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl) { + struct hl_gen_wait_properties wait_prop; + struct hl_sync_stream_properties *prop; + u32 queue_id, cb_size = 0; + + queue_id = job->hw_queue_id; + prop = &hdev->kernel_queues[queue_id].sync_stream_prop; + + /* Add to wait CBs using slave monitor */ + wait_prop.data = (void *) job->user_cb; + wait_prop.sob_base = cs_cmpl->hw_sob->sob_id; + wait_prop.sob_mask = 0x1; + wait_prop.sob_val = cs_cmpl->sob_val; + wait_prop.mon_id = prop->collective_slave_mon_id; + wait_prop.q_idx = queue_id; + wait_prop.size = cb_size; + + dev_dbg(hdev->dev, + "Generate slave wait CB, sob %d, val:0x%x, mon %d, q %d\n", + cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, + prop->collective_slave_mon_id, queue_id); + + cb_size += gaudi_gen_wait_cb(hdev, &wait_prop); + + dev_dbg(hdev->dev, + "generate signal CB, sob_id: %d, sob val: 1, q_idx: %d\n", + prop->collective_sob_id, queue_id); + cb_size += gaudi_gen_signal_cb(hdev, job->user_cb, + prop->collective_sob_id, cb_size); } static void gaudi_collective_wait_init_cs(struct hl_cs *cs) { + struct hl_cs_compl *signal_cs_cmpl = + container_of(cs->signal_fence, struct hl_cs_compl, base_fence); + struct hl_cs_compl *cs_cmpl = + container_of(cs->fence, struct hl_cs_compl, base_fence); + struct gaudi_collective_properties *cprop; + u32 stream, queue_id, sob_group_offset; + struct gaudi_device *gaudi; + struct hl_device *hdev; + struct hl_cs_job *job; + struct hl_ctx *ctx; + + ctx = cs->ctx; + hdev = ctx->hdev; + gaudi = hdev->asic_specific; + cprop = &gaudi->collective_props; + + /* copy the SOB id and value of the signal CS */ + cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob; + cs_cmpl->sob_val = 
signal_cs_cmpl->sob_val; + + /* Calculate the stream from collective master queue (1st job) */ + job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node); + stream = job->hw_queue_id % 4; + sob_group_offset = + stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream]; + + list_for_each_entry(job, &cs->job_list, cs_node) { + queue_id = job->hw_queue_id; + + if (hdev->kernel_queues[queue_id].collective_mode == + HL_COLLECTIVE_MASTER) + gaudi_collective_master_init_job(hdev, job, stream, + sob_group_offset); + else + gaudi_collective_slave_init_job(hdev, job, cs_cmpl); + } + + cs_cmpl->sob_group = sob_group_offset; + + /* Handle sob group kref and wraparound */ + kref_get(&cprop->hw_sob_group[sob_group_offset].kref); + cprop->next_sob_group_val[stream]++; + if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) { + /* + * Decrement as we reached the max value. + * The release function won't be called here as we've + * just incremented the refcount. + */ + kref_put(&cprop->hw_sob_group[sob_group_offset].kref, + gaudi_sob_group_reset_error); + cprop->next_sob_group_val[stream] = 1; + /* only two SOBs are currently in use */ + cprop->curr_sob_group_idx[stream] = + (cprop->curr_sob_group_idx[stream] + 1) & + (HL_RSVD_SOBS - 1); + + gaudi_collective_map_sobs(hdev, stream); + + dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n", + cprop->curr_sob_group_idx[stream], stream); + } + + /* Increment kref since all slave queues are now waiting on it */ + kref_get(&cs_cmpl->hw_sob->kref); + /* + * Must put the signal fence after the SOB refcnt increment so + * the SOB refcnt won't turn 0 and reset the SOB before the + * wait CS was submitted. + */ + mb(); + hl_fence_put(cs->signal_fence); + cs->signal_fence = NULL; +} + +static int gaudi_collective_wait_create_job(struct hl_device *hdev, + struct hl_ctx *ctx, struct hl_cs *cs, + enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id) +{ + struct hw_queue_properties *hw_queue_prop; + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cb *cb; + u32 cb_size; + bool patched_cb; + + cntr = &hdev->aggregated_cs_counters; + + if (mode == HL_COLLECTIVE_MASTER) { + /* CB size of collective master queue contains + * 4 msg short packets for monitor 1 configuration + * 1 fence packet + * 4 msg short packets for monitor 2 configuration + * 1 fence packet + * 2 msg prot packets for completion and MSI-X + */ + cb_size = sizeof(struct packet_msg_short) * 8 + + sizeof(struct packet_fence) * 2 + + sizeof(struct packet_msg_prot) * 2; + patched_cb = true; + } else { + /* CB size of collective slave queues contains + * 4 msg short packets for monitor configuration + * 1 fence packet + * 1 additional msg short packet for sob signal + */ + cb_size = sizeof(struct packet_msg_short) * 5 + + sizeof(struct packet_fence); + patched_cb = false; + } + + hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id]; + job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true); + if (!job) { + ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); + dev_err(hdev->dev, "Failed to allocate a new job\n"); + return -ENOMEM; + } + + /* Allocate internal mapped CB for non patched CBs */ + cb = hl_cb_kernel_create(hdev, cb_size, + hdev->mmu_enable && !patched_cb); + if (!cb) { + ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&cntr->out_of_mem_drop_cnt); + kfree(job); + return -EFAULT; + } + + job->id = 0; + job->cs = cs; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + 
job->hw_queue_id = queue_id; + + /* + * No need in parsing, user CB is the patched CB. + * We call hl_cb_destroy() out of two reasons - we don't need + * the CB in the CB idr anymore and to decrement its refcount as + * it was incremented inside hl_cb_kernel_create(). + */ + if (patched_cb) + job->patched_cb = job->user_cb; + else + job->patched_cb = NULL; + + job->job_cb_size = job->user_cb_size; + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + /* increment refcount as for external queues we get completion */ + if (hw_queue_prop->type == QUEUE_TYPE_EXT) + cs_get(cs); + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + hl_debugfs_add_job(hdev, job); + + return 0; } static int gaudi_collective_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, u32 collective_engine_id) { - return -EINVAL; + struct gaudi_device *gaudi = hdev->asic_specific; + struct hw_queue_properties *hw_queue_prop; + u32 queue_id, collective_queue, num_jobs; + u32 stream, nic_queue, nic_idx = 0; + bool skip; + int i, rc; + + /* Verify wait queue id is configured as master */ + hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id]; + if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) { + dev_err(hdev->dev, + "Queue %d is not configured as collective master\n", + wait_queue_id); + return -EINVAL; + } + + /* Verify engine id is supported */ + if (collective_engine_id != GAUDI_ENGINE_ID_DMA_5 && + collective_engine_id != GAUDI_ENGINE_ID_TPC_7) { + dev_err(hdev->dev, + "Collective wait does not support engine %u\n", + collective_engine_id); + return -EINVAL; + } + + stream = wait_queue_id % 4; + + if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5) + collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream; + else if (collective_engine_id == GAUDI_ENGINE_ID_TPC_7) + collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream; + else + return -EINVAL; + + num_jobs = NUMBER_OF_SOBS_IN_GRP + 1; + nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream; + + /* First job goes to the collective master queue, it will wait for + * the collective slave queues to finish execution. + * The synchronization is done using two monitors: + * First monitor for NICs 0-7, second monitor for NICs 8-9 and the + * reduction engine (DMA5/TPC7). + * + * Rest of the jobs goes to the collective slave queues which will + * all wait for the user to signal sob 'cs_cmpl->sob_val'. 
+ */ + for (i = 0 ; i < num_jobs ; i++) { + if (i == 0) { + queue_id = wait_queue_id; + rc = gaudi_collective_wait_create_job(hdev, ctx, cs, + HL_COLLECTIVE_MASTER, queue_id, wait_queue_id); + } else { + if (nic_idx < NIC_NUMBER_OF_ENGINES) { + if (gaudi->hw_cap_initialized & + BIT(HW_CAP_NIC_SHIFT + nic_idx)) + skip = false; + else + skip = true; + + queue_id = nic_queue; + nic_queue += 4; + nic_idx++; + + if (skip) + continue; + } else { + queue_id = collective_queue; + } + + rc = gaudi_collective_wait_create_job(hdev, ctx, cs, + HL_COLLECTIVE_SLAVE, queue_id, wait_queue_id); + } + + if (rc) + return rc; + } + + return rc; } static int gaudi_late_init(struct hl_device *hdev) @@ -860,6 +1325,12 @@ static int gaudi_late_init(struct hl_device *hdev) goto disable_pci_access; } + rc = gaudi_collective_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to init collective\n"); + goto disable_pci_access; + } + return 0; disable_pci_access: @@ -2041,21 +2512,29 @@ static void gaudi_init_pci_dma_qmans(struct hl_device *hdev) static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, int qman_id, u64 qman_base_addr) { - u32 mtr_base_lo, mtr_base_hi; - u32 so_base_lo, so_base_hi; + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; u32 q_off, dma_qm_offset; u32 dma_qm_err_cfg; dma_qm_offset = dma_id * DMA_QMAN_OFFSET; - mtr_base_lo = lower_32_bits(CFG_BASE + - mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - mtr_base_hi = upper_32_bits(CFG_BASE + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - so_base_lo = lower_32_bits(CFG_BASE + + so_base_en_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); - so_base_hi = upper_32_bits(CFG_BASE + + so_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); q_off = dma_qm_offset + qman_id * 4; @@ -2113,10 +2592,22 @@ static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id, QMAN_INTERNAL_MAKE_TRUSTED); } - WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); - WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); - WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); - WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + + /* Configure DMA5 CP_MSG_BASE 2/3 for sync stream collective */ + if (gaudi_dma_assignment[dma_id] == GAUDI_ENGINE_ID_DMA_5) { + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, + mtr_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, + mtr_base_ws_hi); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, + so_base_ws_lo); + WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, + so_base_ws_hi); + } } static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev) @@ -2279,22 
+2770,33 @@ static void gaudi_init_mme_qmans(struct hl_device *hdev) static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, int qman_id, u64 qman_base_addr) { - u32 mtr_base_lo, mtr_base_hi; - u32 so_base_lo, so_base_hi; + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; u32 q_off, tpc_id; u32 tpc_qm_err_cfg; - mtr_base_lo = lower_32_bits(CFG_BASE + - mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - mtr_base_hi = upper_32_bits(CFG_BASE + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - so_base_lo = lower_32_bits(CFG_BASE + + so_base_en_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); - so_base_hi = upper_32_bits(CFG_BASE + + so_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); q_off = tpc_offset + qman_id * 4; + tpc_id = tpc_offset / + (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); + if (qman_id < 4) { WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr)); @@ -2320,9 +2822,6 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, QMAN_LDMA_DST_OFFSET); /* Configure RAZWI IRQ */ - tpc_id = tpc_offset / - (mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0); - tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK; if (hdev->stop_on_err) { tpc_qm_err_cfg |= @@ -2352,10 +2851,22 @@ static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset, QMAN_INTERNAL_MAKE_TRUSTED); } - WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); - WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); - WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); - WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); + + /* Configure TPC7 CP_MSG_BASE 2/3 for sync stream collective */ + if (tpc_id == 6) { + WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, + mtr_base_ws_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, + mtr_base_ws_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, + so_base_ws_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, + so_base_ws_hi); + } } static void gaudi_init_tpc_qmans(struct hl_device *hdev) @@ -2406,19 +2917,27 @@ static void gaudi_init_tpc_qmans(struct hl_device *hdev) static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset, int qman_id, u64 qman_base_addr, int nic_id) { - u32 mtr_base_lo, mtr_base_hi; - u32 so_base_lo, so_base_hi; + u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi; + u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi; u32 q_off; u32 nic_qm_err_cfg; - mtr_base_lo = lower_32_bits(CFG_BASE + - mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - mtr_base_hi = upper_32_bits(CFG_BASE + + mtr_base_en_lo = lower_32_bits(CFG_BASE + + 
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); - so_base_lo = lower_32_bits(CFG_BASE + + so_base_en_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); - so_base_hi = upper_32_bits(CFG_BASE + + so_base_en_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0); + mtr_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + mtr_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); + so_base_ws_lo = lower_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); + so_base_ws_hi = upper_32_bits(CFG_BASE + + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0); q_off = nic_offset + qman_id * 4; @@ -2429,14 +2948,23 @@ static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset, WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0); WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0); - WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off, 0x74); - WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, 0x14); - WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, 0x1C); + WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off, + QMAN_LDMA_SIZE_OFFSET); + WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_SRC_OFFSET); + WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off, + QMAN_LDMA_DST_OFFSET); + + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi); - WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo); - WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi); - WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo); - WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi); + /* Configure NIC CP_MSG_BASE 2/3 for sync stream collective */ + WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi); + WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo); + WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi); if (qman_id == 0) { /* Configure RAZWI IRQ */ @@ -7011,11 +7539,152 @@ static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) return RREG32(mmHW_STATE); } +static int gaudi_internal_cb_pool_init(struct hl_device *hdev, + struct hl_ctx *ctx) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + bool flush_pte; + u64 va, pa; + s64 off; + int min_alloc_order, rc, collective_cb_size; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + hdev->internal_cb_pool_virt_addr = + hdev->asic_funcs->asic_dma_alloc_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + &hdev->internal_cb_pool_dma_addr, + GFP_KERNEL | __GFP_ZERO); + + if (!hdev->internal_cb_pool_virt_addr) + return -ENOMEM; + + collective_cb_size = sizeof(struct packet_msg_short) * 5 + + sizeof(struct packet_fence); + min_alloc_order = ilog2(collective_cb_size); + + hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); + if (!hdev->internal_cb_pool) { + dev_err(hdev->dev, + "Failed to create internal CB pool\n"); + rc = -ENOMEM; + goto free_internal_cb_pool; + } + + rc = gen_pool_add(hdev->internal_cb_pool, + (uintptr_t) hdev->internal_cb_pool_virt_addr, + HOST_SPACE_INTERNAL_CB_SZ, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to internal CB pool\n"); + 
rc = -EFAULT; + goto destroy_internal_cb_pool; + } + + hdev->internal_cb_va_base = VA_HOST_SPACE_INTERNAL_CB_START; + + mutex_lock(&ctx->mmu_lock); + + /* The mapping is done page by page since we can't assure allocated ptr + * is aligned to HOST_SPACE_INTERNAL_CB_SZ + */ + for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { + va = VA_HOST_SPACE_INTERNAL_CB_START + off; + pa = hdev->internal_cb_pool_dma_addr + off; + flush_pte = (off + PAGE_SIZE_4KB) >= HOST_SPACE_INTERNAL_CB_SZ; + rc = hl_mmu_map(ctx, va, pa, PAGE_SIZE_4KB, flush_pte); + if (rc) { + dev_err(hdev->dev, + "Map failed for va 0x%llx to pa 0x%llx\n", + va, pa); + goto unmap; + } + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + + return 0; + +unmap: + for (; off >= 0 ; off -= PAGE_SIZE_4KB) { + va = VA_HOST_SPACE_INTERNAL_CB_START + off; + flush_pte = (off - (s32) PAGE_SIZE_4KB) < 0; + if (hl_mmu_unmap(ctx, va, PAGE_SIZE_4KB, flush_pte)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va 0x%llx\n", va); + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + +destroy_internal_cb_pool: + gen_pool_destroy(hdev->internal_cb_pool); + +free_internal_cb_pool: + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + hdev->internal_cb_pool_virt_addr, + hdev->internal_cb_pool_dma_addr); + + return rc; +} + +static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, + struct hl_ctx *ctx) +{ + struct gaudi_device *gaudi = hdev->asic_specific; + bool flush_pte = false; + u64 va, off; + + if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) + return; + + mutex_lock(&ctx->mmu_lock); + + for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { + va = VA_HOST_SPACE_INTERNAL_CB_START + off; + + if (off + PAGE_SIZE_4KB >= HOST_SPACE_INTERNAL_CB_SZ) + flush_pte = true; + + if (hl_mmu_unmap(ctx, va, PAGE_SIZE_4KB, flush_pte)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va 0x%llx\n", va); + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); + + mutex_unlock(&ctx->mmu_lock); + + gen_pool_destroy(hdev->internal_cb_pool); + + hdev->asic_funcs->asic_dma_free_coherent(hdev, + HOST_SPACE_INTERNAL_CB_SZ, + hdev->internal_cb_pool_virt_addr, + hdev->internal_cb_pool_dma_addr); +} + static int gaudi_ctx_init(struct hl_ctx *ctx) { gaudi_mmu_prepare(ctx->hdev, ctx->asid); + return gaudi_internal_cb_pool_init(ctx->hdev, ctx); +} - return 0; +void gaudi_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + + /* Gaudi will NEVER support more then a single compute context. 
+ * Therefore, don't clear anything unless it is the compute context + */ + if (hdev->compute_ctx != ctx) + return; + + gaudi_internal_cb_pool_fini(ctx->hdev, ctx); } static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx) @@ -7037,7 +7706,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev) } static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, - u32 size) + u32 size) { struct hl_cb *cb = (struct hl_cb *) data; struct packet_msg_short *pkt; @@ -7156,7 +7825,7 @@ static u32 gaudi_add_fence_pkt(struct packet_fence *pkt) static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr) { - u32 offset; + u32 offset, nic_index; switch (queue_id) { case GAUDI_QUEUE_ID_DMA_0_0: @@ -7195,6 +7864,78 @@ static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr) case GAUDI_QUEUE_ID_DMA_5_3: offset = mmDMA5_QM_CP_FENCE2_RDATA_3; break; + case GAUDI_QUEUE_ID_TPC_7_0: + offset = mmTPC7_QM_CP_FENCE2_RDATA_0; + break; + case GAUDI_QUEUE_ID_TPC_7_1: + offset = mmTPC7_QM_CP_FENCE2_RDATA_1; + break; + case GAUDI_QUEUE_ID_TPC_7_2: + offset = mmTPC7_QM_CP_FENCE2_RDATA_2; + break; + case GAUDI_QUEUE_ID_TPC_7_3: + offset = mmTPC7_QM_CP_FENCE2_RDATA_3; + break; + case GAUDI_QUEUE_ID_NIC_0_0: + case GAUDI_QUEUE_ID_NIC_1_0: + case GAUDI_QUEUE_ID_NIC_2_0: + case GAUDI_QUEUE_ID_NIC_3_0: + case GAUDI_QUEUE_ID_NIC_4_0: + case GAUDI_QUEUE_ID_NIC_5_0: + case GAUDI_QUEUE_ID_NIC_6_0: + case GAUDI_QUEUE_ID_NIC_7_0: + case GAUDI_QUEUE_ID_NIC_8_0: + case GAUDI_QUEUE_ID_NIC_9_0: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_0 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_1: + case GAUDI_QUEUE_ID_NIC_1_1: + case GAUDI_QUEUE_ID_NIC_2_1: + case GAUDI_QUEUE_ID_NIC_3_1: + case GAUDI_QUEUE_ID_NIC_4_1: + case GAUDI_QUEUE_ID_NIC_5_1: + case GAUDI_QUEUE_ID_NIC_6_1: + case GAUDI_QUEUE_ID_NIC_7_1: + case GAUDI_QUEUE_ID_NIC_8_1: + case GAUDI_QUEUE_ID_NIC_9_1: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_1 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_2: + case GAUDI_QUEUE_ID_NIC_1_2: + case GAUDI_QUEUE_ID_NIC_2_2: + case GAUDI_QUEUE_ID_NIC_3_2: + case GAUDI_QUEUE_ID_NIC_4_2: + case GAUDI_QUEUE_ID_NIC_5_2: + case GAUDI_QUEUE_ID_NIC_6_2: + case GAUDI_QUEUE_ID_NIC_7_2: + case GAUDI_QUEUE_ID_NIC_8_2: + case GAUDI_QUEUE_ID_NIC_9_2: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_2 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; + case GAUDI_QUEUE_ID_NIC_0_3: + case GAUDI_QUEUE_ID_NIC_1_3: + case GAUDI_QUEUE_ID_NIC_2_3: + case GAUDI_QUEUE_ID_NIC_3_3: + case GAUDI_QUEUE_ID_NIC_4_3: + case GAUDI_QUEUE_ID_NIC_5_3: + case GAUDI_QUEUE_ID_NIC_6_3: + case GAUDI_QUEUE_ID_NIC_7_3: + case GAUDI_QUEUE_ID_NIC_8_3: + case GAUDI_QUEUE_ID_NIC_9_3: + nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2; + offset = mmNIC0_QM0_CP_FENCE2_RDATA_3 + + (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET + + (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET; + break; default: return -EINVAL; } @@ -7360,6 +8101,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .wreg = hl_wreg, .halt_coresight = gaudi_halt_coresight, .ctx_init = gaudi_ctx_init, + .ctx_fini = gaudi_ctx_fini, .get_clk_rate = gaudi_get_clk_rate, .get_queue_id_for_cq = 
gaudi_get_queue_id_for_cq, .read_device_fw_version = gaudi_read_device_fw_version, diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 16871d9ff88b..297a96dbf4e5 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -23,6 +23,10 @@ NUMBER_OF_CPU_HW_QUEUES + \ NUMBER_OF_INT_HW_QUEUES) +/* 10 NIC QMANs, DMA5 QMAN, TPC7 QMAN */ +#define NUMBER_OF_COLLECTIVE_QUEUES 12 +#define NUMBER_OF_SOBS_IN_GRP 11 + /* * Number of MSI interrupts IDS: * Each completion queue has 1 ID @@ -149,10 +153,14 @@ /* Virtual address space */ #define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ -#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_END 0x3FF7FFFE00000ull /* 1PB - 1TB */ #define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ VA_HOST_SPACE_START) /* 767TB */ +#define VA_HOST_SPACE_INTERNAL_CB_START 0x3FF7FFFE00000ull /* 1PB - 1TB - 2MB */ +#define VA_HOST_SPACE_INTERNAL_CB_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define HOST_SPACE_INTERNAL_CB_SZ SZ_2M + #define HW_CAP_PLL BIT(0) #define HW_CAP_HBM BIT(1) #define HW_CAP_MMU BIT(2) @@ -240,6 +248,34 @@ enum gaudi_nic_mask { GAUDI_NIC_MASK_ALL = 0x3FF }; +/* + * struct gaudi_hw_sob_group - H/W SOB group info. + * @hdev: habanalabs device structure. + * @kref: refcount of this SOB group. group will reset once refcount is zero. + * @base_sob_id: base sob id of this SOB group. + */ +struct gaudi_hw_sob_group { + struct hl_device *hdev; + struct kref kref; + u32 base_sob_id; +}; + +#define NUM_SOB_GROUPS (HL_RSVD_SOBS * QMAN_STREAMS) +/** + * struct gaudi_collective_properties - + * holds all SOB groups and queues info reserved for the collective + * @hw_sob_group: H/W SOB groups. + * @next_sob_group_val: the next value to use for the currently used SOB group. + * @curr_sob_group_idx: the index of the currently used SOB group. + * @mstr_sob_mask: pre-defined masks for collective master monitors + */ +struct gaudi_collective_properties { + struct gaudi_hw_sob_group hw_sob_group[NUM_SOB_GROUPS]; + u16 next_sob_group_val[QMAN_STREAMS]; + u8 curr_sob_group_idx[QMAN_STREAMS]; + u8 mstr_sob_mask[HL_COLLECTIVE_RSVD_MSTR_MONS]; +}; + /** * struct gaudi_internal_qman_info - Internal QMAN information. * @pq_kernel_addr: Kernel address of the PQ memory area in the host. 
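The SOB groups declared above are reference counted: every collective wait CS takes a reference on the group it uses, and the last put resets the group's sync objects in hardware and re-arms the kref so the group can be recycled. All identifiers below appear in the gaudi.c hunks of this patch; the snippet only condenses them into one place:

	/* submission path (gaudi_collective_wait_init_cs): the CS now
	 * holds a reference on the SOB group it waits on
	 */
	kref_get(&cprop->hw_sob_group[sob_group_offset].kref);

	/* reset path (gaudi_reset_sob_group): the last user triggers the
	 * hardware reset of the whole group
	 */
	kref_put(&cprop->hw_sob_group[sob_group].kref,
			gaudi_sob_group_hw_reset);

	/* gaudi_sob_group_hw_reset() zeroes NUMBER_OF_SOBS_IN_GRP sync
	 * objects via WREG32() and calls kref_init() on the group
	 */

On wraparound (next_sob_group_val reaching HL_MAX_SOB_VAL) the reference just taken is dropped again with kref_put(..., gaudi_sob_group_reset_error) - that release callback is never expected to run and only logs a critical error if it does - and the stream switches to its next reserved SOB group via gaudi_collective_map_sobs().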
@@ -285,6 +321,8 @@ struct gaudi_device { struct gaudi_internal_qman_info internal_qmans[GAUDI_QUEUE_ID_SIZE]; + struct gaudi_collective_properties collective_props; + u64 hbm_bar_cur_addr; u64 max_freq_value; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 9332580b038d..374881ff551d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5343,6 +5343,11 @@ int goya_collective_wait_create_jobs(struct hl_device *hdev, return -EINVAL; } +static void goya_ctx_fini(struct hl_ctx *ctx) +{ + +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -5404,6 +5409,7 @@ static const struct hl_asic_funcs goya_funcs = { .wreg = hl_wreg, .halt_coresight = goya_halt_coresight, .ctx_init = goya_ctx_init, + .ctx_fini = goya_ctx_fini, .get_clk_rate = goya_get_clk_rate, .get_queue_id_for_cq = goya_get_queue_id_for_cq, .read_device_fw_version = goya_read_device_fw_version, diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 4661a74f0425..0185311b679b 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -18,8 +18,18 @@ #define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ #define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */ -#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 32 -#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 16 +/* + * 128 SOBs reserved for collective wait + * 16 SOBs reserved for sync stream + */ +#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 144 + +/* + * 64 monitors reserved for collective wait + * 8 monitors reserved for sync stream + */ +#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 72 + /* * Goya queue Numbering * -- cgit v1.2.3 From 3e6229965763129da2105f61cd4e0570ba53cb2b Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sun, 18 Oct 2020 15:32:23 +0300 Subject: habanalabs/gaudi: monitor device memory usage In GAUDI we don't have an MMU towards the HBM device memory. Therefore, the user access that memory directly through physical address (via the different engines) without the need to go through the driver to allocate/free memory on the HBM. For system monitoring purposes, the driver will keep track of the HBM usage. This can be done as long as the user accurately reports the allocations and releases of HBM memory, through the existing MEMORY IOCTL uapi. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/memory.c | 56 +++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 75dd18771868..f885812d9939 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1236,18 +1236,35 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) switch (args->in.op) { case HL_MEM_OP_ALLOC: - if (!hdev->dram_supports_virtual_memory) { - dev_err(hdev->dev, "DRAM alloc is not supported\n"); - rc = -EINVAL; - goto out; - } - if (args->in.alloc.mem_size == 0) { dev_err(hdev->dev, "alloc size must be larger than 0\n"); rc = -EINVAL; goto out; } + + /* If DRAM does not support virtual memory the driver won't + * handle the allocation/freeing of that memory. However, for + * system administration/monitoring purposes, the driver will + * keep track of the amount of DRAM memory that is allocated + * and freed by the user. 
Because this code totally relies on + * the user's input, the driver can't ensure the validity + * of this accounting. + */ + if (!hdev->dram_supports_virtual_memory) { + atomic64_add(args->in.alloc.mem_size, + &ctx->dram_phys_mem); + atomic64_add(args->in.alloc.mem_size, + &hdev->dram_used_mem); + + dev_dbg(hdev->dev, "DRAM alloc is not supported\n"); + rc = 0; + + memset(args, 0, sizeof(*args)); + args->out.handle = 0; + goto out; + } + rc = alloc_device_memory(ctx, &args->in, &handle); memset(args, 0, sizeof(*args)); @@ -1255,6 +1272,26 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) break; case HL_MEM_OP_FREE: + /* If DRAM does not support virtual memory the driver won't + * handle the allocation/freeing of that memory. However, for + * system administration/monitoring purposes, the driver will + * keep track of the amount of DRAM memory that is allocated + * and freed by the user. Because this code totally relies on + * the user's input, the driver can't ensure the validity + * of this accounting. + */ + if (!hdev->dram_supports_virtual_memory) { + atomic64_sub(args->in.alloc.mem_size, + &ctx->dram_phys_mem); + atomic64_sub(args->in.alloc.mem_size, + &hdev->dram_used_mem); + + dev_dbg(hdev->dev, "DRAM alloc is not supported\n"); + rc = 0; + + goto out; + } + rc = free_device_memory(ctx, args->in.free.handle); break; @@ -1773,6 +1810,13 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); + + /* In this case we need to clear the global accounting of DRAM usage + * because the user notifies us on allocations. If the user is no more, + * all DRAM is available + */ + if (!ctx->hdev->dram_supports_virtual_memory) + atomic64_set(&ctx->hdev->dram_used_mem, 0); } /* -- cgit v1.2.3 From e716ad3c76c8cf1722f674e43103c74cad85438f Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 19 Oct 2020 09:06:18 +0300 Subject: habanalabs: make sure cs type is valid in cs_ioctl_signal_wait Although we get a valid cs type from the callee, in case new values will be added in the future, it is best to check the expected values in that function. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 5ece52588ec6..8da0526d1d1f 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -1068,9 +1068,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_SIGNAL) rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, q_idx); - else + else if (cs_type == CS_TYPE_COLLECTIVE_WAIT) rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx, cs, q_idx, collective_engine_id); + else + rc = -EINVAL; if (rc) goto free_cs_object; -- cgit v1.2.3 From 71a984f9ae0999ea2ea92003f56d306f03203ba0 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 19 Oct 2020 16:52:00 +0300 Subject: habanalabs/gaudi: remove unreachable code Remove unreachable code in gaudi collective flow. 
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index c8e59a8338fb..a60c4347c1c6 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1220,10 +1220,8 @@ static int gaudi_collective_wait_create_jobs(struct hl_device *hdev, if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5) collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream; - else if (collective_engine_id == GAUDI_ENGINE_ID_TPC_7) - collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream; else - return -EINVAL; + collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream; num_jobs = NUMBER_OF_SOBS_IN_GRP + 1; nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream; -- cgit v1.2.3 From 977d53a614fb7562a80cc2da2b64d426002d47c0 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 20 Oct 2020 18:37:56 +0300 Subject: habanalabs: initialize variable before use GCC 7.3.1 20180303 (Red Hat 7.3.1-5) complains that collective_engine_id might be used uninitialized. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 8da0526d1d1f..26822cfd1491 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -941,7 +941,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, struct hl_cs_compl *sig_waitcs_cmpl; struct hl_cs *cs; enum hl_queue_type q_type; - u32 size_to_copy, q_idx, collective_engine_id; + u32 size_to_copy, q_idx, collective_engine_id = 0; u64 signal_seq; int rc; -- cgit v1.2.3 From 9bb86b63d86493ca10b005e9e49d1d08f998a0ac Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Tue, 20 Oct 2020 10:45:37 +0300 Subject: habanalabs: advanced FW loading Today driver is able to load a whole FW binary into a specific location on ASIC. We add support for loading sections from the same FW binary into different loactions. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 21 +++++++++++++++++---- drivers/misc/habanalabs/common/habanalabs.h | 2 +- drivers/misc/habanalabs/gaudi/gaudi.c | 4 ++-- drivers/misc/habanalabs/goya/goya.c | 4 ++-- 4 files changed, 22 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 1340afa8ce3b..647606f6cc2a 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -20,16 +20,18 @@ * @hdev: pointer to hl_device structure. * @fw_name: the firmware image name * @dst: IO memory mapped address space to copy firmware to + * @src_offset: offset in src FW to copy from + * @size: amount of bytes to copy (0 to copy the whole binary) * * Copy fw code from firmware file to device memory. * * Return: 0 on success, non-zero for failure. 
*/ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, - void __iomem *dst) + void __iomem *dst, u32 src_offset, u32 size) { const struct firmware *fw; - const u64 *fw_data; + const void *fw_data; size_t fw_size; int rc; @@ -57,9 +59,20 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, goto out; } - fw_data = (const u64 *) fw->data; + if (size - src_offset > fw_size) { + dev_err(hdev->dev, + "size to copy(%u) and offset(%u) are invalid\n", + size, src_offset); + rc = -EINVAL; + goto out; + } + + if (size) + fw_size = size; + + fw_data = (const void *) fw->data; - memcpy_toio(dst, fw_data, fw_size); + memcpy_toio(dst, fw_data + src_offset, fw_size); out: release_firmware(fw); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index d6eb5c6a2873..7f1522b101b4 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2028,7 +2028,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev); void hl_mmu_v1_set_funcs(struct hl_device *hdev); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, - void __iomem *dst); + void __iomem *dst, u32 src_offset, u32 size); int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode); int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, u16 len, u32 timeout, long *result); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index a60c4347c1c6..ab8c9463932f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -3529,7 +3529,7 @@ static int gaudi_load_firmware_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET; - return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0); } static int gaudi_load_boot_fit_to_device(struct hl_device *hdev) @@ -3538,7 +3538,7 @@ static int gaudi_load_boot_fit_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET; - return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0); } static void gaudi_read_device_fw_version(struct hl_device *hdev, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 374881ff551d..ec5dd159f54e 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2315,7 +2315,7 @@ static int goya_load_firmware_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; - return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0); } /* @@ -2332,7 +2332,7 @@ static int goya_load_boot_fit_to_device(struct hl_device *hdev) dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET; - return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst); + return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0); } /* -- cgit v1.2.3 From e753643d516c7c38f69f3d73169bb00cd70a60b9 Mon Sep 17 00:00:00 2001 From: farah kassabri Date: Mon, 12 Oct 2020 14:30:26 +0300 Subject: habanalabs: fix cs counters structure Fix cs counters structure in uapi to be one flat structure instead of two instances of the same other structure. use atomic read/increment for context counters so we could use one structure for both aggregated and context counters. 
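For illustration, a minimal user-space sketch of reading the new flat counter layout through the INFO ioctl is shown below. It is not part of this patch: the HL_INFO_CS_COUNTERS opcode, the HL_IOCTL_INFO/hl_info_args plumbing, the <misc/habanalabs.h> install path and the /dev/hl0 node name are assumed from the existing uapi; only the total_*/ctx_* field names come from this change.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>		/* installed uapi header (assumed path) */

int main(void)
{
	struct hl_info_cs_counters cnt = {0};
	struct hl_info_args args = {0};
	int fd = open("/dev/hl0", O_RDWR);	/* device node name assumed */

	if (fd < 0)
		return 1;

	args.op = HL_INFO_CS_COUNTERS;		/* opcode assumed from existing uapi */
	args.return_pointer = (uint64_t)(uintptr_t)&cnt;
	args.return_size = sizeof(cnt);

	if (ioctl(fd, HL_IOCTL_INFO, &args))
		return 1;

	/* every drop reason now has a device-wide total_* counter and a
	 * per-context ctx_* counter in one flat structure
	 */
	printf("parsing drops: total %llu, this context %llu\n",
	       (unsigned long long)cnt.total_parsing_drop_cnt,
	       (unsigned long long)cnt.ctx_parsing_drop_cnt);
	return 0;
}
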
Signed-off-by: farah kassabri Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 18 +++--- drivers/misc/habanalabs/common/habanalabs.h | 73 +++++++++++----------- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 35 +++++++---- drivers/misc/habanalabs/common/hw_queue.c | 5 +- drivers/misc/habanalabs/gaudi/gaudi.c | 4 +- include/uapi/misc/habanalabs.h | 35 ++++++----- 6 files changed, 95 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 26822cfd1491..e123101b74d6 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -462,7 +462,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, if (other && !completion_done(&other->completion)) { dev_dbg_ratelimited(hdev->dev, "Rejecting CS because of too many in-flights CS\n"); - ctx->cs_counters.max_cs_in_flight_drop_cnt++; + atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt); atomic64_inc(&cntr->max_cs_in_flight_drop_cnt); rc = -EAGAIN; goto free_fence; @@ -720,7 +720,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = validate_queue_index(hdev, chunk, &queue_type, &is_kernel_allocated_cb); if (rc) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); atomic64_inc(&cntr->parsing_drop_cnt); goto free_cs_object; } @@ -728,7 +728,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, if (is_kernel_allocated_cb) { cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk); if (!cb) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc( + &hpriv->ctx->cs_counters.parsing_drop_cnt); atomic64_inc(&cntr->parsing_drop_cnt); rc = -EINVAL; goto free_cs_object; @@ -743,7 +744,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, job = hl_cs_allocate_job(hdev, queue_type, is_kernel_allocated_cb); if (!job) { - hpriv->ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc( + &hpriv->ctx->cs_counters.out_of_mem_drop_cnt); atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); rc = -ENOMEM; @@ -777,7 +779,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, rc = cs_parser(hpriv, job); if (rc) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", @@ -787,7 +789,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, } if (int_queues_only) { - hpriv->ctx->cs_counters.parsing_drop_cnt++; + atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt); atomic64_inc(&cntr->parsing_drop_cnt); dev_err(hdev->dev, "Reject CS %d.%llu because only internal queues jobs are present\n", @@ -880,7 +882,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, job = hl_cs_allocate_job(hdev, q_type, true); if (!job) { - ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); return -ENOMEM; @@ -894,7 +896,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, cb = hl_cb_kernel_create(hdev, cb_size, q_type == QUEUE_TYPE_HW && hdev->mmu_enable); if (!cb) { - 
ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); atomic64_inc(&cntr->out_of_mem_drop_cnt); kfree(job); return -EFAULT; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 7f1522b101b4..b1f20f225ff9 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -936,6 +936,22 @@ struct hl_va_range { u64 end_addr; }; +/** + * struct hl_cs_counters_atomic - command submission counters + * @out_of_mem_drop_cnt: dropped due to memory allocation issue + * @parsing_drop_cnt: dropped due to error in packet parsing + * @queue_full_drop_cnt: dropped due to queue full + * @device_in_reset_drop_cnt: dropped due to device in reset + * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight + */ +struct hl_cs_counters_atomic { + atomic64_t out_of_mem_drop_cnt; + atomic64_t parsing_drop_cnt; + atomic64_t queue_full_drop_cnt; + atomic64_t device_in_reset_drop_cnt; + atomic64_t max_cs_in_flight_drop_cnt; +}; + /** * struct hl_ctx - user/kernel context. * @mem_hash: holds mapping from virtual address to virtual memory area @@ -954,6 +970,7 @@ struct hl_va_range { * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the * MMU hash or walking the PGT requires talking this lock. * @debugfs_list: node in debugfs list of contexts. + * @cs_counters: context command submission counters. * @cb_va_pool: device VA pool for command buffers which are mapped to the * device's MMU. * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed @@ -976,26 +993,26 @@ struct hl_va_range { struct hl_ctx { DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS); - struct hl_fpriv *hpriv; - struct hl_device *hdev; - struct kref refcount; - struct hl_fence **cs_pending; - struct hl_va_range *host_va_range; - struct hl_va_range *host_huge_va_range; - struct hl_va_range *dram_va_range; - struct mutex mem_hash_lock; - struct mutex mmu_lock; - struct list_head debugfs_list; - struct hl_cs_counters cs_counters; - struct gen_pool *cb_va_pool; - u64 cs_sequence; - u64 *dram_default_hops; - spinlock_t cs_lock; - atomic64_t dram_phys_mem; - atomic_t thread_ctx_switch_token; - u32 thread_ctx_switch_wait_token; - u32 asid; - u32 handle; + struct hl_fpriv *hpriv; + struct hl_device *hdev; + struct kref refcount; + struct hl_fence **cs_pending; + struct hl_va_range *host_va_range; + struct hl_va_range *host_huge_va_range; + struct hl_va_range *dram_va_range; + struct mutex mem_hash_lock; + struct mutex mmu_lock; + struct list_head debugfs_list; + struct hl_cs_counters_atomic cs_counters; + struct gen_pool *cb_va_pool; + u64 cs_sequence; + u64 *dram_default_hops; + spinlock_t cs_lock; + atomic64_t dram_phys_mem; + atomic_t thread_ctx_switch_token; + u32 thread_ctx_switch_wait_token; + u32 asid; + u32 handle; }; /** @@ -1164,22 +1181,6 @@ struct hl_cs_parser { u8 contains_dma_pkt; }; -/** - * struct hl_info_cs_counters - command submission counters - * @out_of_mem_drop_cnt: dropped due to memory allocation issue - * @parsing_drop_cnt: dropped due to error in packet parsing - * @queue_full_drop_cnt: dropped due to queue full - * @device_in_reset_drop_cnt: dropped due to device in reset - * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight - */ -struct hl_cs_counters_atomic { - atomic64_t out_of_mem_drop_cnt; - atomic64_t parsing_drop_cnt; - atomic64_t queue_full_drop_cnt; - atomic64_t 
device_in_reset_drop_cnt; - atomic64_t max_cs_in_flight_drop_cnt; -}; - /* * MEMORY STRUCTURE */ diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 350a768309bd..1d8bea626e78 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -315,7 +315,7 @@ static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args) static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) { void __user *out = (void __user *) (uintptr_t) args->return_pointer; - struct hl_info_cs_counters cs_counters = { {0} }; + struct hl_info_cs_counters cs_counters = {0}; struct hl_device *hdev = hpriv->hdev; struct hl_cs_counters_atomic *cntr; u32 max_size = args->return_size; @@ -325,23 +325,34 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args) if ((!max_size) || (!out)) return -EINVAL; - memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters, - sizeof(struct hl_cs_counters)); - - cs_counters.cs_counters.out_of_mem_drop_cnt = + cs_counters.total_out_of_mem_drop_cnt = atomic64_read(&cntr->out_of_mem_drop_cnt); - cs_counters.cs_counters.parsing_drop_cnt = + cs_counters.total_parsing_drop_cnt = atomic64_read(&cntr->parsing_drop_cnt); - cs_counters.cs_counters.queue_full_drop_cnt = + cs_counters.total_queue_full_drop_cnt = atomic64_read(&cntr->queue_full_drop_cnt); - cs_counters.cs_counters.device_in_reset_drop_cnt = + cs_counters.total_device_in_reset_drop_cnt = atomic64_read(&cntr->device_in_reset_drop_cnt); - cs_counters.cs_counters.max_cs_in_flight_drop_cnt = + cs_counters.total_max_cs_in_flight_drop_cnt = atomic64_read(&cntr->max_cs_in_flight_drop_cnt); - if (hpriv->ctx) - memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters, - sizeof(struct hl_cs_counters)); + if (hpriv->ctx) { + cs_counters.ctx_out_of_mem_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.out_of_mem_drop_cnt); + cs_counters.ctx_parsing_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.parsing_drop_cnt); + cs_counters.ctx_queue_full_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.queue_full_drop_cnt); + cs_counters.ctx_device_in_reset_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.device_in_reset_drop_cnt); + cs_counters.ctx_max_cs_in_flight_drop_cnt = + atomic64_read( + &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt); + } return copy_to_user(out, &cs_counters, min((size_t) max_size, sizeof(cs_counters))) ? 
-EFAULT : 0; diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index d9448375beac..44155a6e557f 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -523,7 +523,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) hdev->asic_funcs->hw_queues_lock(hdev); if (hl_device_disabled_or_in_reset(hdev)) { - ctx->cs_counters.device_in_reset_drop_cnt++; + atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt); atomic64_inc(&cntr->device_in_reset_drop_cnt); dev_err(hdev->dev, "device is disabled or in reset, CS rejected!\n"); @@ -557,7 +557,8 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) } if (rc) { - ctx->cs_counters.queue_full_drop_cnt++; + atomic64_inc( + &ctx->cs_counters.queue_full_drop_cnt); atomic64_inc(&cntr->queue_full_drop_cnt); goto unroll_cq_resv; } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index ab8c9463932f..e640c9fcc932 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1137,7 +1137,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev, hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id]; job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true); if (!job) { - ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); atomic64_inc(&cntr->out_of_mem_drop_cnt); dev_err(hdev->dev, "Failed to allocate a new job\n"); return -ENOMEM; @@ -1147,7 +1147,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev, cb = hl_cb_kernel_create(hdev, cb_size, hdev->mmu_enable && !patched_cb); if (!cb) { - ctx->cs_counters.out_of_mem_drop_cnt++; + atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt); atomic64_inc(&cntr->out_of_mem_drop_cnt); kfree(job); return -EFAULT; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 0185311b679b..61f8f9144b54 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -407,23 +407,28 @@ struct hl_info_sync_manager { /** * struct hl_info_cs_counters - command submission counters - * @out_of_mem_drop_cnt: dropped due to memory allocation issue - * @parsing_drop_cnt: dropped due to error in packet parsing - * @queue_full_drop_cnt: dropped due to queue full - * @device_in_reset_drop_cnt: dropped due to device in reset - * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight + * @total_out_of_mem_drop_cnt: total dropped due to memory allocation issue + * @ctx_out_of_mem_drop_cnt: context dropped due to memory allocation issue + * @total_parsing_drop_cnt: total dropped due to error in packet parsing + * @ctx_parsing_drop_cnt: context dropped due to error in packet parsing + * @total_queue_full_drop_cnt: total dropped due to queue full + * @ctx_queue_full_drop_cnt: context dropped due to queue full + * @total_device_in_reset_drop_cnt: total dropped due to device in reset + * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset + * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight + * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight */ -struct hl_cs_counters { - __u64 out_of_mem_drop_cnt; - __u64 parsing_drop_cnt; - __u64 queue_full_drop_cnt; - __u64 device_in_reset_drop_cnt; - __u64 max_cs_in_flight_drop_cnt; -}; - struct hl_info_cs_counters { - struct hl_cs_counters cs_counters; - struct hl_cs_counters ctx_cs_counters; + __u64 total_out_of_mem_drop_cnt; + __u64 
ctx_out_of_mem_drop_cnt; + __u64 total_parsing_drop_cnt; + __u64 ctx_parsing_drop_cnt; + __u64 total_queue_full_drop_cnt; + __u64 ctx_queue_full_drop_cnt; + __u64 total_device_in_reset_drop_cnt; + __u64 ctx_device_in_reset_drop_cnt; + __u64 total_max_cs_in_flight_drop_cnt; + __u64 ctx_max_cs_in_flight_drop_cnt; }; enum gaudi_dcores { -- cgit v1.2.3 From 323b726706be3b1e547d2662dd77611b8bd82a3a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 4 Oct 2020 09:09:19 +0300 Subject: habanalabs: fetch security indication from FW Add support for fetching security indication from FW. This indication is needed in order to skip unnecessary initializations done by FW. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 60 +++++++++++++++--- drivers/misc/habanalabs/common/habanalabs.h | 23 +++++-- drivers/misc/habanalabs/common/pci.c | 10 ++- drivers/misc/habanalabs/gaudi/gaudi.c | 15 +++-- drivers/misc/habanalabs/goya/goya.c | 17 +++-- drivers/misc/habanalabs/include/common/cpucp_if.h | 30 +++++++++ .../misc/habanalabs/include/common/hl_boot_if.h | 74 ++++++++++++++++++++++ .../misc/habanalabs/include/gaudi/gaudi_reg_map.h | 2 + .../misc/habanalabs/include/goya/goya_reg_map.h | 2 + 9 files changed, 204 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 647606f6cc2a..8de6a8690b1b 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -448,9 +448,10 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) return rc; } -static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg) +static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, + u32 cpu_security_boot_status_reg) { - u32 err_val; + u32 err_val, security_val; /* Some of the firmware status codes are deprecated in newer f/w * versions. In those versions, the errors are reported @@ -485,6 +486,11 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg) if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) dev_err(hdev->dev, "Device boot error - NIC F/W initialization failed\n"); + + security_val = RREG32(cpu_security_boot_status_reg); + if (security_val & CPU_BOOT_DEV_STS0_ENABLED) + dev_info(hdev->dev, "Device security status %#x\n", + security_val); } static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) @@ -537,10 +543,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status) } } -int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 timeout) +int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 timeout) { - u32 status; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 status, security_status; int rc; if (!hdev->cpu_enable) @@ -570,19 +578,43 @@ int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, if (rc) { dev_err(hdev->dev, "Failed to read preboot version\n"); detect_cpu_boot_status(hdev, status); - fw_read_errors(hdev, boot_err0_reg); + fw_read_errors(hdev, boot_err0_reg, + cpu_security_boot_status_reg); return -EIO; } hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + security_status = RREG32(cpu_security_boot_status_reg); + + /* We read security status multiple times during boot: + * 1. 
preboot - we check if fw security feature is supported + * 2. boot cpu - we get boot cpu security status + * 3. FW application - we get FW application security status + * + * Preboot: + * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set + * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN) + */ + if (security_status & CPU_BOOT_DEV_STS0_ENABLED) { + hdev->asic_prop.fw_security_status_valid = 1; + prop->fw_security_disabled = + !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN); + } else { + hdev->asic_prop.fw_security_status_valid = 0; + prop->fw_security_disabled = true; + } + + dev_info(hdev->dev, "firmware-level security is %s\n", + prop->fw_security_disabled ? "disabled" : "enabled"); + return 0; } int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, - u32 boot_err0_reg, bool skip_bmc, - u32 cpu_timeout, u32 boot_fit_timeout) + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout) { u32 status; int rc; @@ -652,6 +684,11 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, /* Read U-Boot version now in case we will later fail */ hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT); + /* Read boot_cpu security bits */ + if (hdev->asic_prop.fw_security_status_valid) + hdev->asic_prop.fw_boot_cpu_security_map = + RREG32(cpu_security_boot_status_reg); + if (rc) { detect_cpu_boot_status(hdev, status); rc = -EIO; @@ -720,10 +757,15 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, goto out; } + /* Read FW application security bits */ + if (hdev->asic_prop.fw_security_status_valid) + hdev->asic_prop.fw_app_security_map = + RREG32(cpu_security_boot_status_reg); + dev_info(hdev->dev, "Successfully loaded firmware to device\n"); out: - fw_read_errors(hdev, boot_err0_reg); + fw_read_errors(hdev, boot_err0_reg, cpu_security_boot_status_reg); return rc; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index b1f20f225ff9..0b2773fc4768 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -372,6 +372,12 @@ struct hl_mmu_properties { * @cb_pool_cb_size: size of each CB in the CB pool. * @max_pending_cs: maximum of concurrent pending command submissions * @max_queues: maximum amount of queues in the system + * @fw_boot_cpu_security_map: bitmap representation of boot cpu security status + * reported by FW, bit description can be found in + * CPU_BOOT_DEV_STS* + * @fw_app_security_map: bitmap representation of application security status + * reported by FW, bit description can be found in + * CPU_BOOT_DEV_STS* * @collective_first_sob: first sync object available for collective use * @collective_first_mon: first monitor available for collective use * @sync_stream_first_sob: first sync object available for sync stream use @@ -382,6 +388,8 @@ struct hl_mmu_properties { * @completion_queues_count: number of completion queues. 
* @fw_security_disabled: true if security measures are disabled in firmware, * false otherwise + * @fw_security_status_valid: security status bits are valid and can be fetched + * from BOOT_DEV_STS0 */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -426,6 +434,8 @@ struct asic_fixed_properties { u32 cb_pool_cb_size; u32 max_pending_cs; u32 max_queues; + u32 fw_boot_cpu_security_map; + u32 fw_app_security_map; u16 collective_first_sob; u16 collective_first_mon; u16 sync_stream_first_sob; @@ -435,6 +445,7 @@ struct asic_fixed_properties { u8 tpc_enabled_mask; u8 completion_queues_count; u8 fw_security_disabled; + u8 fw_security_status_valid; }; /** @@ -2050,10 +2061,11 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy); int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, - u32 boot_err0_reg, bool skip_bmc, - u32 cpu_timeout, u32 boot_fit_timeout); -int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 timeout); + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout); +int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 timeout); int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3], bool is_wc[3]); @@ -2063,7 +2075,8 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, int hl_pci_set_outbound_region(struct hl_device *hdev, struct hl_outbound_pci_region *pci_region); int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 preboot_ver_timeout); + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 preboot_ver_timeout); void hl_pci_fini(struct hl_device *hdev); long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c index 4327e5704ebb..211f3190f8d7 100644 --- a/drivers/misc/habanalabs/common/pci.c +++ b/drivers/misc/habanalabs/common/pci.c @@ -339,6 +339,8 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev) * hl_pci_init() - PCI initialization code. * @hdev: Pointer to hl_device structure. * @cpu_boot_status_reg: status register of the device's CPU + * @cpu_security_boot_status_reg: status register of device's CPU security + * configuration * @boot_err0_reg: boot error register of the device's CPU * @preboot_ver_timeout: how much to wait before bailing out on reading * the preboot version @@ -348,7 +350,8 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev) * Return: 0 on success, non-zero for failure. 
*/ int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 boot_err0_reg, u32 preboot_ver_timeout) + u32 cpu_security_boot_status_reg, u32 boot_err0_reg, + u32 preboot_ver_timeout) { struct pci_dev *pdev = hdev->pdev; int rc; @@ -384,8 +387,9 @@ int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, * version to determine whether we run with a security-enabled firmware * The check will be done in each ASIC's specific code */ - rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg, - preboot_ver_timeout); + rc = hl_fw_read_preboot_status(hdev, cpu_boot_status_reg, + cpu_security_boot_status_reg, boot_err0_reg, + preboot_ver_timeout); if (rc) goto unmap_pci_bars; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index e640c9fcc932..d5ca52ae854e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -514,6 +514,10 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->sync_stream_first_mon + (num_sync_stream_queues * HL_RSVD_MONS); + /* disable fw security for now, set it in a later stage */ + prop->fw_security_disabled = true; + prop->fw_security_status_valid = false; + return 0; } @@ -638,13 +642,13 @@ static int gaudi_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_ERR0, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); if (rc) goto free_queue_props; - /* GAUDI Firmware does not yet support security */ - prop->fw_security_disabled = true; - dev_info(hdev->dev, "firmware-level security is disabled\n"); + dev_info(hdev->dev, "firmware-level security is %s\n", + hdev->asic_prop.fw_security_disabled ? 
"disabled" : "enabled"); return 0; @@ -2315,7 +2319,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev) int tpc_id, i; gaudi_init_e2e(hdev); - gaudi_init_hbm_cred(hdev); hdev->asic_funcs->disable_clock_gating(hdev); @@ -3596,7 +3599,7 @@ static int gaudi_init_cpu(struct hl_device *hdev) rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, mmCPU_CMD_STATUS_TO_HOST, - mmCPU_BOOT_ERR0, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, !hdev->bmc_enable, GAUDI_CPU_TIMEOUT_USEC, GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ec5dd159f54e..9fc141976052 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -456,6 +456,10 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->max_pending_cs = GOYA_MAX_PENDING_CS; + /* disable fw security for now, set it in a later stage */ + prop->fw_security_disabled = true; + prop->fw_security_status_valid = false; + return 0; } @@ -601,14 +605,11 @@ static int goya_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_ERR0, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); if (rc) goto free_queue_props; - /* Goya Firmware does not support security */ - prop->fw_security_disabled = true; - dev_info(hdev->dev, "firmware-level security is disabled\n"); - if (!hdev->pldm) { val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK) @@ -616,6 +617,9 @@ static int goya_early_init(struct hl_device *hdev) "PCI strap is not configured correctly, PCI bus errors may occur\n"); } + dev_info(hdev->dev, "firmware-level security is %s\n", + hdev->asic_prop.fw_security_disabled ? "disabled" : "enabled"); + return 0; free_queue_props: @@ -2397,7 +2401,8 @@ static int goya_init_cpu(struct hl_device *hdev) rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, - mmCPU_CMD_STATUS_TO_HOST, mmCPU_BOOT_ERR0, + mmCPU_CMD_STATUS_TO_HOST, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, false, GOYA_CPU_TIMEOUT_USEC, GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 782b8b8636be..1c1e2b394457 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -420,6 +420,29 @@ enum cpucp_card_types { cpucp_card_type_pmc }; +#define CPUCP_SEC_CONF_ENABLED_SHIFT 0 +#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001 + +#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1 +#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002 + +#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2 +#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004 + +/** + * struct cpucp_security_info - Security information. + * @config: configuration bit field + * @keys_num: number of stored keys + * @revoked_keys: revoked keys bit field + * @min_svn: minimal security version + */ +struct cpucp_security_info { + __u8 config; + __u8 keys_num; + __u8 revoked_keys; + __u8 min_svn; +}; + /** * struct cpucp_info - Info from CpuCP that is necessary to the host's driver * @sensors: available sensors description. @@ -435,6 +458,7 @@ enum cpucp_card_types { * @cpucp_version: CpuCP S/W version. * @dram_size: available DRAM size. 
* @card_name: card name that will be displayed in HWMON subsystem on the host + * @sec_info: security information */ struct cpucp_info { struct cpucp_sensor sensors[CPUCP_MAX_SENSORS]; @@ -450,6 +474,12 @@ struct cpucp_info { __le32 reserved2; __le64 dram_size; char card_name[CARD_NAME_MAX_LEN]; + __le64 reserved3; + __le64 reserved4; + __u8 reserved5; + __u8 pad[7]; + struct cpucp_security_info sec_info; + __le32 reserved6; }; struct cpucp_mac_addr { diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index bb67cafc6e00..d928ad93cd80 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -53,6 +53,9 @@ * trust), boot authentication (chain of * trust), data packets authentication. * + * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed. + * The PCI device ID might be wrong. + * * CPU_BOOT_ERR0_ENABLED Error registers enabled. * This is a main indication that the * running FW populates the error @@ -68,8 +71,79 @@ #define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6) #define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7) #define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8) +#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9) #define CPU_BOOT_ERR0_ENABLED (1 << 31) +/* + * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers + * + * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled. + * This is an indication for security + * enabled in FW, which means that + * all conditions for security are met: + * device is indicated as security enabled, + * registers are protected, and device + * uses keys for image verification. + * Initialized at: preboot + * + * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled. + * Enabled when JTAG or DEBUG is enabled + * in FW. + * Initialized at: preboot + * + * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled. + * Watchdog is enabled in FW. + * Initialized at: preboot + * + * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled. + * DRAM initialization has been done in FW. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled. + * If set, it means that during boot, + * FW waited for BMC data. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized. + * FW initialized E2E credits. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized. + * FW initialized HBM credits. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized. + * FW initialized rate limiter. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled. + * FW initialized SRAM scrambler. + * Initialized at: linux + * + * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled. + * FW initialized DRAM scrambler. + * Initialized at: u-boot + * + * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. + * This is a main indication that the + * running FW populates the device status + * register. Meaning the device status + * bits are not garbage, but actual + * statuses. 
+ * Initialized at: preboot + */ +#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0) +#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1) +#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << 2) +#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << 3) +#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << 4) +#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << 5) +#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << 6) +#define CPU_BOOT_DEV_STS0_RL_EN (1 << 7) +#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8) +#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9) +#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) + enum cpu_boot_status { CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */ CPU_BOOT_STATUS_IN_WFE = 1, diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h index 977fb341a6e7..137afedf5f15 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h @@ -12,6 +12,8 @@ * PSOC scratch-pad registers */ #define mmHW_STATE mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 +#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 +#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 #define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 #define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 #define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/misc/habanalabs/include/goya/goya_reg_map.h index e56124265a05..f3ab282cafa4 100644 --- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h +++ b/drivers/misc/habanalabs/include/goya/goya_reg_map.h @@ -22,6 +22,8 @@ #define mmCPU_CQ_BASE_ADDR_LOW mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 #define mmCPU_CQ_BASE_ADDR_HIGH mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 #define mmCPU_CQ_LENGTH mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 +#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 +#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 #define mmFUSE_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 #define mmCPU_CMD_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 #define mmCPU_BOOT_ERR0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 -- cgit v1.2.3 From c692dec70379526b0bb09f94467bbd456859dcad Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 4 Oct 2020 17:34:37 +0300 Subject: habanalabs/gaudi: add support for FW security Skip relevant HW configurations once FW security is enabled because these configurations are being performed by FW. 
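The same "skip if firmware owns it" test repeats throughout the Gaudi init paths touched below. The helper sketched here is hypothetical and only condenses that pattern for readability; the patch open-codes it and consults either fw_boot_cpu_security_map or fw_app_security_map depending on which firmware stage performs the step (the boot-CPU map is used here for brevity).

static bool gaudi_fw_owns_config(struct hl_device *hdev, u32 fw_done_bit)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* Secured firmware owns these registers, so the driver must not
	 * touch them at all.
	 */
	if (!prop->fw_security_disabled)
		return true;

	/* Firmware published valid status bits and reported that it
	 * already performed this initialization step.
	 */
	if (prop->fw_security_status_valid &&
			(prop->fw_boot_cpu_security_map & fw_done_bit))
		return true;

	return false;
}

With such a helper, gaudi_init_e2e() for example would reduce to a single early return on gaudi_fw_owns_config(hdev, CPU_BOOT_DEV_STS0_E2E_CRED_EN).
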
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 123 ++++++++++++++++--------- drivers/misc/habanalabs/gaudi/gaudi_security.c | 83 +++++++++-------- 2 files changed, 128 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index d5ca52ae854e..a7321c789d39 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1406,7 +1406,8 @@ static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev) hdev->cpu_pci_msb_addr = GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address); - GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); + if (hdev->asic_prop.fw_security_disabled) + GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address); free_dma_mem_arr: for (j = 0 ; j < i ; j++) @@ -1560,8 +1561,9 @@ static int gaudi_sw_init(struct hl_device *hdev) free_cpu_accessible_dma_pool: gen_pool_destroy(hdev->cpu_accessible_dma_pool); free_cpu_dma_mem: - GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, - hdev->cpu_pci_msb_addr); + if (hdev->asic_prop.fw_security_disabled) + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + hdev->cpu_pci_msb_addr); hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, @@ -1581,8 +1583,10 @@ static int gaudi_sw_fini(struct hl_device *hdev) gen_pool_destroy(hdev->cpu_accessible_dma_pool); - GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, + if (hdev->asic_prop.fw_security_disabled) + GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address, hdev->cpu_pci_msb_addr); + hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, @@ -1768,6 +1772,14 @@ static void gaudi_init_scrambler_sram(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_SRAM_SCR_EN)) + return; + if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER) return; @@ -1832,6 +1844,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev) { struct gaudi_device *gaudi = hdev->asic_specific; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_DRAM_SCR_EN)) + return; + if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER) return; @@ -1894,6 +1914,14 @@ static void gaudi_init_scrambler_hbm(struct hl_device *hdev) static void gaudi_init_e2e(struct hl_device *hdev) { + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_E2E_CRED_EN)) + return; + WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3); WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3); WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49); @@ -2261,6 +2289,14 @@ static void gaudi_init_hbm_cred(struct hl_device *hdev) { uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd; + if (!hdev->asic_prop.fw_security_disabled) + return; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_boot_cpu_security_map & + CPU_BOOT_DEV_STS0_HBM_CRED_EN)) + return; + hbm0_wr = 0x33333333; hbm0_rd = 0x77777777; hbm1_wr = 0x55555555; @@ -3594,7 +3630,8 @@ static int gaudi_init_cpu(struct hl_device *hdev) * The device CPU 
works with 40 bits addresses. * This register sets the extension to 50 bits. */ - WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); + if (hdev->asic_prop.fw_security_disabled) + WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr); rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, @@ -3679,17 +3716,19 @@ static void gaudi_pre_hw_init(struct hl_device *hdev) /* Perform read from the device to make sure device is up */ RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); - /* Set the access through PCI bars (Linux driver only) as - * secured - */ - WREG32(mmPCIE_WRAP_LBW_PROT_OVR, - (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK | - PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK)); + if (hdev->asic_prop.fw_security_disabled) { + /* Set the access through PCI bars (Linux driver only) as + * secured + */ + WREG32(mmPCIE_WRAP_LBW_PROT_OVR, + (PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK | + PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK)); - /* Perform read to flush the waiting writes to ensure - * configuration was set in the device - */ - RREG32(mmPCIE_WRAP_LBW_PROT_OVR); + /* Perform read to flush the waiting writes to ensure + * configuration was set in the device + */ + RREG32(mmPCIE_WRAP_LBW_PROT_OVR); + } /* * Let's mark in the H/W that we have reached this point. We check @@ -3698,32 +3737,33 @@ static void gaudi_pre_hw_init(struct hl_device *hdev) * cleared by the H/W upon H/W reset */ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY); + if (hdev->asic_prop.fw_security_disabled) { + /* Configure the reset registers. Must be done as early as + * possible in case we fail during H/W initialization + */ + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, + (CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_TPC_7_MASK)); - /* Configure the reset registers. 
Must be done as early as possible - * in case we fail during H/W initialization - */ - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, - (CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_TPC_7_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, - (CFG_RST_H_HBM_MASK | - CFG_RST_H_TPC_7_MASK | - CFG_RST_H_NIC_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_CPU_MASK | - CFG_RST_H_MMU_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, - (CFG_RST_L_IF_MASK | - CFG_RST_L_PSOC_MASK | - CFG_RST_L_TPC_MASK)); + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, + (CFG_RST_H_HBM_MASK | + CFG_RST_H_TPC_7_MASK | + CFG_RST_H_NIC_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_CPU_MASK | + CFG_RST_H_MMU_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, + (CFG_RST_L_IF_MASK | + CFG_RST_L_PSOC_MASK | + CFG_RST_L_TPC_MASK)); + } } static int gaudi_hw_init(struct hl_device *hdev) @@ -3839,7 +3879,8 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2); /* Restart BTL/BLR upon hard-reset */ - WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); + if (hdev->asic_prop.fw_security_disabled) + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT); diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c index 8a921ab56557..e10181692d0b 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_security.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c @@ -1448,21 +1448,23 @@ static void gaudi_init_dma_protection_bits(struct hl_device *hdev) u32 pb_addr, mask; u8 word_offset; - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE); - - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE); - - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE); - gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_S_DOWN_BASE); + + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_W_N_DOWN_BASE); + + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH0_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_CH1_BASE); + gaudi_pb_set_block(hdev, mmDMA_IF_E_N_DOWN_BASE); + } WREG32(mmDMA0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); WREG32(mmDMA1_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); @@ -9133,14 +9135,16 @@ static void gaudi_init_tpc_protection_bits(struct hl_device *hdev) u32 
pb_addr, mask; u8 word_offset; - gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE); - gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmTPC0_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC1_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC2_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC3_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC4_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC5_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC6_E2E_CRED_BASE); + gaudi_pb_set_block(hdev, mmTPC7_E2E_CRED_BASE); + } WREG32(mmTPC0_QM_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); WREG32(mmTPC0_CFG_BASE - CFG_BASE + PROT_BITS_OFFS + 0x7C, 0); @@ -12822,11 +12826,13 @@ static void gaudi_init_protection_bits(struct hl_device *hdev) * secured */ - gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE); - gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE); - gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE); - gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE); + if (hdev->asic_prop.fw_security_disabled) { + gaudi_pb_set_block(hdev, mmIF_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_W_PLL_BASE); + gaudi_pb_set_block(hdev, mmMESH_E_PLL_BASE); + gaudi_pb_set_block(hdev, mmSRAM_E_PLL_BASE); + } gaudi_init_dma_protection_bits(hdev); @@ -13025,17 +13031,20 @@ void gaudi_init_security(struct hl_device *hdev) * property configuration of MME SBAB and ACC to be non-privileged and * non-secured */ - WREG32(mmMME0_SBAB_PROT, 0x2); - WREG32(mmMME0_ACC_PROT, 0x2); - WREG32(mmMME1_SBAB_PROT, 0x2); - WREG32(mmMME1_ACC_PROT, 0x2); - WREG32(mmMME2_SBAB_PROT, 0x2); - WREG32(mmMME2_ACC_PROT, 0x2); - WREG32(mmMME3_SBAB_PROT, 0x2); - WREG32(mmMME3_ACC_PROT, 0x2); + if (hdev->asic_prop.fw_security_disabled) { + WREG32(mmMME0_SBAB_PROT, 0x2); + WREG32(mmMME0_ACC_PROT, 0x2); + WREG32(mmMME1_SBAB_PROT, 0x2); + WREG32(mmMME1_ACC_PROT, 0x2); + WREG32(mmMME2_SBAB_PROT, 0x2); + WREG32(mmMME2_ACC_PROT, 0x2); + WREG32(mmMME3_SBAB_PROT, 0x2); + WREG32(mmMME3_ACC_PROT, 0x2); + } /* On RAZWI, 0 will be returned from RR and 0xBABA0BAD from PB */ - WREG32(0xC01B28, 0x1); + if (hdev->asic_prop.fw_security_disabled) + WREG32(0xC01B28, 0x1); gaudi_init_range_registers_lbw(hdev); -- cgit v1.2.3 From 03df136bc5dd5be111c6c4b8dc265615e190fe75 Mon Sep 17 00:00:00 2001 From: farah kassabri Date: Wed, 6 May 2020 11:17:38 +0300 Subject: habanalabs/gaudi: scrub all memory upon closing FD In cases of multi-tenants, administrators may want to prevent data leakage between users running on the same device one after another. To do that the driver can scrub the internal memory (both SRAM and DRAM) after a user finish to use the memory. Because in GAUDI the driver allows only one application to use the device at a time, it can scrub the memory when user app close FD. In future devices where we have MMU on the DRAM, we can scrub the DRAM memory with a finer granularity (page granularity) when the user allocates the memory. This feature is not supported in Goya. To allow users that want to debug their applications, we add a kernel module parameter to load the driver with this feature disabled. 
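The contract of the new scrub_device_mem() ASIC hook can be summarized by the two illustrative callers below; the example_* functions are not new driver code, they only condense the hl_ctx_fini() and alloc_device_memory() hunks that follow.

/* addr == 0 && size == 0 asks the ASIC to scrub everything it owns;
 * this is what hl_ctx_fini() requests when a user context is closed.
 * The Gaudi implementation bails out early when memory_scrub is 0.
 */
static void example_ctx_teardown(struct hl_device *hdev)
{
	hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
}

/* A non-zero range scrubs only the device pages that were just
 * allocated for the user; the common code skips the call entirely
 * when the memory_scrub module parameter (default on) is 0.
 */
static int example_post_alloc(struct hl_device *hdev, u64 paddr, u64 size)
{
	if (!hdev->memory_scrub)
		return 0;

	return hdev->asic_funcs->scrub_device_mem(hdev, paddr, size);
}

Loading the driver with memory_scrub=0 keeps this behavior off, which is the debug escape hatch mentioned above.
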
Signed-off-by: farah kassabri Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/context.c | 3 + drivers/misc/habanalabs/common/habanalabs.h | 5 + drivers/misc/habanalabs/common/habanalabs_drv.c | 6 ++ drivers/misc/habanalabs/common/memory.c | 23 ++++- drivers/misc/habanalabs/gaudi/gaudi.c | 131 +++++++++++++++++++++--- drivers/misc/habanalabs/goya/goya.c | 6 ++ drivers/misc/habanalabs/goya/goyaP.h | 1 + 7 files changed, 161 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c index 2077bbe3606a..f65e6559149b 100644 --- a/drivers/misc/habanalabs/common/context.c +++ b/drivers/misc/habanalabs/common/context.c @@ -45,6 +45,9 @@ static void hl_ctx_fini(struct hl_ctx *ctx) hl_vm_ctx_fini(ctx); hl_asid_free(hdev, ctx->asid); + /* Scrub both SRAM and DRAM */ + hdev->asic_funcs->scrub_device_mem(hdev, 0, 0); + if ((!hdev->pldm) && (hdev->pdev) && (!hdev->asic_funcs->is_device_idle(hdev, &idle_mask, NULL))) diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 0b2773fc4768..f7a786dba9bc 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -746,6 +746,7 @@ enum div_select_defs { * dma_free_coherent(). This is ASIC function because * its implementation is not trivial when the driver * is loaded in simulation mode (not upstreamed). + * @scrub_device_mem: Scrub device memory given an address and size * @get_int_queue_base: get the internal queue base address. * @test_queues: run simple test on all queues for sanity check. * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. @@ -837,6 +838,7 @@ struct hl_asic_funcs { dma_addr_t *dma_handle, gfp_t flag); void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, void *cpu_addr, dma_addr_t dma_handle); + int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size); void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len); int (*test_queues)(struct hl_device *hdev); @@ -1698,6 +1700,8 @@ struct hl_mmu_funcs { * otherwise. * @dram_supports_virtual_memory: is MMU enabled towards DRAM. * @dram_default_page_mapping: is DRAM default page mapping enabled. + * @memory_scrub: true to perform device memory scrub in various locations, + * such as context-switch, context close, page free, etc. * @pmmu_huge_range: is a different virtual addresses range used for PMMU with * huge pages. * @init_done: is the initialization of the device done. 
@@ -1802,6 +1806,7 @@ struct hl_device { u8 reset_on_lockup; u8 dram_supports_virtual_memory; u8 dram_default_page_mapping; + u8 memory_scrub; u8 pmmu_huge_range; u8 init_done; u8 device_cpu_disabled; diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index 4c49d6cefa98..20458bd82c5a 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -29,6 +29,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock); static int timeout_locked = 5; static int reset_on_lockup = 1; +static int memory_scrub = 1; module_param(timeout_locked, int, 0444); MODULE_PARM_DESC(timeout_locked, @@ -38,6 +39,10 @@ module_param(reset_on_lockup, int, 0444); MODULE_PARM_DESC(reset_on_lockup, "Do device reset on lockup (0 = no, 1 = yes, default yes)"); +module_param(memory_scrub, int, 0444); +MODULE_PARM_DESC(memory_scrub, + "Scrub device memory in various states (0 = no, 1 = yes, default yes)"); + #define PCI_VENDOR_ID_HABANALABS 0x1da3 #define PCI_IDS_GOYA 0x0001 @@ -284,6 +289,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, hdev->major = hl_major; hdev->reset_on_lockup = reset_on_lockup; + hdev->memory_scrub = memory_scrub; hdev->pldm = 0; set_driver_behavior_per_device(hdev); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index f885812d9939..75e269bc42a7 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -46,7 +46,7 @@ * @ret_handle : result handle * * This function does the following: - * - Allocate the requested size rounded up to 2MB pages + * - Allocate the requested size rounded up to 'dram_page_size' pages * - Return unique handle */ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, @@ -81,6 +81,16 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, num_pgs, total_size); return -ENOMEM; } + + if (hdev->memory_scrub) { + rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr, + total_size); + if (rc) { + dev_err(hdev->dev, + "Failed to scrub contiguous device memory\n"); + goto pages_pack_err; + } + } } phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); @@ -118,6 +128,17 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, goto page_err; } + if (hdev->memory_scrub) { + rc = hdev->asic_funcs->scrub_device_mem(hdev, + phys_pg_pack->pages[i], + page_size); + if (rc) { + dev_err(hdev->dev, + "Failed to scrub device memory\n"); + goto page_err; + } + } + num_curr_pgs++; } } diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index a7321c789d39..f5b211416e7f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -103,6 +103,8 @@ BIT(GAUDI_ENGINE_ID_MME_2) |\ GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0)) +#define HBM_SCRUBBING_TIMEOUT_US 1000000 /* 1s */ + static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -4389,6 +4391,121 @@ static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); } +static int gaudi_hbm_scrubbing(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 cur_addr = DRAM_BASE_ADDR_USER; + u32 val; + u32 chunk_size; + int rc, dma_id; + 
+ while (cur_addr < prop->dram_end_address) { + for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) { + u32 dma_offset = dma_id * DMA_CORE_OFFSET; + + chunk_size = + min((u64)SZ_2G, prop->dram_end_address - cur_addr); + + dev_dbg(hdev->dev, + "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", + cur_addr, cur_addr + chunk_size); + + WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0); + WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0); + WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, + lower_32_bits(cur_addr)); + WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, + upper_32_bits(cur_addr)); + WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, + chunk_size); + WREG32(mmDMA0_CORE_COMMIT + dma_offset, + ((1 << DMA0_CORE_COMMIT_LIN_SHIFT) | + (1 << DMA0_CORE_COMMIT_MEM_SET_SHIFT))); + + cur_addr += chunk_size; + + if (cur_addr == prop->dram_end_address) + break; + } + + for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) { + u32 dma_offset = dma_id * DMA_CORE_OFFSET; + + rc = hl_poll_timeout( + hdev, + mmDMA0_CORE_STS0 + dma_offset, + val, + ((val & DMA0_CORE_STS0_BUSY_MASK) == 0), + 1000, + HBM_SCRUBBING_TIMEOUT_US); + + if (rc) { + dev_err(hdev->dev, + "DMA Timeout during HBM scrubbing of DMA #%d\n", + dma_id); + return -EIO; + } + } + } + + return 0; +} + +static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct gaudi_device *gaudi = hdev->asic_specific; + u64 idle_mask = 0; + int rc = 0; + u64 val = 0; + + if (!hdev->memory_scrub) + return 0; + + if (!addr && !size) { + /* Wait till device is idle */ + rc = hl_poll_timeout( + hdev, + mmDMA0_CORE_STS0/* dummy */, + val/* dummy */, + (hdev->asic_funcs->is_device_idle(hdev, + &idle_mask, NULL)), + 1000, + HBM_SCRUBBING_TIMEOUT_US); + if (rc) { + dev_err(hdev->dev, "waiting for idle timeout\n"); + return -EIO; + } + + /* Scrub SRAM */ + addr = prop->sram_user_base_address; + size = hdev->pldm ? 0x10000 : + (prop->sram_size - SRAM_USER_BASE_OFFSET); + val = 0x7777777777777777ull; + + rc = gaudi_memset_device_memory(hdev, addr, size, val); + if (rc) { + dev_err(hdev->dev, + "Failed to clear SRAM in mem scrub all\n"); + return rc; + } + + mutex_lock(&gaudi->clk_gate_mutex); + hdev->asic_funcs->disable_clock_gating(hdev); + + /* Scrub HBM using all DMA channels in parallel */ + rc = gaudi_hbm_scrubbing(hdev); + if (rc) + dev_err(hdev->dev, + "Failed to clear HBM in mem scrub all\n"); + + hdev->asic_funcs->set_clock_gating(hdev); + mutex_unlock(&gaudi->clk_gate_mutex); + } + + return rc; +} + static void *gaudi_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len) @@ -5489,19 +5606,6 @@ static void gaudi_restore_user_registers(struct hl_device *hdev) static int gaudi_context_switch(struct hl_device *hdev, u32 asid) { - struct asic_fixed_properties *prop = &hdev->asic_prop; - u64 addr = prop->sram_user_base_address; - u32 size = hdev->pldm ? 
0x10000 : - (prop->sram_size - SRAM_USER_BASE_OFFSET); - u64 val = 0x7777777777777777ull; - int rc; - - rc = gaudi_memset_device_memory(hdev, addr, size, val); - if (rc) { - dev_err(hdev->dev, "Failed to clear SRAM in context switch\n"); - return rc; - } - gaudi_restore_user_registers(hdev); return 0; @@ -8099,6 +8203,7 @@ static const struct hl_asic_funcs gaudi_funcs = { .pqe_write = gaudi_pqe_write, .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent, .asic_dma_free_coherent = gaudi_dma_free_coherent, + .scrub_device_mem = gaudi_scrub_device_mem, .get_int_queue_base = gaudi_get_int_queue_base, .test_queues = gaudi_test_queues, .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 9fc141976052..16046dd31e72 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2795,6 +2795,11 @@ static void goya_dma_free_coherent(struct hl_device *hdev, size_t size, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); } +int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size) +{ + return 0; +} + void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len) { @@ -5370,6 +5375,7 @@ static const struct hl_asic_funcs goya_funcs = { .pqe_write = goya_pqe_write, .asic_dma_alloc_coherent = goya_dma_alloc_coherent, .asic_dma_free_coherent = goya_dma_free_coherent, + .scrub_device_mem = goya_scrub_device_mem, .get_int_queue_base = goya_get_int_queue_base, .test_queues = goya_test_queues, .asic_dma_pool_zalloc = goya_dma_pool_zalloc, diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index def86c75e035..ef4298f84a0a 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -221,6 +221,7 @@ void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address, u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec, bool eb); int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser); +int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size); void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, dma_addr_t *dma_handle, u16 *queue_len); u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt); -- cgit v1.2.3 From a6722d6a97de26f81c86d28ffd535b8bc005d436 Mon Sep 17 00:00:00 2001 From: Moti Haimovski Date: Mon, 5 Oct 2020 19:33:10 +0300 Subject: habanalabs: fix MMU print message This commit fixes an incorrect error message Signed-off-by: Moti Haimovski Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index b5058798aeb9..451148959431 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -216,7 +216,7 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, real_page_size = mmu_prop->page_size; } else { dev_err(hdev->dev, - "page size of %u is not %uKB aligned, can't unmap\n", + "page size of %u is not %uKB aligned, can't map\n", page_size, mmu_prop->page_size >> 10); return -EFAULT; -- cgit v1.2.3 From ccf979ee330b195961116f74cd75ba677fdc4cd7 Mon Sep 17 00:00:00 2001 From: Moti Haimovski Date: Mon, 5 Oct 2020 17:59:29 +0300 Subject: habanalabs: refactor MMU to support dual residency MMU This commit refactors the 
MMU code to support PCI MMU page tables residing on host and DCORE MMU residing on the device DRAM at the same time. This is needed for future devices as on GAUDI and GOYA we have a single MMU where its page tables always reside on DRAM. Signed-off-by: Moti Haimovski Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 57 +++++++++++++-- drivers/misc/habanalabs/common/mmu.c | 105 ++++++++++++++++++++-------- drivers/misc/habanalabs/common/mmu_v1.c | 28 ++++---- 3 files changed, 143 insertions(+), 47 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index f7a786dba9bc..06aff84ad8f7 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -61,6 +61,18 @@ /* MMU */ #define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ +/** + * enum hl_mmu_page_table_locaion - mmu page table location + * @MMU_DR_PGT: page-table is located on device DRAM. + * @MMU_HR_PGT: page-table is located on host memory. + * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported. + */ +enum hl_mmu_page_table_location { + MMU_DR_PGT = 0, /* device-dram-resident MMU PGT */ + MMU_HR_PGT, /* host resident MMU PGT */ + MMU_NUM_PGT_LOCATIONS /* num of PGT locations */ +}; + /* * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream @@ -303,6 +315,8 @@ enum hl_device_hw_state { * @hop5_mask: mask to get the PTE address in hop 5. * @page_size: default page size used to allocate memory. * @num_hops: The amount of hops supported by the translation table. + * @host_resident: Should the MMU page table reside in host memory or in the + * device DRAM. */ struct hl_mmu_properties { u64 start_addr; @@ -321,6 +335,7 @@ struct hl_mmu_properties { u64 hop5_mask; u32 page_size; u32 num_hops; + u8 host_resident; }; /** @@ -1572,17 +1587,51 @@ struct hl_device_idle_busy_ts { ktime_t busy_to_idle_ts; }; +/** + * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop + * information. + * @virt_addr: the virtual address of the hop. + * @phys-addr: the physical address of the hop (used by the device-mmu). + * @shadow_addr: The shadow of the hop used by the driver for walking the hops. + */ +struct hr_mmu_hop_addrs { + u64 virt_addr; + u64 phys_addr; + u64 shadow_addr; +}; /** - * struct hl_mmu_priv - used for holding per-device mmu internal information. + * struct hl_mmu_hr_pgt_priv - used for holding per-device mmu host-resident + * page-table internal information. * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops. * @mmu_shadow_hop0: shadow array of hop0 tables. */ -struct hl_mmu_priv { +struct hl_mmu_hr_priv { + struct gen_pool *mmu_pgt_pool; + struct hr_mmu_hop_addrs *mmu_shadow_hop0; +}; + +/** + * struct hl_mmu_dr_pgt_priv - used for holding per-device mmu device-resident + * page-table internal information. + * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops. + * @mmu_shadow_hop0: shadow array of hop0 tables. + */ +struct hl_mmu_dr_priv { struct gen_pool *mmu_pgt_pool; void *mmu_shadow_hop0; }; +/** + * struct hl_mmu_priv - used for holding per-device mmu internal information. + * @dr: information on the device-resident MMU, when exists. + * @hr: information on the host-resident MMU, when exists. 
+ */ +struct hl_mmu_priv { + struct hl_mmu_dr_priv dr; + struct hl_mmu_hr_priv hr; +}; + /** * struct hl_mmu_funcs - Device related MMU functions. * @init: initialize the MMU module. @@ -1779,7 +1828,7 @@ struct hl_device { struct hl_cs_counters_atomic aggregated_cs_counters; struct hl_mmu_priv mmu_priv; - struct hl_mmu_funcs mmu_func; + struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS]; atomic64_t dram_used_mem; u64 timeout_jiffies; @@ -2042,7 +2091,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, void hl_mmu_swap_out(struct hl_ctx *ctx); void hl_mmu_swap_in(struct hl_ctx *ctx); int hl_mmu_if_set_funcs(struct hl_device *hdev); -void hl_mmu_v1_set_funcs(struct hl_device *hdev); +void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, void __iomem *dst, u32 src_offset, u32 size); diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 451148959431..6f535c81478d 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -22,18 +22,25 @@ static bool is_dram_va(struct hl_device *hdev, u64 virt_addr) * hl_mmu_init() - initialize the MMU module. * @hdev: habanalabs device structure. * - * This function does the following: - * - Create a pool of pages for pgt_infos. - * - Create a shadow table for pgt - * * Return: 0 for success, non-zero for failure. */ int hl_mmu_init(struct hl_device *hdev) { - if (hdev->mmu_enable) - return hdev->mmu_func.init(hdev); + int rc = -EOPNOTSUPP; - return 0; + if (!hdev->mmu_enable) + return 0; + + if (hdev->mmu_func[MMU_DR_PGT].init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].init(hdev); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].init(hdev); + + return rc; } /** @@ -48,8 +55,14 @@ int hl_mmu_init(struct hl_device *hdev) */ void hl_mmu_fini(struct hl_device *hdev) { - if (hdev->mmu_enable) - hdev->mmu_func.fini(hdev); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].fini != NULL) + hdev->mmu_func[MMU_DR_PGT].fini(hdev); + + if (hdev->mmu_func[MMU_HR_PGT].fini != NULL) + hdev->mmu_func[MMU_HR_PGT].fini(hdev); } /** @@ -63,11 +76,21 @@ void hl_mmu_fini(struct hl_device *hdev) int hl_mmu_ctx_init(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; + int rc = -EOPNOTSUPP; - if (hdev->mmu_enable) - return hdev->mmu_func.ctx_init(ctx); + if (!hdev->mmu_enable) + return 0; - return 0; + if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { + rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx); + if (rc) + return rc; + } + + if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) + rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx); + + return rc; } /* @@ -84,8 +107,14 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.ctx_fini(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL) + hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx); } /* @@ -117,7 +146,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, struct hl_mmu_properties *mmu_prop; u64 real_virt_addr; u32 real_page_size, npages; - int i, rc = 0; + int i, rc = 0, pgt_residency; bool is_dram_addr; if (!hdev->mmu_enable) @@ -132,6 +161,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, else mmu_prop = &prop->pmmu; + 
pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + /* * The H/W handles mapping of specific page sizes. Hence if the page * size is bigger, we break it to sub-pages and unmap them separately. @@ -150,7 +181,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, real_virt_addr = virt_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr); + rc = hdev->mmu_func[pgt_residency].unmap(ctx, + real_virt_addr, is_dram_addr); if (rc) break; @@ -158,7 +190,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, } if (flush_pte) - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); return rc; } @@ -193,9 +225,10 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, struct hl_mmu_properties *mmu_prop; u64 real_virt_addr, real_phys_addr; u32 real_page_size, npages; - int i, rc, mapped_cnt = 0; + int i, rc, pgt_residency, mapped_cnt = 0; bool is_dram_addr; + if (!hdev->mmu_enable) return 0; @@ -208,6 +241,8 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, else mmu_prop = &prop->pmmu; + pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT; + /* * The H/W handles mapping of specific page sizes. Hence if the page * size is bigger, we break it to sub-pages and map them separately. @@ -231,8 +266,9 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, real_phys_addr = phys_addr; for (i = 0 ; i < npages ; i++) { - rc = hdev->mmu_func.map(ctx, real_virt_addr, real_phys_addr, - real_page_size, is_dram_addr); + rc = hdev->mmu_func[pgt_residency].map(ctx, + real_virt_addr, real_phys_addr, + real_page_size, is_dram_addr); if (rc) goto err; @@ -242,21 +278,22 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, } if (flush_pte) - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); return 0; err: real_virt_addr = virt_addr; for (i = 0 ; i < mapped_cnt ; i++) { - if (hdev->mmu_func.unmap(ctx, real_virt_addr, is_dram_addr)) + if (hdev->mmu_func[pgt_residency].unmap(ctx, + real_virt_addr, is_dram_addr)) dev_warn_ratelimited(hdev->dev, "failed to unmap va: 0x%llx\n", real_virt_addr); real_virt_addr += real_page_size; } - hdev->mmu_func.flush(ctx); + hdev->mmu_func[pgt_residency].flush(ctx); return rc; } @@ -271,8 +308,14 @@ void hl_mmu_swap_out(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.swap_out(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_out(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_out != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_out(ctx); } /* @@ -285,8 +328,14 @@ void hl_mmu_swap_in(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; - if (hdev->mmu_enable) - hdev->mmu_func.swap_in(ctx); + if (!hdev->mmu_enable) + return; + + if (hdev->mmu_func[MMU_DR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_DR_PGT].swap_in(ctx); + + if (hdev->mmu_func[MMU_HR_PGT].swap_in != NULL) + hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); } int hl_mmu_if_set_funcs(struct hl_device *hdev) @@ -297,7 +346,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev) switch (hdev->asic_type) { case ASIC_GOYA: case ASIC_GAUDI: - hl_mmu_v1_set_funcs(hdev); + hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); break; default: dev_err(hdev->dev, "Unrecognized ASIC type %d\n", diff --git a/drivers/misc/habanalabs/common/mmu_v1.c 
b/drivers/misc/habanalabs/common/mmu_v1.c index 8d1eb5265419..ec7e8a3c37b8 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -29,7 +29,7 @@ static void _free_hop(struct hl_ctx *ctx, struct pgt_info *pgt_info) { struct hl_device *hdev = ctx->hdev; - gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, pgt_info->phys_addr, + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr, hdev->asic_prop.mmu_hop_table_size); hash_del(&pgt_info->node); kfree((u64 *) (uintptr_t) pgt_info->shadow_addr); @@ -54,7 +54,7 @@ static u64 alloc_hop(struct hl_ctx *ctx) if (!pgt_info) return ULLONG_MAX; - phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.mmu_pgt_pool, + phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_hop_table_size); if (!phys_addr) { dev_err(hdev->dev, "failed to allocate page\n"); @@ -75,7 +75,7 @@ static u64 alloc_hop(struct hl_ctx *ctx) return shadow_addr; shadow_err: - gen_pool_free(hdev->mmu_priv.mmu_pgt_pool, phys_addr, + gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, phys_addr, prop->mmu_hop_table_size); pool_add_err: kfree(pgt_info); @@ -91,7 +91,7 @@ static inline u64 get_phys_hop0_addr(struct hl_ctx *ctx) static inline u64 get_hop0_addr(struct hl_ctx *ctx) { - return (u64) (uintptr_t) ctx->hdev->mmu_priv.mmu_shadow_hop0 + + return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); } @@ -419,15 +419,15 @@ static int hl_mmu_v1_init(struct hl_device *hdev) struct asic_fixed_properties *prop = &hdev->asic_prop; int rc; - hdev->mmu_priv.mmu_pgt_pool = + hdev->mmu_priv.dr.mmu_pgt_pool = gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); - if (!hdev->mmu_priv.mmu_pgt_pool) { + if (!hdev->mmu_priv.dr.mmu_pgt_pool) { dev_err(hdev->dev, "Failed to create page gen pool\n"); return -ENOMEM; } - rc = gen_pool_add(hdev->mmu_priv.mmu_pgt_pool, prop->mmu_pgt_addr + + rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr + prop->mmu_hop0_tables_total_size, prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size, -1); @@ -436,10 +436,10 @@ static int hl_mmu_v1_init(struct hl_device *hdev) goto err_pool_add; } - hdev->mmu_priv.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, + hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid, prop->mmu_hop_table_size, GFP_KERNEL | __GFP_ZERO); - if (ZERO_OR_NULL_PTR(hdev->mmu_priv.mmu_shadow_hop0)) { + if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) { rc = -ENOMEM; goto err_pool_add; } @@ -449,7 +449,7 @@ static int hl_mmu_v1_init(struct hl_device *hdev) return 0; err_pool_add: - gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); return rc; } @@ -468,8 +468,8 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) { /* MMU H/W fini was already done in device hw_fini() */ - kvfree(hdev->mmu_priv.mmu_shadow_hop0); - gen_pool_destroy(hdev->mmu_priv.mmu_pgt_pool); + kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); } /** @@ -847,10 +847,8 @@ static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) * * @hdev: pointer to the device structure */ -void hl_mmu_v1_set_funcs(struct hl_device *hdev) +void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) { - struct hl_mmu_funcs *mmu = &hdev->mmu_func; - mmu->init = hl_mmu_v1_init; mmu->fini = hl_mmu_v1_fini; mmu->ctx_init = hl_mmu_v1_ctx_init; -- cgit v1.2.3 From 1cbca899fa57637036aa3199bd3f2208db2efff5 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: 
Mon, 5 Oct 2020 11:36:00 +0300 Subject: habanalabs/gaudi: fetch PLL info from FW Once FW security is enabled there is no access to PLL registers, need to read values from FW using a dedicated interface. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 26 ++++++++++++++++++ drivers/misc/habanalabs/common/habanalabs.h | 4 +++ drivers/misc/habanalabs/gaudi/gaudi.c | 41 ++++++++++++++++++++++------ 3 files changed, 62 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 8de6a8690b1b..d84a70ec0ce1 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -448,6 +448,32 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) return rc; } +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, + enum cpucp_pll_type_attributes pll_type, + enum cpucp_pll_reg_attributes pll_reg, + u32 *pll_info) +{ + struct cpucp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_REG_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_type = __cpu_to_le16(pll_type); + pkt.pll_reg = __cpu_to_le16(pll_reg); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_CPUCP_INFO_TIMEOUT_USEC, &result); + if (rc) + dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc); + + *pll_info = result; + + return rc; +} + static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, u32 cpu_security_boot_status_reg) { diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 06aff84ad8f7..033f6809980f 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2113,6 +2113,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters); int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy); +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, + enum cpucp_pll_type_attributes pll_type, + enum cpucp_pll_reg_attributes pll_reg, + u32 *pll_info); int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, u32 cpu_security_boot_status_reg, u32 boot_err0_reg, diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index f5b211416e7f..65b7e20b8f4a 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -673,16 +673,33 @@ static int gaudi_early_fini(struct hl_device *hdev) * @hdev: pointer to hl_device structure * */ -static void gaudi_fetch_psoc_frequency(struct hl_device *hdev) +static int gaudi_fetch_psoc_frequency(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 trace_freq = 0; - u32 pll_clk = 0; - u32 div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2); - u32 div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2); - u32 nr = RREG32(mmPSOC_CPU_PLL_NR); - u32 nf = RREG32(mmPSOC_CPU_PLL_NF); - u32 od = RREG32(mmPSOC_CPU_PLL_OD); + u32 trace_freq = 0, pll_clk = 0; + u32 div_fctr, div_sel, nr, nf, od; + int rc; + + if (hdev->asic_prop.fw_security_disabled) { + div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2); + div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2); + nr = RREG32(mmPSOC_CPU_PLL_NR); + nf = RREG32(mmPSOC_CPU_PLL_NF); + od = RREG32(mmPSOC_CPU_PLL_OD); + } else { + rc = 
hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, + cpucp_pll_div_factor_reg, &div_fctr); + rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, + cpucp_pll_div_sel_reg, &div_sel); + rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, + cpucp_pll_nr_reg, &nr); + rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, + cpucp_pll_nf_reg, &nf); + rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, + cpucp_pll_od_reg, &od); + if (rc) + return rc; + } if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) { if (div_sel == DIV_SEL_REF_CLK) @@ -706,6 +723,8 @@ static void gaudi_fetch_psoc_frequency(struct hl_device *hdev) prop->psoc_pci_pll_nf = nf; prop->psoc_pci_pll_od = od; prop->psoc_pci_pll_div_factor = div_fctr; + + return 0; } static int _gaudi_init_tpc_mem(struct hl_device *hdev, @@ -1315,7 +1334,11 @@ static int gaudi_late_init(struct hl_device *hdev) WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER); - gaudi_fetch_psoc_frequency(hdev); + rc = gaudi_fetch_psoc_frequency(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to fetch psoc frequency\n"); + goto disable_pci_access; + } rc = gaudi_mmu_clear_pgt_range(hdev); if (rc) { -- cgit v1.2.3 From 6de3d769fde0303a746a5e540bf23fa4b07d0ad5 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Sun, 19 Jul 2020 21:07:15 +0300 Subject: habanalabs: Small refactoring of CS IOCTL handling Refactor the CS IOCTL handling by gathering common code into sub-functions, in order to ease future additions of new CS types. Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 417 +++++++++++---------- 1 file changed, 223 insertions(+), 194 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index e123101b74d6..fc1586e09ed0 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -11,8 +11,6 @@ #include #include -#define HL_CS_FLAGS_SIG_WAIT (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT) - static void job_wq_completion(struct work_struct *work); static long _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq); @@ -660,44 +658,114 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, return job; } -static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, - u32 num_chunks, u64 *cs_seq) +static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags) +{ + if (cs_type_flags & HL_CS_FLAGS_SIGNAL) + return CS_TYPE_SIGNAL; + else if (cs_type_flags & HL_CS_FLAGS_WAIT) + return CS_TYPE_WAIT; + else if (cs_type_flags & HL_CS_FLAGS_COLLECTIVE_WAIT) + return CS_TYPE_COLLECTIVE_WAIT; + else + return CS_TYPE_DEFAULT; +} + +static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) { struct hl_device *hdev = hpriv->hdev; - struct hl_cs_chunk *cs_chunk_array; - struct hl_cs_counters_atomic *cntr; - struct hl_cs_job *job; - struct hl_cs *cs; - struct hl_cb *cb; - bool int_queues_only = true; - u32 size_to_copy; - int rc, i; + struct hl_ctx *ctx = hpriv->ctx; + u32 cs_type_flags, num_chunks; + enum hl_cs_type cs_type; - cntr = &hdev->aggregated_cs_counters; - *cs_seq = ULLONG_MAX; + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is %s. Can't submit new CS\n", + atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + return -EBUSY; + } + + cs_type_flags = args->in.cs_flags & ~HL_CS_FLAGS_FORCE_RESTORE; + + if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { + dev_err(hdev->dev, + "CS type flags are mutually exclusive, context %d\n", + ctx->asid); + return -EINVAL; + } + + cs_type = hl_cs_get_cs_type(cs_type_flags); + num_chunks = args->in.num_chunks_execute; + + if (unlikely((cs_type != CS_TYPE_DEFAULT) && + !hdev->supports_sync_stream)) { + dev_err(hdev->dev, "Sync stream CS is not supported\n"); + return -EINVAL; + } + + if (cs_type == CS_TYPE_DEFAULT) { + if (!num_chunks) { + dev_err(hdev->dev, + "Got execute CS with 0 chunks, context %d\n", + ctx->asid); + return -EINVAL; + } + } else if (num_chunks != 1) { + dev_err(hdev->dev, + "Sync stream CS mandates one chunk only, context %d\n", + ctx->asid); + return -EINVAL; + } + + return 0; +} + +static int hl_cs_copy_chunk_array(struct hl_device *hdev, + struct hl_cs_chunk **cs_chunk_array, + void __user *chunks, u32 num_chunks) +{ + u32 size_to_copy; if (num_chunks > HL_MAX_JOBS_PER_CS) { dev_err(hdev->dev, "Number of chunks can NOT be larger than %d\n", HL_MAX_JOBS_PER_CS); - rc = -EINVAL; - goto out; + return -EINVAL; } - cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array), GFP_ATOMIC); - if (!cs_chunk_array) { - rc = -ENOMEM; - goto out; - } + if (!*cs_chunk_array) + return -ENOMEM; size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); - if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) { dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); - rc = -EFAULT; - goto free_cs_chunk_array; + kfree(*cs_chunk_array); + return -EFAULT; } + return 0; +} + +static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, + u32 num_chunks, u64 *cs_seq) +{ + bool int_queues_only = true; + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_counters_atomic *cntr; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + int rc, i; + + cntr = &hdev->aggregated_cs_counters; + *cs_seq = ULLONG_MAX; + + rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks); + if (rc) + goto out; + /* increment refcnt for context */ hl_ctx_get(hdev, hpriv->ctx); @@ -828,6 +896,108 @@ out: return rc; } +static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, + u64 *cs_seq) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + bool need_soft_reset = false; + int rc = 0, do_ctx_switch; + void __user *chunks; + u32 num_chunks, tmp; + long ret; + + do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); + + if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { + mutex_lock(&hpriv->restore_phase_mutex); + + if (do_ctx_switch) { + rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); + if (rc) { + dev_err_ratelimited(hdev->dev, + "Failed to switch to context %d, rejecting CS! %d\n", + ctx->asid, rc); + /* + * If we timedout, or if the device is not IDLE + * while we want to do context-switch (-EBUSY), + * we need to soft-reset because QMAN is + * probably stuck. 
However, we can't call to + * reset here directly because of deadlock, so + * need to do it at the very end of this + * function + */ + if ((rc == -ETIMEDOUT) || (rc == -EBUSY)) + need_soft_reset = true; + mutex_unlock(&hpriv->restore_phase_mutex); + goto out; + } + } + + hdev->asic_funcs->restore_phase_topology(hdev); + + chunks = (void __user *) (uintptr_t) args->in.chunks_restore; + num_chunks = args->in.num_chunks_restore; + + if (!num_chunks) { + dev_dbg(hdev->dev, + "Need to run restore phase but restore CS is empty\n"); + rc = 0; + } else { + rc = cs_ioctl_default(hpriv, chunks, num_chunks, + cs_seq); + } + + mutex_unlock(&hpriv->restore_phase_mutex); + + if (rc) { + dev_err(hdev->dev, + "Failed to submit restore CS for context %d (%d)\n", + ctx->asid, rc); + goto out; + } + + /* Need to wait for restore completion before execution phase */ + if (num_chunks) { +wait_again: + ret = _hl_cs_wait_ioctl(hdev, ctx, + jiffies_to_usecs(hdev->timeout_jiffies), + *cs_seq); + if (ret <= 0) { + if (ret == -ERESTARTSYS) { + usleep_range(100, 200); + goto wait_again; + } + + dev_err(hdev->dev, + "Restore CS for context %d failed to complete %ld\n", + ctx->asid, ret); + rc = -ENOEXEC; + goto out; + } + } + + ctx->thread_ctx_switch_wait_token = 1; + + } else if (!ctx->thread_ctx_switch_wait_token) { + rc = hl_poll_timeout_memory(hdev, + &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), + 100, jiffies_to_usecs(hdev->timeout_jiffies), false); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, + "context switch phase timeout (%d)\n", tmp); + goto out; + } + } + +out: + if ((rc == -ETIMEDOUT || rc == -EBUSY) && (need_soft_reset)) + hl_device_reset(hdev, false, false); + + return rc; +} + static int cs_ioctl_extract_signal_seq(struct hl_device *hdev, struct hl_cs_chunk *chunk, u64 *signal_seq) { @@ -935,41 +1105,23 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, void __user *chunks, u32 num_chunks, u64 *cs_seq) { - struct hl_device *hdev = hpriv->hdev; - struct hl_ctx *ctx = hpriv->ctx; struct hl_cs_chunk *cs_chunk_array, *chunk; struct hw_queue_properties *hw_queue_prop; - struct hl_fence *sig_fence = NULL; + struct hl_device *hdev = hpriv->hdev; struct hl_cs_compl *sig_waitcs_cmpl; - struct hl_cs *cs; + u32 q_idx, collective_engine_id = 0; + struct hl_fence *sig_fence = NULL; + struct hl_ctx *ctx = hpriv->ctx; enum hl_queue_type q_type; - u32 size_to_copy, q_idx, collective_engine_id = 0; + struct hl_cs *cs; u64 signal_seq; int rc; *cs_seq = ULLONG_MAX; - if (num_chunks > HL_MAX_JOBS_PER_CS) { - dev_err(hdev->dev, - "Number of chunks can NOT be larger than %d\n", - HL_MAX_JOBS_PER_CS); - rc = -EINVAL; - goto out; - } - - cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), - GFP_ATOMIC); - if (!cs_chunk_array) { - rc = -ENOMEM; + rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks); + if (rc) goto out; - } - - size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); - if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { - dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); - rc = -EFAULT; - goto free_cs_chunk_array; - } /* currently it is guaranteed to have only one chunk */ chunk = &cs_chunk_array[0]; @@ -1106,158 +1258,38 @@ out: int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) { - struct hl_device *hdev = hpriv->hdev; union hl_cs_args *args = data; - struct hl_ctx *ctx = hpriv->ctx; - void __user *chunks_execute, *chunks_restore; enum hl_cs_type cs_type; - u32 num_chunks_execute, num_chunks_restore, 
sig_wait_flags; u64 cs_seq = ULONG_MAX; - int rc, do_ctx_switch; - bool need_soft_reset = false; - - if (hl_device_disabled_or_in_reset(hdev)) { - dev_warn_ratelimited(hdev->dev, - "Device is %s. Can't submit new CS\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); - rc = -EBUSY; - goto out; - } - - sig_wait_flags = args->in.cs_flags & HL_CS_FLAGS_SIG_WAIT; - - if (unlikely(sig_wait_flags == HL_CS_FLAGS_SIG_WAIT)) { - dev_err(hdev->dev, - "Signal and wait CS flags are mutually exclusive, context %d\n", - ctx->asid); - rc = -EINVAL; - goto out; - } + void __user *chunks; + u32 num_chunks; + int rc; - if (unlikely((sig_wait_flags & HL_CS_FLAGS_SIG_WAIT) && - (!hdev->supports_sync_stream))) { - dev_err(hdev->dev, "Sync stream CS is not supported\n"); - rc = -EINVAL; + rc = hl_cs_sanity_checks(hpriv, args); + if (rc) goto out; - } - if (args->in.cs_flags & HL_CS_FLAGS_SIGNAL) - cs_type = CS_TYPE_SIGNAL; - else if (args->in.cs_flags & HL_CS_FLAGS_WAIT) - cs_type = CS_TYPE_WAIT; - else if (args->in.cs_flags & HL_CS_FLAGS_COLLECTIVE_WAIT) - cs_type = CS_TYPE_COLLECTIVE_WAIT; - else - cs_type = CS_TYPE_DEFAULT; - - chunks_execute = (void __user *) (uintptr_t) args->in.chunks_execute; - num_chunks_execute = args->in.num_chunks_execute; - - if (cs_type == CS_TYPE_DEFAULT) { - if (!num_chunks_execute) { - dev_err(hdev->dev, - "Got execute CS with 0 chunks, context %d\n", - ctx->asid); - rc = -EINVAL; - goto out; - } - } else if (num_chunks_execute != 1) { - dev_err(hdev->dev, - "Sync stream CS mandates one chunk only, context %d\n", - ctx->asid); - rc = -EINVAL; + rc = hl_cs_ctx_switch(hpriv, args, &cs_seq); + if (rc) goto out; - } - - do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); - - if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { - long ret; - - chunks_restore = - (void __user *) (uintptr_t) args->in.chunks_restore; - num_chunks_restore = args->in.num_chunks_restore; - - mutex_lock(&hpriv->restore_phase_mutex); - - if (do_ctx_switch) { - rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); - if (rc) { - dev_err_ratelimited(hdev->dev, - "Failed to switch to context %d, rejecting CS! %d\n", - ctx->asid, rc); - /* - * If we timedout, or if the device is not IDLE - * while we want to do context-switch (-EBUSY), - * we need to soft-reset because QMAN is - * probably stuck. 
However, we can't call to - * reset here directly because of deadlock, so - * need to do it at the very end of this - * function - */ - if ((rc == -ETIMEDOUT) || (rc == -EBUSY)) - need_soft_reset = true; - mutex_unlock(&hpriv->restore_phase_mutex); - goto out; - } - } - - hdev->asic_funcs->restore_phase_topology(hdev); - - if (!num_chunks_restore) { - dev_dbg(hdev->dev, - "Need to run restore phase but restore CS is empty\n"); - rc = 0; - } else { - rc = cs_ioctl_default(hpriv, chunks_restore, - num_chunks_restore, &cs_seq); - } - mutex_unlock(&hpriv->restore_phase_mutex); - - if (rc) { - dev_err(hdev->dev, - "Failed to submit restore CS for context %d (%d)\n", - ctx->asid, rc); - goto out; - } - - /* Need to wait for restore completion before execution phase */ - if (num_chunks_restore) { - ret = _hl_cs_wait_ioctl(hdev, ctx, - jiffies_to_usecs(hdev->timeout_jiffies), - cs_seq); - if (ret <= 0) { - dev_err(hdev->dev, - "Restore CS for context %d failed to complete %ld\n", - ctx->asid, ret); - rc = -ENOEXEC; - goto out; - } - } - - ctx->thread_ctx_switch_wait_token = 1; - } else if (!ctx->thread_ctx_switch_wait_token) { - u32 tmp; - - rc = hl_poll_timeout_memory(hdev, - &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1), - 100, jiffies_to_usecs(hdev->timeout_jiffies), false); - - if (rc == -ETIMEDOUT) { - dev_err(hdev->dev, - "context switch phase timeout (%d)\n", tmp); - goto out; - } + cs_type = hl_cs_get_cs_type(args->in.cs_flags & + ~HL_CS_FLAGS_FORCE_RESTORE); + chunks = (void __user *) (uintptr_t) args->in.chunks_execute; + num_chunks = args->in.num_chunks_execute; + + switch (cs_type) { + case CS_TYPE_SIGNAL: + case CS_TYPE_WAIT: + case CS_TYPE_COLLECTIVE_WAIT: + rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks, + &cs_seq); + break; + default: + rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq); + break; } - if (cs_type == CS_TYPE_DEFAULT) - rc = cs_ioctl_default(hpriv, chunks_execute, num_chunks_execute, - &cs_seq); - else - rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks_execute, - num_chunks_execute, &cs_seq); - out: if (rc != -EAGAIN) { memset(args, 0, sizeof(*args)); @@ -1265,9 +1297,6 @@ out: args->out.seq = cs_seq; } - if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset)) - hl_device_reset(hdev, false, false); - return rc; } -- cgit v1.2.3 From ea6ee260cbcdd1e00d8eb62057d432dddd2a71d1 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 27 Jul 2020 23:49:41 +0300 Subject: habanalabs: Small refactoring of cs_do_release() Slightly refactor the cs_do_release() function, to reduce nesting level and to ease the handling of future CS types. 
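Editor's note on the pattern this commit applies: the nesting reduction is the usual kernel early-out idiom, handle the exceptional case first and jump to a single exit label, so the long common path sits at the top indentation level. The sketch below shows only the idiom in isolation; the demo_* types and helpers are made-up stand-ins, not the driver's actual cs_do_release() code.

        #include <linux/types.h>

        /* Hypothetical objects, for illustration only. */
        struct demo_fence { int unused; };

        struct demo_cs {
                bool submitted;
                bool is_wait;
                struct demo_fence *signal_fence;
        };

        static void demo_fence_put(struct demo_fence *fence)
        {
                /* drop a reference on the fence (stub) */
        }

        static void demo_common_teardown(struct demo_cs *cs)
        {
                /* debugfs removal, ctx put, fence completion, ... (stub) */
        }

        /*
         * Early-out form: the "never submitted" case is handled first and
         * jumps to the shared exit label, so the common path below needs
         * no extra indentation level.
         */
        static void demo_cs_release(struct demo_cs *cs)
        {
                if (!cs->submitted) {
                        if (cs->is_wait)
                                demo_fence_put(cs->signal_fence);
                        goto out;
                }

                /* ... update active-CS counters, update CI, handle TDR ... */

        out:
                demo_common_teardown(cs);
        }

The hunk below applies the same shape to cs_do_release(): the !cs->submitted case is dealt with up front and the submitted path loses one indentation level without any behavioral change.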
Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 99 +++++++++++----------- 1 file changed, 49 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index fc1586e09ed0..56dc18f0bc27 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -281,8 +281,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) static void cs_do_release(struct kref *ref) { - struct hl_cs *cs = container_of(ref, struct hl_cs, - refcount); + struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); struct hl_device *hdev = cs->ctx->hdev; struct hl_cs_job *job, *tmp; @@ -299,69 +298,69 @@ static void cs_do_release(struct kref *ref) list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) free_job(hdev, job); - /* We also need to update CI for internal queues */ - if (cs->submitted) { - hdev->asic_funcs->hw_queues_lock(hdev); + if (!cs->submitted) { + /* In case the wait for signal CS was submitted, the put occurs + * in init_signal_wait_cs() right before hanging on the PQ. + */ + if (cs->type == CS_TYPE_WAIT) + hl_fence_put(cs->signal_fence); - hdev->cs_active_cnt--; - if (!hdev->cs_active_cnt) { - struct hl_device_idle_busy_ts *ts; + goto out; + } - ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; - ts->busy_to_idle_ts = ktime_get(); + hdev->asic_funcs->hw_queues_lock(hdev); - if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) - hdev->idle_busy_ts_idx = 0; - } else if (hdev->cs_active_cnt < 0) { - dev_crit(hdev->dev, "CS active cnt %d is negative\n", - hdev->cs_active_cnt); - } + hdev->cs_active_cnt--; + if (!hdev->cs_active_cnt) { + struct hl_device_idle_busy_ts *ts; - hdev->asic_funcs->hw_queues_unlock(hdev); + ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; + ts->busy_to_idle_ts = ktime_get(); - hl_int_hw_queue_update_ci(cs); + if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) + hdev->idle_busy_ts_idx = 0; + } else if (hdev->cs_active_cnt < 0) { + dev_crit(hdev->dev, "CS active cnt %d is negative\n", + hdev->cs_active_cnt); + } - spin_lock(&hdev->hw_queues_mirror_lock); - /* remove CS from hw_queues mirror list */ - list_del_init(&cs->mirror_node); - spin_unlock(&hdev->hw_queues_mirror_lock); + hdev->asic_funcs->hw_queues_unlock(hdev); - /* - * Don't cancel TDR in case this CS was timedout because we - * might be running from the TDR context - */ - if ((!cs->timedout) && - (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) { - struct hl_cs *next; + /* Need to update CI for internal queues */ + hl_int_hw_queue_update_ci(cs); - if (cs->tdr_active) - cancel_delayed_work_sync(&cs->work_tdr); + spin_lock(&hdev->hw_queues_mirror_lock); + /* remove CS from hw_queues mirror list */ + list_del_init(&cs->mirror_node); + spin_unlock(&hdev->hw_queues_mirror_lock); - spin_lock(&hdev->hw_queues_mirror_lock); + /* Don't cancel TDR in case this CS was timedout because we might be + * running from the TDR context + */ + if (!cs->timedout && + hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { + struct hl_cs *next; - /* queue TDR for next CS */ - next = list_first_entry_or_null( - &hdev->hw_queues_mirror_list, - struct hl_cs, mirror_node); + if (cs->tdr_active) + cancel_delayed_work_sync(&cs->work_tdr); - if ((next) && (!next->tdr_active)) { - next->tdr_active = true; - schedule_delayed_work(&next->work_tdr, - 
hdev->timeout_jiffies); - } + spin_lock(&hdev->hw_queues_mirror_lock); + + /* queue TDR for next CS */ + next = list_first_entry_or_null(&hdev->hw_queues_mirror_list, + struct hl_cs, mirror_node); - spin_unlock(&hdev->hw_queues_mirror_lock); + if (next && !next->tdr_active) { + next->tdr_active = true; + schedule_delayed_work(&next->work_tdr, + hdev->timeout_jiffies); } - } else if (cs->type == CS_TYPE_WAIT) { - /* - * In case the wait for signal CS was submitted, the put occurs - * in init_signal_wait_cs() right before hanging on the PQ. - */ - hl_fence_put(cs->signal_fence); + + spin_unlock(&hdev->hw_queues_mirror_lock); } - /* - * Must be called before hl_ctx_put because inside we use ctx to get +out: + /* Must be called before hl_ctx_put because inside we use ctx to get * the device */ hl_debugfs_remove_cs(cs); -- cgit v1.2.3 From 4ba1b227b6c77252f49f96aa77cf1b50b9d2542c Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Tue, 28 Jul 2020 00:28:51 +0300 Subject: habanalabs: Skip updating CI of internal queues if not in use There are no internal queues if H/W queues are being used. In this case we can skip the redundant traversal over the queues array, looking for internal queues. Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/hw_queue.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index 44155a6e557f..e808e668a007 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -48,6 +48,11 @@ void hl_int_hw_queue_update_ci(struct hl_cs *cs) return; q = &hdev->kernel_queues[0]; + + /* There are no internal queues if H/W queues are being used */ + if (!hdev->asic_prop.max_queues || q->queue_type == QUEUE_TYPE_HW) + return; + for (i = 0 ; i < hdev->asic_prop.max_queues ; i++, q++) { if (q->queue_type == QUEUE_TYPE_INT) atomic_add(cs->jobs_in_queue_cnt[i], &q->ci); -- cgit v1.2.3 From c1d505a922fe89ff44db4de5f88f20037a571c7a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 25 Oct 2020 09:36:08 +0200 Subject: habanalabs: release signal if collective wait was dropped As in standard wait cs, we must release a signal fence once a collective wait cs was dropped and not submitted. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 56dc18f0bc27..536e09542e22 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -300,9 +300,11 @@ static void cs_do_release(struct kref *ref) if (!cs->submitted) { /* In case the wait for signal CS was submitted, the put occurs - * in init_signal_wait_cs() right before hanging on the PQ. + * in init_signal_wait_cs() or collective_wait_init_cs() + * right before hanging on the PQ. 
*/ - if (cs->type == CS_TYPE_WAIT) + if (cs->type == CS_TYPE_WAIT || + cs->type == CS_TYPE_COLLECTIVE_WAIT) hl_fence_put(cs->signal_fence); goto out; -- cgit v1.2.3 From ba7e389c30c6bfb78857ba7a6f8d2cd4bbf5bde7 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Sun, 25 Oct 2020 17:47:22 +0200 Subject: habanalabs: Move repeatedly included headers to habanalabs.h Several header files are repeatedly included in many files. Move these files to habanalabs.h which is included by all. Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_buffer.c | 1 - drivers/misc/habanalabs/common/device.c | 1 - drivers/misc/habanalabs/common/firmware_if.c | 2 -- drivers/misc/habanalabs/common/habanalabs.h | 4 ++++ drivers/misc/habanalabs/common/memory.c | 1 - drivers/misc/habanalabs/common/mmu_v1.c | 1 - drivers/misc/habanalabs/gaudi/gaudi.c | 2 -- drivers/misc/habanalabs/gaudi/gaudi_coresight.c | 2 -- drivers/misc/habanalabs/goya/goya.c | 2 -- drivers/misc/habanalabs/goya/goya_coresight.c | 2 -- 10 files changed, 4 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index ada570f35a41..075679626c7b 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -11,7 +11,6 @@ #include #include #include -#include static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) { diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 20572224099a..421e37123b03 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -10,7 +10,6 @@ #include "habanalabs.h" #include -#include #include #include diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index d84a70ec0ce1..fb9d901d5059 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -9,8 +9,6 @@ #include "../include/common/hl_boot_if.h" #include -#include -#include #include #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */ diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 033f6809980f..f00891ddc3ad 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -19,6 +19,10 @@ #include #include #include +#include +#include +#include +#include #define HL_NAME "habanalabs" diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 75e269bc42a7..5c1dae6aaf4d 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -11,7 +11,6 @@ #include #include -#include #define HL_MMU_DEBUG 0 diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index ec7e8a3c37b8..ac3784523baa 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -8,7 +8,6 @@ #include "habanalabs.h" #include "../include/hw_ip/mmu/mmu_general.h" -#include #include static inline u64 get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 65b7e20b8f4a..ecfcfdfdd6b0 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -17,8 +17,6 @@ #include #include #include -#include 
-#include #include #include diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c index 3d2b0f0f4650..2e3612e1ee28 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c @@ -11,8 +11,6 @@ #include "../include/gaudi/gaudi_masks.h" #include -#include - #define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET #define SPMU_EVENT_TYPES_OFFSET 0x400 #define SPMU_MAX_COUNTERS 6 diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 16046dd31e72..ab467943cef3 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -12,9 +12,7 @@ #include "../include/goya/goya_reg_map.h" #include -#include #include -#include #include #include diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index 4027a6a334d7..6fa03933b438 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -12,8 +12,6 @@ #include -#include - #define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100) #define SPMU_SECTION_SIZE DMA_CH_0_CS_SPMU_MAX_OFFSET -- cgit v1.2.3 From eb10b897e45968740fde22626843177b7b2f2191 Mon Sep 17 00:00:00 2001 From: farah kassabri Date: Wed, 14 Oct 2020 15:17:36 +0300 Subject: habanalabs: reset device upon fw read failure failure in reading pre-boot verion is not handled correctly, upon failure we need to reset the device in order to be able to reinstall the driver. Signed-off-by: farah kassabri Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 4 +++- drivers/misc/habanalabs/common/habanalabs.h | 2 +- drivers/misc/habanalabs/common/pci.c | 5 ++++- drivers/misc/habanalabs/gaudi/gaudi.c | 27 ++++++++++++++++----------- drivers/misc/habanalabs/goya/goya.c | 22 ++++++++++++++-------- 5 files changed, 38 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index fb9d901d5059..2fc12e529241 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -607,7 +607,9 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg, return -EIO; } - hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + rc = hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT); + if (rc) + return rc; security_status = RREG32(cpu_security_boot_status_reg); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index f00891ddc3ad..0f4f8ef2cfce 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -927,7 +927,7 @@ struct hl_asic_funcs { void (*ctx_fini)(struct hl_ctx *ctx); int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk); u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx); - void (*read_device_fw_version)(struct hl_device *hdev, + int (*read_device_fw_version)(struct hl_device *hdev, enum hl_fw_component fwc); int (*load_firmware_to_device)(struct hl_device *hdev); int (*load_boot_fit_to_device)(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c index 211f3190f8d7..02152d85cf19 100644 --- a/drivers/misc/habanalabs/common/pci.c +++ b/drivers/misc/habanalabs/common/pci.c @@ -390,8 +390,11 @@ int 
hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, rc = hl_fw_read_preboot_status(hdev, cpu_boot_status_reg, cpu_security_boot_status_reg, boot_err0_reg, preboot_ver_timeout); - if (rc) + if (rc) { + dev_err(hdev->dev, "Failed to read preboot version\n"); + hdev->asic_funcs->hw_fini(hdev, true); goto unmap_pci_bars; + } return 0; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index ecfcfdfdd6b0..6aa3e38e3598 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -3603,7 +3603,7 @@ static int gaudi_load_boot_fit_to_device(struct hl_device *hdev) return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0); } -static void gaudi_read_device_fw_version(struct hl_device *hdev, +static int gaudi_read_device_fw_version(struct hl_device *hdev, enum hl_fw_component fwc) { const char *name; @@ -3623,7 +3623,7 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev, break; default: dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); - return; + return -EIO; } ver_off &= ~((u32)SRAM_BASE_ADDR); @@ -3635,7 +3635,10 @@ static void gaudi_read_device_fw_version(struct hl_device *hdev, dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", name, ver_off); strcpy(dest, "unavailable"); + return -EIO; } + + return 0; } static int gaudi_init_cpu(struct hl_device *hdev) @@ -3925,16 +3928,18 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap); - gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | - HW_CAP_HBM | HW_CAP_PCI_DMA | - HW_CAP_MME | HW_CAP_TPC_MASK | - HW_CAP_HBM_DMA | HW_CAP_PLL | - HW_CAP_NIC_MASK | HW_CAP_MMU | - HW_CAP_SRAM_SCRAMBLER | - HW_CAP_HBM_SCRAMBLER | - HW_CAP_CLK_GATE); + if (gaudi) { + gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_HBM | HW_CAP_PCI_DMA | + HW_CAP_MME | HW_CAP_TPC_MASK | + HW_CAP_HBM_DMA | HW_CAP_PLL | + HW_CAP_NIC_MASK | HW_CAP_MMU | + HW_CAP_SRAM_SCRAMBLER | + HW_CAP_HBM_SCRAMBLER | + HW_CAP_CLK_GATE); - memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); + memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat)); + } } static int gaudi_suspend(struct hl_device *hdev) diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ab467943cef3..a0580b8e1f05 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2341,7 +2341,7 @@ static int goya_load_boot_fit_to_device(struct hl_device *hdev) * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx. * The version string should be located by that offset. 
*/ -static void goya_read_device_fw_version(struct hl_device *hdev, +static int goya_read_device_fw_version(struct hl_device *hdev, enum hl_fw_component fwc) { const char *name; @@ -2361,7 +2361,7 @@ static void goya_read_device_fw_version(struct hl_device *hdev, break; default: dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); - return; + return -EIO; } ver_off &= ~((u32)SRAM_BASE_ADDR); @@ -2373,7 +2373,11 @@ static void goya_read_device_fw_version(struct hl_device *hdev, dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", name, ver_off); strcpy(dest, "unavailable"); + + return -EIO; } + + return 0; } static int goya_init_cpu(struct hl_device *hdev) @@ -2644,12 +2648,14 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM, 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT); - goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | - HW_CAP_DDR_0 | HW_CAP_DDR_1 | - HW_CAP_DMA | HW_CAP_MME | - HW_CAP_MMU | HW_CAP_TPC_MBIST | - HW_CAP_GOLDEN | HW_CAP_TPC); - memset(goya->events_stat, 0, sizeof(goya->events_stat)); + if (goya) { + goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_DDR_0 | HW_CAP_DDR_1 | + HW_CAP_DMA | HW_CAP_MME | + HW_CAP_MMU | HW_CAP_TPC_MBIST | + HW_CAP_GOLDEN | HW_CAP_TPC); + memset(goya->events_stat, 0, sizeof(goya->events_stat)); + } } int goya_suspend(struct hl_device *hdev) -- cgit v1.2.3 From 0da5698bf4b8c699e5e5e23d067f5a1bb1bb2401 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 27 Oct 2020 09:34:44 +0200 Subject: habanalabs/gaudi: increase MAX CS to 16K We need to have the MAX CS be much larger than the size of the different queues. In GAUDI we have around 8 groups of queues, and each group has 1K queue size. To prevent head-of-the-line blocking, we need to make sure there is sufficient number of available CS allocations even if one or more of those queues are full. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudiP.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 297a96dbf4e5..e882cf7f8cf7 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -60,7 +60,7 @@ #define GAUDI_DEFAULT_CARD_NAME "HL2000" -#define GAUDI_MAX_PENDING_CS 1024 +#define GAUDI_MAX_PENDING_CS SZ_16K #if !IS_MAX_PENDING_CS_VALID(GAUDI_MAX_PENDING_CS) #error "GAUDI_MAX_PENDING_CS must be power of 2 and greater than 1" -- cgit v1.2.3 From 649c4592124b0cf1faf044b5f14de208d08ebb50 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 10 Aug 2020 17:30:35 +0300 Subject: habanalabs: Separate CS job completion from its deallocation Current CS jobs are no longer needed after their completion. However, jobs of future workload might be in use even after they are completed. To allow that, the patch adds a refcount to the job object, and decouples its completion handling from its deallocation. 
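Editor's note: the decoupling described here is the standard kernel kref pattern. The object embeds a struct kref, anyone who may outlive the submission path takes an extra reference, and the memory is freed only from the release callback invoked by the final kref_put(). A minimal sketch of that generic pattern follows; the sketch_job type and helpers are hypothetical and much smaller than the driver's real job structure.

        #include <linux/kref.h>
        #include <linux/slab.h>

        /* Hypothetical job object, for illustration only. */
        struct sketch_job {
                struct kref refcount;
                int id;
        };

        static void sketch_job_release(struct kref *ref)
        {
                struct sketch_job *job = container_of(ref, struct sketch_job,
                                                        refcount);

                kfree(job);
        }

        static struct sketch_job *sketch_job_alloc(int id)
        {
                struct sketch_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

                if (!job)
                        return NULL;

                kref_init(&job->refcount);      /* count starts at 1 */
                job->id = id;
                return job;
        }

        /*
         * Completion no longer frees the object; it only drops the
         * submitter's reference. Any other holder keeps the job alive with
         * kref_get() and releases it later with kref_put().
         */
        static void sketch_job_complete(struct sketch_job *job)
        {
                /* ... mark done, update counters ... */
                kref_put(&job->refcount, sketch_job_release);
        }

The patch below follows this shape with cs_job_do_release()/cs_job_put() and a kref_init() in hl_cs_allocate_job(); the debugfs change only reads the counter with kref_read() for display and never uses it to make lifetime decisions.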
Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 25 ++++++++++++++++------ drivers/misc/habanalabs/common/debugfs.c | 13 +++++------ drivers/misc/habanalabs/common/habanalabs.h | 2 ++ 3 files changed, 28 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 536e09542e22..91bdd6d8b020 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -155,6 +155,18 @@ static void cs_put(struct hl_cs *cs) kref_put(&cs->refcount, cs_do_release); } +static void cs_job_do_release(struct kref *ref) +{ + struct hl_cs_job *job = container_of(ref, struct hl_cs_job, refcount); + + kfree(job); +} + +static void cs_job_put(struct hl_cs_job *job) +{ + kref_put(&job->refcount, cs_job_do_release); +} + static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) { /* @@ -227,7 +239,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) return rc; } -static void free_job(struct hl_device *hdev, struct hl_cs_job *job) +static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) { struct hl_cs *cs = job->cs; @@ -276,7 +288,7 @@ static void free_job(struct hl_device *hdev, struct hl_cs_job *job) job->queue_type == QUEUE_TYPE_HW) cs_put(cs); - kfree(job); + cs_job_put(job); } static void cs_do_release(struct kref *ref) @@ -290,13 +302,13 @@ static void cs_do_release(struct kref *ref) /* * Although if we reached here it means that all external jobs have * finished, because each one of them took refcnt to CS, we still - * need to go over the internal jobs and free them. Otherwise, we + * need to go over the internal jobs and complete them. Otherwise, we * will have leaked memory and what's worse, the CS object (and * potentially the CTX object) could be released, while the JOB * still holds a pointer to them (but no reference). 
*/ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) - free_job(hdev, job); + complete_job(hdev, job); if (!cs->submitted) { /* In case the wait for signal CS was submitted, the put occurs @@ -507,7 +519,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) struct hl_cs_job *job, *tmp; list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) - free_job(hdev, job); + complete_job(hdev, job); } void hl_cs_rollback_all(struct hl_device *hdev) @@ -539,7 +551,7 @@ static void job_wq_completion(struct work_struct *work) struct hl_device *hdev = cs->ctx->hdev; /* job is no longer needed */ - free_job(hdev, job); + complete_job(hdev, job); } static int validate_queue_index(struct hl_device *hdev, @@ -647,6 +659,7 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, if (!job) return NULL; + kref_init(&job->refcount); job->queue_type = queue_type; job->is_kernel_allocated_cb = is_kernel_allocated_cb; diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 912ddfa360b1..b44193ec3d12 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -168,18 +168,19 @@ static int command_submission_jobs_show(struct seq_file *s, void *data) if (first) { first = false; seq_puts(s, "\n"); - seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n"); - seq_puts(s, "---------------------------------------\n"); + seq_puts(s, " JOB ID CS ID CTX ASID JOB RefCnt H/W Queue\n"); + seq_puts(s, "----------------------------------------------------\n"); } if (job->cs) seq_printf(s, - " %02d %llu %d %d\n", + " %02d %llu %d %d %d\n", job->id, job->cs->sequence, job->cs->ctx->asid, - job->hw_queue_id); + kref_read(&job->refcount), job->hw_queue_id); else seq_printf(s, - " %02d 0 %d %d\n", - job->id, HL_KERNEL_ASID_ID, job->hw_queue_id); + " %02d 0 %d %d %d\n", + job->id, HL_KERNEL_ASID_ID, + kref_read(&job->refcount), job->hw_queue_id); } spin_unlock(&dev_entry->cs_job_spinlock); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 0f4f8ef2cfce..81ff340b6a5b 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1142,6 +1142,7 @@ struct hl_cs { * @userptr_list: linked-list of userptr mappings that belong to this job and * wait for completion. * @debugfs_list: node in debugfs list of command submission jobs. + * @refcount: reference counter for usage of the CS job. * @queue_type: the type of the H/W queue this job is submitted to. * @id: the id of this job inside a CS. * @hw_queue_id: the id of the H/W queue this job is submitted to. @@ -1165,6 +1166,7 @@ struct hl_cs_job { struct work_struct finish_work; struct list_head userptr_list; struct list_head debugfs_list; + struct kref refcount; enum hl_queue_type queue_type; u32 id; u32 hw_queue_id; -- cgit v1.2.3 From 66bfcccdb893f2dfefe3fb19795034ac213667ad Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 28 Oct 2020 21:05:20 +0200 Subject: habanalabs: remove duplicate print We print twice the firmware status regarding security, once in common code and once in asic code. Remove the print in asic code and leave the common code print. 
Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 3 --- drivers/misc/habanalabs/goya/goya.c | 3 --- 2 files changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 6aa3e38e3598..18c7da970cf5 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -647,9 +647,6 @@ static int gaudi_early_init(struct hl_device *hdev) if (rc) goto free_queue_props; - dev_info(hdev->dev, "firmware-level security is %s\n", - hdev->asic_prop.fw_security_disabled ? "disabled" : "enabled"); - return 0; free_queue_props: diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index a0580b8e1f05..d7ceda7dd048 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -615,9 +615,6 @@ static int goya_early_init(struct hl_device *hdev) "PCI strap is not configured correctly, PCI bus errors may occur\n"); } - dev_info(hdev->dev, "firmware-level security is %s\n", - hdev->asic_prop.fw_security_disabled ? "disabled" : "enabled"); - return 0; free_queue_props: -- cgit v1.2.3 From b726a2f7c0bcdb6ff02d4fa257df7e8a9509c686 Mon Sep 17 00:00:00 2001 From: Igor Grinberg Date: Thu, 29 Oct 2020 14:06:54 +0200 Subject: habanalabs/gaudi: remove pcie_en strap toggle Since the very large grace period is over and this functionality prevents us to implement the new reset sequence and apply security settings, we need to remove the code toggling the PCIE_EN bit in the straps register. Remove it for good. Signed-off-by: Igor Grinberg Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 18c7da970cf5..770cd1b94e4f 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -3859,7 +3859,7 @@ disable_queues: static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) { struct gaudi_device *gaudi = hdev->asic_specific; - u32 status, reset_timeout_ms, cpu_timeout_ms, boot_strap = 0; + u32 status, reset_timeout_ms, cpu_timeout_ms; if (!hard_reset) { dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n"); @@ -3891,16 +3891,6 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) /* Tell ASIC not to re-initialize PCIe */ WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); - boot_strap = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); - - /* H/W bug WA: - * rdata[31:0] = strap_read_val; - * wdata[31:0] = rdata[30:21],1'b0,rdata[20:0] - */ - boot_strap = (((boot_strap & 0x7FE00000) << 1) | - (boot_strap & 0x001FFFFF)); - WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap & ~0x2); - /* Restart BTL/BLR upon hard-reset */ if (hdev->asic_prop.fw_security_disabled) WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); @@ -3923,8 +3913,6 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) "Timeout while waiting for device to reset 0x%x\n", status); - WREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS, boot_strap); - if (gaudi) { gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM | HW_CAP_PCI_DMA | -- cgit v1.2.3 From f2d032ee1381ae1eda8846800a304d76d2d8b735 Mon Sep 17 00:00:00 2001 From: Omer Shpigelman Date: Sat, 31 Oct 2020 22:03:55 +0200 Subject: habanalabs: fix hard reset print and comment One of the first steps of a hard reset flow 
is to close all open user contexts. This user process teradown might take some time due to long cleanup in our driver or some other reason even before our cleanup flow. Hence fix the relevant print and comment to be more accurate. Signed-off-by: Omer Shpigelman Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 421e37123b03..3b82020648c7 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -793,17 +793,20 @@ static int device_kill_open_processes(struct hl_device *hdev) mutex_unlock(&hdev->fpriv_list_lock); - /* We killed the open users, but because the driver cleans up after the - * user contexts are closed (e.g. mmu mappings), we need to wait again - * to make sure the cleaning phase is finished before continuing with - * the reset + /* + * We killed the open users, but that doesn't mean they are closed. + * It could be that they are running a long cleanup phase in the driver + * e.g. MMU unmappings, or running other long teardown flow even before + * our cleanup. + * Therefore we need to wait again to make sure they are closed before + * continuing with the reset. */ pending_cnt = pending_total; while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) { dev_info(hdev->dev, - "Waiting for all unmap operations to finish before hard reset\n"); + "Waiting for all user contexts to get closed before hard reset\n"); pending_cnt--; -- cgit v1.2.3 From 66a76401c50b2638fd95dd31f365fd64be307d6a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 5 Oct 2020 14:40:10 +0300 Subject: habanalabs: add 'needs reset' state in driver The new state indicates that device should be reset in order to re-gain funcionality. This unique state can occur if reset_on_lockup is disabled and an actual lockup has occurred. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_buffer.c | 5 ++-- .../misc/habanalabs/common/command_submission.c | 7 +++-- drivers/misc/habanalabs/common/debugfs.c | 6 ++-- drivers/misc/habanalabs/common/device.c | 33 ++++++++++++++++------ drivers/misc/habanalabs/common/habanalabs.h | 14 +++++++-- drivers/misc/habanalabs/common/habanalabs_drv.c | 17 ++++++++--- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 12 +++++--- drivers/misc/habanalabs/common/hw_queue.c | 6 ++-- drivers/misc/habanalabs/common/hwmon.c | 4 +-- drivers/misc/habanalabs/common/memory.c | 5 ++-- drivers/misc/habanalabs/common/sysfs.c | 8 ++++-- drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c | 8 +++--- drivers/misc/habanalabs/goya/goya_hwmgr.c | 28 +++++++++--------- include/uapi/misc/habanalabs.h | 3 +- 14 files changed, 101 insertions(+), 55 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 075679626c7b..03ffcead1855 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -379,13 +379,14 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) { union hl_cb_args *args = data; struct hl_device *hdev = hpriv->hdev; + enum hl_device_status status; u64 handle = 0; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. 
Can't execute CB IOCTL\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 91bdd6d8b020..20b34fb054ee 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -427,6 +427,8 @@ static void cs_timedout(struct work_struct *work) if (hdev->reset_on_lockup) hl_device_reset(hdev, false, false); + else + hdev->needs_reset = true; } static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, @@ -689,12 +691,13 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; u32 cs_type_flags, num_chunks; + enum hl_device_status status; enum hl_cs_type cs_type; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't submit new CS\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index b44193ec3d12..104b9686e57b 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -24,7 +24,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, struct cpucp_packet pkt; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -EBUSY; memset(&pkt, 0, sizeof(pkt)); @@ -50,7 +50,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, struct cpucp_packet pkt; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -EBUSY; memset(&pkt, 0, sizeof(pkt)); @@ -76,7 +76,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) struct cpucp_packet pkt; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return; memset(&pkt, 0, sizeof(pkt)); diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 3b82020648c7..59308a612b36 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -15,14 +15,6 @@ #define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) -bool hl_device_disabled_or_in_reset(struct hl_device *hdev) -{ - if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) - return true; - else - return false; -} - enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; @@ -31,12 +23,34 @@ enum hl_device_status hl_device_status(struct hl_device *hdev) status = HL_DEVICE_STATUS_MALFUNCTION; else if (atomic_read(&hdev->in_reset)) status = HL_DEVICE_STATUS_IN_RESET; + else if (hdev->needs_reset) + status = HL_DEVICE_STATUS_NEEDS_RESET; else status = HL_DEVICE_STATUS_OPERATIONAL; return status; } +bool hl_device_operational(struct hl_device *hdev, + enum hl_device_status *status) +{ + enum hl_device_status current_status; + + current_status = hl_device_status(hdev); + if (status) + *status = current_status; + + switch (current_status) { + case HL_DEVICE_STATUS_IN_RESET: + case HL_DEVICE_STATUS_MALFUNCTION: + case HL_DEVICE_STATUS_NEEDS_RESET: + return false; + case HL_DEVICE_STATUS_OPERATIONAL: + default: + return true; + } +} + static void hpriv_release(struct kref 
*ref) { struct hl_fpriv *hpriv; @@ -411,7 +425,7 @@ static void hl_device_heartbeat(struct work_struct *work) struct hl_device *hdev = container_of(work, struct hl_device, work_heartbeat.work); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) goto reschedule; if (!hdev->asic_funcs->send_heartbeat(hdev)) @@ -1091,6 +1105,7 @@ again: } atomic_set(&hdev->in_reset, 0); + hdev->needs_reset = false; if (hard_reset) hdev->hard_reset_cnt++; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 81ff340b6a5b..9c7594d0ca07 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1432,6 +1432,10 @@ struct hl_dbg_device_entry { * DEVICES */ +#define HL_STR_MAX 32 + +#define HL_DEV_STS_MAX (HL_DEVICE_STATUS_NEEDS_RESET + 1) + /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards. */ @@ -1706,6 +1710,7 @@ struct hl_mmu_funcs { * @hwmon_dev: H/W monitor device. * @pm_mng_profile: current power management profile. * @hl_chip_info: ASIC's sensors information. + * @device_status_description: device status description. * @hl_debugfs: device's debugfs manager. * @cb_pool: list of preallocated CBs. * @cb_pool_lock: protects the CB pool. @@ -1774,6 +1779,8 @@ struct hl_mmu_funcs { * @supports_coresight: is CoreSight supported. * @supports_soft_reset: is soft reset supported. * @supports_cb_mapping: is mapping a CB to the device's MMU supported. + * @needs_reset: true if reset_on_lockup is false and device should be reset + * due to lockup. */ struct hl_device { struct pci_dev *pdev; @@ -1786,7 +1793,8 @@ struct hl_device { struct device *dev_ctrl; struct delayed_work work_freq; struct delayed_work work_heartbeat; - char asic_name[32]; + char asic_name[HL_STR_MAX]; + char status[HL_DEV_STS_MAX][HL_STR_MAX]; enum hl_asic_type asic_type; struct hl_cq *completion_queue; struct workqueue_struct **cq_wq; @@ -1876,6 +1884,7 @@ struct hl_device { u8 supports_coresight; u8 supports_soft_reset; u8 supports_cb_mapping; + u8 needs_reset; /* Parameters for bring-up */ u64 nic_ports_mask; @@ -1978,7 +1987,8 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size, int hl_device_open(struct inode *inode, struct file *filp); int hl_device_open_ctrl(struct inode *inode, struct file *filp); -bool hl_device_disabled_or_in_reset(struct hl_device *hdev); +bool hl_device_operational(struct hl_device *hdev, + enum hl_device_status *status); enum hl_device_status hl_device_status(struct hl_device *hdev); int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); int create_hdev(struct hl_device **dev, struct pci_dev *pdev, diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index 20458bd82c5a..aac798f3296e 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -92,6 +92,7 @@ static enum hl_asic_type get_asic_type(u16 device) */ int hl_device_open(struct inode *inode, struct file *filp) { + enum hl_device_status status; struct hl_device *hdev; struct hl_fpriv *hpriv; int rc; @@ -124,10 +125,10 @@ int hl_device_open(struct inode *inode, struct file *filp) mutex_lock(&hdev->fpriv_list_lock); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_err_ratelimited(hdev->dev, - "Can't open %s because it is disabled or in 
reset\n", - dev_name(hdev->dev)); + "Can't open %s because it is %s\n", + dev_name(hdev->dev), hdev->status[status]); rc = -EPERM; goto out_err; } @@ -204,7 +205,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp) mutex_lock(&hdev->fpriv_list_lock); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { dev_err_ratelimited(hdev->dev_ctrl, "Can't open %s because it is disabled or in reset\n", dev_name(hdev->dev_ctrl)); @@ -287,6 +288,14 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, hdev->asic_type = asic_type; } + /* Assign status description string */ + strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], + "disabled", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], + "in reset", HL_STR_MAX); + strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], + "needs reset", HL_STR_MAX); + hdev->major = hl_major; hdev->reset_on_lockup = reset_on_lockup; hdev->memory_scrub = memory_scrub; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 1d8bea626e78..0729cd43f297 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -406,8 +406,10 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv, static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { + enum hl_device_status status; struct hl_info_args *args = data; struct hl_device *hdev = hpriv->hdev; + int rc; /* @@ -428,10 +430,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, break; } - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(dev, "Device is %s. Can't execute INFO IOCTL\n", - atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } @@ -501,12 +503,14 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data) { struct hl_debug_args *args = data; struct hl_device *hdev = hpriv->hdev; + enum hl_device_status status; + int rc = 0; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't execute DEBUG IOCTL\n", - atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index e808e668a007..f9550fcf5500 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -515,6 +515,7 @@ static void init_signal_wait_cs(struct hl_cs *cs) */ int hl_hw_queue_schedule_cs(struct hl_cs *cs) { + enum hl_device_status status; struct hl_cs_counters_atomic *cntr; struct hl_ctx *ctx = cs->ctx; struct hl_device *hdev = ctx->hdev; @@ -527,11 +528,10 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) hdev->asic_funcs->hw_queues_lock(hdev); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt); - atomic64_inc(&cntr->device_in_reset_drop_cnt); dev_err(hdev->dev, - "device is disabled or in reset, CS rejected!\n"); + "device is %s, CS rejected!\n", hdev->status[status]); rc = -EPERM; goto out; } diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c index 892a5e2b0b9d..ab96401c3752 100644 --- a/drivers/misc/habanalabs/common/hwmon.c +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -114,7 +114,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type, struct hl_device *hdev = dev_get_drvdata(dev); int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; switch (type) { @@ -192,7 +192,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; switch (type) { diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 5c1dae6aaf4d..e00ad11dc5f7 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1237,6 +1237,7 @@ out: int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) { + enum hl_device_status status; union hl_mem_args *args = data; struct hl_device *hdev = hpriv->hdev; struct hl_ctx *ctx = hpriv->ctx; @@ -1244,10 +1245,10 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) u32 handle = 0; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, &status)) { dev_warn_ratelimited(hdev->dev, "Device is %s. Can't execute MEMORY IOCTL\n", - atomic_read(&hdev->in_reset) ? 
"in_reset" : "disabled"); + hdev->status[status]); return -EBUSY; } diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index 3ceae87016b1..94ca68e62000 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -276,6 +276,8 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr, str = "In reset"; else if (hdev->disabled) str = "Malfunction"; + else if (hdev->needs_reset) + str = "Needs Reset"; else str = "Operational"; @@ -304,7 +306,7 @@ static ssize_t max_power_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long val; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; val = hl_get_max_power(hdev); @@ -319,7 +321,7 @@ static ssize_t max_power_store(struct device *dev, unsigned long value; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } @@ -347,7 +349,7 @@ static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj, char *data; int rc; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; if (!max_size) diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c index 1076b4932ce2..8c49da4bcbd5 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c +++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c @@ -20,7 +20,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) { long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -54,7 +54,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev, struct gaudi_device *gaudi = hdev->asic_specific; long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -72,7 +72,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev, int rc; u64 value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -97,7 +97,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, true); diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c index cdd4903e48fa..3acb36a1a902 100644 --- a/drivers/misc/habanalabs/goya/goya_hwmgr.c +++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c @@ -36,7 +36,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk) { long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -69,7 +69,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, false); @@ -88,7 +88,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, 
NULL)) { count = -ENODEV; goto fail; } @@ -118,7 +118,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, TPC_PLL, false); @@ -137,7 +137,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -167,7 +167,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, IC_PLL, false); @@ -186,7 +186,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr, int rc; long value; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto fail; } @@ -216,7 +216,7 @@ static ssize_t mme_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, MME_PLL, true); @@ -233,7 +233,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, TPC_PLL, true); @@ -250,7 +250,7 @@ static ssize_t ic_clk_curr_show(struct device *dev, struct hl_device *hdev = dev_get_drvdata(dev); long value; - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; value = hl_get_frequency(hdev, IC_PLL, true); @@ -266,7 +266,7 @@ static ssize_t pm_mng_profile_show(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; return sprintf(buf, "%s\n", @@ -280,7 +280,7 @@ static ssize_t pm_mng_profile_store(struct device *dev, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } @@ -335,7 +335,7 @@ static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr, { struct hl_device *hdev = dev_get_drvdata(dev); - if (hl_device_disabled_or_in_reset(hdev)) + if (!hl_device_operational(hdev, NULL)) return -ENODEV; return sprintf(buf, "%u\n", hdev->high_pll); @@ -348,7 +348,7 @@ static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr, long value; int rc; - if (hl_device_disabled_or_in_reset(hdev)) { + if (!hl_device_operational(hdev, NULL)) { count = -ENODEV; goto out; } diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 61f8f9144b54..d9cc782aba21 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -242,7 +242,8 @@ enum gaudi_engine_id { enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, - HL_DEVICE_STATUS_MALFUNCTION + HL_DEVICE_STATUS_MALFUNCTION, + HL_DEVICE_STATUS_NEEDS_RESET }; /* Opcode for management ioctl -- cgit v1.2.3 From 28e052c95292410922d487c48d2f841ccd6495e7 Mon Sep 17 00:00:00 2001 From: Oded Gabbay 
Date: Thu, 29 Oct 2020 18:38:31 +0200 Subject: habanalabs: restore vm_pgoff after mmap Due to using dma_mmap_coherent() to perform mmap of dma memory, we had to clear the vm_pgoff field before calling that function. However, that broke the userspace (profiler tool) as they relied on searching the /proc/self/maps for these values to correctly "disassemble" the topology recipe. To re-enable that functionality, the driver can simply restore the value of vm_pgoff before returning to userspace but after calling dma_mmap_coherent(). Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_buffer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 03ffcead1855..0c482358f350 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -517,6 +517,7 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) } cb->mmap_size = cb->size; + vma->vm_pgoff = handle; return 0; -- cgit v1.2.3 From d1ddd90551b2ec3ed3b72daa09ecf38c174c1f61 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 19 Oct 2020 17:04:20 +0300 Subject: habanalabs: move HW dirty check to a proper location Driver must verify if HW is dirty before trying to fetch preboot information. Hence, we move this validation to a prior stage of the boot sequence. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 7 ----- drivers/misc/habanalabs/common/habanalabs.h | 6 ++-- drivers/misc/habanalabs/common/habanalabs_drv.c | 12 ++++---- drivers/misc/habanalabs/common/pci.c | 23 +-------------- drivers/misc/habanalabs/gaudi/gaudi.c | 35 +++++++++++++++++------ drivers/misc/habanalabs/goya/goya.c | 38 ++++++++++++++++++------- 6 files changed, 63 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 59308a612b36..348faf31668b 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1278,13 +1278,6 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hl_debugfs_add_device(hdev); - if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { - dev_info(hdev->dev, - "H/W state is dirty, must reset before initializing\n"); - hdev->asic_funcs->halt_engines(hdev, true); - hdev->asic_funcs->hw_fini(hdev, true); - } - /* * From this point, in case of an error, add char devices and create * sysfs nodes as part of the error flow, to allow debugging. 
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 9c7594d0ca07..42988f12fb00 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -917,7 +917,6 @@ struct hl_asic_funcs { size_t max_size); int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, u16 len, u32 timeout, long *result); - enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev); int (*pci_bars_map)(struct hl_device *hdev); int (*init_iatu)(struct hl_device *hdev); u32 (*rreg)(struct hl_device *hdev, u32 reg); @@ -1901,6 +1900,7 @@ struct hl_device { u8 hard_reset_on_fw_events; u8 bmc_enable; u8 rl_enable; + u8 reset_on_preboot_fail; }; @@ -2148,9 +2148,7 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region, struct hl_inbound_pci_region *pci_region); int hl_pci_set_outbound_region(struct hl_device *hdev, struct hl_outbound_pci_region *pci_region); -int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 cpu_security_boot_status_reg, u32 boot_err0_reg, - u32 preboot_ver_timeout); +int hl_pci_init(struct hl_device *hdev); void hl_pci_fini(struct hl_device *hdev); long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c index aac798f3296e..6bbb6bca6860 100644 --- a/drivers/misc/habanalabs/common/habanalabs_drv.c +++ b/drivers/misc/habanalabs/common/habanalabs_drv.c @@ -234,20 +234,20 @@ out_err: static void set_driver_behavior_per_device(struct hl_device *hdev) { - hdev->mmu_enable = 1; hdev->cpu_enable = 1; - hdev->fw_loading = 1; + hdev->fw_loading = FW_TYPE_ALL_TYPES; hdev->cpu_queues_enable = 1; hdev->heartbeat = 1; + hdev->mmu_enable = 1; hdev->clock_gating_mask = ULONG_MAX; - - hdev->reset_pcilink = 0; - hdev->axi_drain = 0; hdev->sram_scrambler_enable = 1; hdev->dram_scrambler_enable = 1; hdev->bmc_enable = 1; hdev->hard_reset_on_fw_events = 1; - hdev->fw_loading = FW_TYPE_ALL_TYPES; + hdev->reset_on_preboot_fail = 1; + + hdev->reset_pcilink = 0; + hdev->axi_drain = 0; } /* diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c index 02152d85cf19..923b2606e29f 100644 --- a/drivers/misc/habanalabs/common/pci.c +++ b/drivers/misc/habanalabs/common/pci.c @@ -338,20 +338,12 @@ static int hl_pci_set_dma_mask(struct hl_device *hdev) /** * hl_pci_init() - PCI initialization code. * @hdev: Pointer to hl_device structure. - * @cpu_boot_status_reg: status register of the device's CPU - * @cpu_security_boot_status_reg: status register of device's CPU security - * configuration - * @boot_err0_reg: boot error register of the device's CPU - * @preboot_ver_timeout: how much to wait before bailing out on reading - * the preboot version * * Set DMA masks, initialize the PCI controller and map the PCI BARs. * * Return: 0 on success, non-zero for failure. 
*/ -int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, - u32 cpu_security_boot_status_reg, u32 boot_err0_reg, - u32 preboot_ver_timeout) +int hl_pci_init(struct hl_device *hdev) { struct pci_dev *pdev = hdev->pdev; int rc; @@ -383,19 +375,6 @@ int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg, if (rc) goto unmap_pci_bars; - /* Before continuing in the initialization, we need to read the preboot - * version to determine whether we run with a security-enabled firmware - * The check will be done in each ASIC's specific code - */ - rc = hl_fw_read_preboot_status(hdev, cpu_boot_status_reg, - cpu_security_boot_status_reg, boot_err0_reg, - preboot_ver_timeout); - if (rc) { - dev_err(hdev->dev, "Failed to read preboot version\n"); - hdev->asic_funcs->hw_fini(hdev, true); - goto unmap_pci_bars; - } - return 0; unmap_pci_bars: diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 770cd1b94e4f..eb47aff92e01 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -604,6 +604,11 @@ done: return rc; } +static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + static int gaudi_early_init(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -641,14 +646,32 @@ static int gaudi_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID); - rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, - GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + rc = hl_pci_init(hdev); if (rc) goto free_queue_props; + if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->hw_fini(hdev, true); + } + + /* Before continuing in the initialization, we need to read the preboot + * version to determine whether we run with a security-enabled firmware + */ + rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) { + if (hdev->reset_on_preboot_fail) + hdev->asic_funcs->hw_fini(hdev, true); + goto pci_fini; + } + return 0; +pci_fini: + hl_pci_fini(hdev); free_queue_props: kfree(hdev->asic_prop.hw_queues_props); return rc; @@ -7691,11 +7714,6 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, return 0; } -static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev) -{ - return RREG32(mmHW_STATE); -} - static int gaudi_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *ctx) { @@ -8252,7 +8270,6 @@ static const struct hl_asic_funcs gaudi_funcs = { .get_pci_id = gaudi_get_pci_id, .get_eeprom_data = gaudi_get_eeprom_data, .send_cpu_message = gaudi_send_cpu_message, - .get_hw_state = gaudi_get_hw_state, .pci_bars_map = gaudi_pci_bars_map, .init_iatu = gaudi_init_iatu, .rreg = hl_rreg, diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index d7ceda7dd048..002fc53fb39d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -553,6 +553,11 @@ done: return rc; } +static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmHW_STATE); +} + /* * goya_early_init - GOYA early initialization code * @@ -602,12 +607,28 @@ static int goya_early_init(struct hl_device *hdev) prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); - rc 
= hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, - mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, - GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + rc = hl_pci_init(hdev); if (rc) goto free_queue_props; + if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->hw_fini(hdev, true); + } + + /* Before continuing in the initialization, we need to read the preboot + * version to determine whether we run with a security-enabled firmware + */ + rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS, + mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0, + GOYA_BOOT_FIT_REQ_TIMEOUT_USEC); + if (rc) { + if (hdev->reset_on_preboot_fail) + hdev->asic_funcs->hw_fini(hdev, true); + goto pci_fini; + } + if (!hdev->pldm) { val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK) @@ -617,6 +638,8 @@ static int goya_early_init(struct hl_device *hdev) return 0; +pci_fini: + hl_pci_fini(hdev); free_queue_props: kfree(hdev->asic_prop.hw_queues_props); return rc; @@ -2630,7 +2653,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) "Timeout while waiting for device to reset 0x%x\n", status); - if (!hard_reset) { + if (!hard_reset && goya) { goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME | HW_CAP_GOLDEN | HW_CAP_TPC); WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, @@ -2651,6 +2674,7 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) HW_CAP_DMA | HW_CAP_MME | HW_CAP_MMU | HW_CAP_TPC_MBIST | HW_CAP_GOLDEN | HW_CAP_TPC); + memset(goya->events_stat, 0, sizeof(goya->events_stat)); } } @@ -5274,11 +5298,6 @@ static int goya_get_eeprom_data(struct hl_device *hdev, void *data, return hl_fw_get_eeprom_data(hdev, data, max_size); } -static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) -{ - return RREG32(mmHW_STATE); -} - static int goya_ctx_init(struct hl_ctx *ctx) { return 0; @@ -5414,7 +5433,6 @@ static const struct hl_asic_funcs goya_funcs = { .get_pci_id = goya_get_pci_id, .get_eeprom_data = goya_get_eeprom_data, .send_cpu_message = goya_send_cpu_message, - .get_hw_state = goya_get_hw_state, .pci_bars_map = goya_pci_bars_map, .init_iatu = goya_init_iatu, .rreg = hl_rreg, -- cgit v1.2.3 From 784b916dad2f9968bf732b061914390db24ddf8a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 22 Oct 2020 11:05:55 +0300 Subject: habanalabs: refactor mmu va_range db structure Use an array of va_ranges instead of keeping each va_range separately, we do this for better readability and in order to support access to a specific range in a much elegant manner. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 24 +++-- drivers/misc/habanalabs/common/memory.c | 130 ++++++++++++++-------------- 2 files changed, 82 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 42988f12fb00..40b566c4b791 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -953,18 +953,33 @@ struct hl_asic_funcs { #define HL_KERNEL_ASID_ID 0 +/** + * enum hl_va_range_type - virtual address range type. 
+ * @HL_VA_RANGE_TYPE_HOST: range type of host pages + * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages + * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages + */ +enum hl_va_range_type { + HL_VA_RANGE_TYPE_HOST, + HL_VA_RANGE_TYPE_HOST_HUGE, + HL_VA_RANGE_TYPE_DRAM, + HL_VA_RANGE_TYPE_MAX +}; + /** * struct hl_va_range - virtual addresses range. * @lock: protects the virtual addresses list. * @list: list of virtual addresses blocks available for mappings. * @start_addr: range start address. * @end_addr: range end address. + * @page_size: page size of this va range. */ struct hl_va_range { struct mutex lock; struct list_head list; u64 start_addr; u64 end_addr; + u32 page_size; }; /** @@ -993,10 +1008,7 @@ struct hl_cs_counters_atomic { * @refcount: reference counter for the context. Context is released only when * this hits 0l. It is incremented on CS and CS_WAIT. * @cs_pending: array of hl fence objects representing pending CS. - * @host_va_range: holds available virtual addresses for host mappings. - * @host_huge_va_range: holds available virtual addresses for host mappings - * with huge pages. - * @dram_va_range: holds available virtual addresses for DRAM mappings. + * @va_range: holds available virtual addresses for host and dram mappings. * @mem_hash_lock: protects the mem_hash. * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the * MMU hash or walking the PGT requires talking this lock. @@ -1028,9 +1040,7 @@ struct hl_ctx { struct hl_device *hdev; struct kref refcount; struct hl_fence **cs_pending; - struct hl_va_range *host_va_range; - struct hl_va_range *host_huge_va_range; - struct hl_va_range *dram_va_range; + struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX]; struct mutex mem_hash_lock; struct mutex mmu_lock; struct list_head debugfs_list; diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index e00ad11dc5f7..02233899336f 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -908,7 +908,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, /* get required alignment */ if (phys_pg_pack->page_size == page_size) { - va_range = ctx->host_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; /* * huge page alignment may be needed in case of regular @@ -923,7 +923,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, * huge page alignment is needed in case of huge page * mapping */ - va_range = ctx->host_huge_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; va_block_align = huge_page_size; } } else { @@ -948,7 +948,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, hint_addr = args->map_device.hint_addr; /* DRAM VA alignment is the same as the DRAM page size */ - va_range = ctx->dram_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; va_block_align = hdev->asic_prop.dmmu.page_size; } @@ -1093,12 +1093,12 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free) if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size) - va_range = ctx->host_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; else - va_range = ctx->host_huge_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]; } else if (*vm_type == VM_TYPE_PHYS_PACK) { is_userptr = false; - va_range = ctx->dram_va_range; + va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM]; phys_pg_pack = hnode->ptr; } else { dev_warn(hdev->dev, @@ -1556,7 +1556,7 @@ bool 
hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, * addresses. */ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, - u64 start, u64 end) + u64 start, u64 end, u32 page_size) { int rc; @@ -1586,6 +1586,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, va_range->start_addr = start; va_range->end_addr = end; + va_range->page_size = page_size; return 0; } @@ -1598,8 +1599,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range, * This function does the following: * - Frees the virtual addresses block list and its lock */ -static void va_range_fini(struct hl_device *hdev, - struct hl_va_range *va_range) +static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range) { mutex_lock(&va_range->lock); clear_va_list_locked(hdev, &va_range->list); @@ -1629,101 +1629,96 @@ static void va_range_fini(struct hl_device *hdev, static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, u64 host_range_end, + u32 host_page_size, u64 host_huge_range_start, u64 host_huge_range_end, + u32 host_huge_page_size, u64 dram_range_start, - u64 dram_range_end) + u64 dram_range_end, + u32 dram_page_size) { struct hl_device *hdev = ctx->hdev; - int rc; - - ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL); - if (!ctx->host_va_range) - return -ENOMEM; - - ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range), - GFP_KERNEL); - if (!ctx->host_huge_va_range) { - rc = -ENOMEM; - goto host_huge_va_range_err; - } - - ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL); - if (!ctx->dram_va_range) { - rc = -ENOMEM; - goto dram_va_range_err; + int i, rc; + + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) { + ctx->va_range[i] = + kzalloc(sizeof(struct hl_va_range), GFP_KERNEL); + if (!ctx->va_range[i]) { + rc = -ENOMEM; + goto free_va_range; + } } rc = hl_mmu_ctx_init(ctx); if (rc) { dev_err(hdev->dev, "failed to init context %d\n", ctx->asid); - goto mmu_ctx_err; + goto free_va_range; } mutex_init(&ctx->mem_hash_lock); hash_init(ctx->mem_hash); - mutex_init(&ctx->host_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); - rc = va_range_init(hdev, ctx->host_va_range, host_range_start, - host_range_end); + rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST], + host_range_start, host_range_end, host_page_size); if (rc) { dev_err(hdev->dev, "failed to init host vm range\n"); - goto host_page_range_err; + goto mmu_ctx_fini; } if (hdev->pmmu_huge_range) { - mutex_init(&ctx->host_huge_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); - rc = va_range_init(hdev, ctx->host_huge_va_range, - host_huge_range_start, - host_huge_range_end); + rc = va_range_init(hdev, + ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE], + host_huge_range_start, host_huge_range_end, + host_huge_page_size); if (rc) { dev_err(hdev->dev, "failed to init host huge vm range\n"); - goto host_hpage_range_err; + goto clear_host_va_range; } } else { - ctx->host_huge_va_range = ctx->host_va_range; + ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] = + ctx->va_range[HL_VA_RANGE_TYPE_HOST]; } - mutex_init(&ctx->dram_va_range->lock); + mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); - rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start, - dram_range_end); + rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM], + dram_range_start, dram_range_end, dram_page_size); if (rc) { dev_err(hdev->dev, "failed to init dram vm range\n"); - 
goto dram_vm_err; + goto clear_host_huge_va_range; } hl_debugfs_add_ctx_mem_hash(hdev, ctx); return 0; -dram_vm_err: - mutex_destroy(&ctx->dram_va_range->lock); +clear_host_huge_va_range: + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock); if (hdev->pmmu_huge_range) { - mutex_lock(&ctx->host_huge_va_range->lock); - clear_va_list_locked(hdev, &ctx->host_huge_va_range->list); - mutex_unlock(&ctx->host_huge_va_range->lock); + mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); + clear_va_list_locked(hdev, + &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list); + mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); } -host_hpage_range_err: +clear_host_va_range: if (hdev->pmmu_huge_range) - mutex_destroy(&ctx->host_huge_va_range->lock); - mutex_lock(&ctx->host_va_range->lock); - clear_va_list_locked(hdev, &ctx->host_va_range->list); - mutex_unlock(&ctx->host_va_range->lock); -host_page_range_err: - mutex_destroy(&ctx->host_va_range->lock); + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock); + mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); + clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list); + mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); +mmu_ctx_fini: + mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock); mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); -mmu_ctx_err: - kfree(ctx->dram_va_range); -dram_va_range_err: - kfree(ctx->host_huge_va_range); -host_huge_va_range_err: - kfree(ctx->host_va_range); +free_va_range: + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) + kfree(ctx->va_range[i]); return rc; } @@ -1733,6 +1728,7 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; u64 host_range_start, host_range_end, host_huge_range_start, host_huge_range_end, dram_range_start, dram_range_end; + u32 host_page_size, host_huge_page_size, dram_page_size; atomic64_set(&ctx->dram_phys_mem, 0); @@ -1748,14 +1744,18 @@ int hl_vm_ctx_init(struct hl_ctx *ctx) dram_range_start = prop->dmmu.start_addr; dram_range_end = prop->dmmu.end_addr; + dram_page_size = prop->dmmu.page_size; host_range_start = prop->pmmu.start_addr; host_range_end = prop->pmmu.end_addr; + host_page_size = prop->pmmu.page_size; host_huge_range_start = prop->pmmu_huge.start_addr; host_huge_range_end = prop->pmmu_huge.end_addr; + host_huge_page_size = prop->pmmu_huge.page_size; return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, - host_huge_range_start, host_huge_range_end, - dram_range_start, dram_range_end); + host_page_size, host_huge_range_start, + host_huge_range_end, host_huge_page_size, + dram_range_start, dram_range_end, dram_page_size); } /* @@ -1824,10 +1824,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) } spin_unlock(&vm->idr_lock); - va_range_fini(hdev, ctx->dram_va_range); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]); if (hdev->pmmu_huge_range) - va_range_fini(hdev, ctx->host_huge_va_range); - va_range_fini(hdev, ctx->host_va_range); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); -- cgit v1.2.3 From 804a72276cc4a1e27822d611907350b74f7fe561 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Fri, 30 Oct 2020 11:16:23 +0200 Subject: habanalabs: Rename hw_queues_mirror to cs_mirror Future command submission types might be submitted to HW not via the QMAN queues path. 
However, it would be still required to have the TDR mechanism for these CS, and thus the patch renames the TDR fields and replaces the hw_queues_ prefix with cs_. Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 18 ++++++++---------- drivers/misc/habanalabs/common/device.c | 4 ++-- drivers/misc/habanalabs/common/habanalabs.h | 8 ++++---- drivers/misc/habanalabs/common/hw_queue.c | 12 ++++++------ 4 files changed, 20 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 20b34fb054ee..749ec6c95fc2 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -343,25 +343,24 @@ static void cs_do_release(struct kref *ref) /* Need to update CI for internal queues */ hl_int_hw_queue_update_ci(cs); - spin_lock(&hdev->hw_queues_mirror_lock); - /* remove CS from hw_queues mirror list */ + /* remove CS from CS mirror list */ + spin_lock(&hdev->cs_mirror_lock); list_del_init(&cs->mirror_node); - spin_unlock(&hdev->hw_queues_mirror_lock); + spin_unlock(&hdev->cs_mirror_lock); /* Don't cancel TDR in case this CS was timedout because we might be * running from the TDR context */ - if (!cs->timedout && - hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { + if (!cs->timedout && hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) { struct hl_cs *next; if (cs->tdr_active) cancel_delayed_work_sync(&cs->work_tdr); - spin_lock(&hdev->hw_queues_mirror_lock); + spin_lock(&hdev->cs_mirror_lock); /* queue TDR for next CS */ - next = list_first_entry_or_null(&hdev->hw_queues_mirror_list, + next = list_first_entry_or_null(&hdev->cs_mirror_list, struct hl_cs, mirror_node); if (next && !next->tdr_active) { @@ -370,7 +369,7 @@ static void cs_do_release(struct kref *ref) hdev->timeout_jiffies); } - spin_unlock(&hdev->hw_queues_mirror_lock); + spin_unlock(&hdev->cs_mirror_lock); } out: @@ -534,8 +533,7 @@ void hl_cs_rollback_all(struct hl_device *hdev) flush_workqueue(hdev->cq_wq[i]); /* Make sure we don't have leftovers in the H/W queues mirror list */ - list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, - mirror_node) { + list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { cs_get(cs); cs->aborted = true; dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 348faf31668b..ce0a1270e5ff 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -343,8 +343,8 @@ static int device_early_init(struct hl_device *hdev) mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->debug_lock); mutex_init(&hdev->mmu_cache_lock); - INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); - spin_lock_init(&hdev->hw_queues_mirror_lock); + INIT_LIST_HEAD(&hdev->cs_mirror_list); + spin_lock_init(&hdev->cs_mirror_lock); INIT_LIST_HEAD(&hdev->fpriv_list); mutex_init(&hdev->fpriv_list_lock); atomic_set(&hdev->in_reset, 0); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 40b566c4b791..eb968c30adb9 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -1699,8 +1699,8 @@ struct hl_mmu_funcs { * @eq_wq: work queue of event queue for executing work in process context. 
* @kernel_ctx: Kernel driver context structure. * @kernel_queues: array of hl_hw_queue. - * @hw_queues_mirror_list: CS mirror list for TDR. - * @hw_queues_mirror_lock: protects hw_queues_mirror_list. + * @cs_mirror_list: CS mirror list for TDR. + * @cs_mirror_lock: protects cs_mirror_list. * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. * @event_queue: event queue for IRQ from CPU-CP. * @dma_pool: DMA pool for small allocations. @@ -1810,8 +1810,8 @@ struct hl_device { struct workqueue_struct *eq_wq; struct hl_ctx *kernel_ctx; struct hl_hw_queue *kernel_queues; - struct list_head hw_queues_mirror_list; - spinlock_t hw_queues_mirror_lock; + struct list_head cs_mirror_list; + spinlock_t cs_mirror_lock; struct hl_cb_mgr kernel_cb_mgr; struct hl_eq event_queue; struct dma_pool *dma_pool; diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index f9550fcf5500..d1d30fb36410 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -578,20 +578,20 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) else if (cs->type == CS_TYPE_COLLECTIVE_WAIT) hdev->asic_funcs->collective_wait_init_cs(cs); - spin_lock(&hdev->hw_queues_mirror_lock); - list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); + spin_lock(&hdev->cs_mirror_lock); + list_add_tail(&cs->mirror_node, &hdev->cs_mirror_list); /* Queue TDR if the CS is the first entry and if timeout is wanted */ if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && - (list_first_entry(&hdev->hw_queues_mirror_list, + (list_first_entry(&hdev->cs_mirror_list, struct hl_cs, mirror_node) == cs)) { cs->tdr_active = true; schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); - spin_unlock(&hdev->hw_queues_mirror_lock); - } else { - spin_unlock(&hdev->hw_queues_mirror_lock); + } + spin_unlock(&hdev->cs_mirror_lock); + if (!hdev->cs_active_cnt++) { struct hl_device_idle_busy_ts *ts; -- cgit v1.2.3 From adb51298fdc41b89f47c520171d86e4859cc7731 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 8 Oct 2020 10:27:42 +0300 Subject: habanalabs: improve hard reset procedure We want to handle the scenario in which the driver was not able to kill all user processes due to many memory mappings. We need to retry again after some period while releasing the cores. The devices will be unusable and "in-reset" status during that time. 
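As a rough sketch of the retry mechanism described above (all names below are invented for illustration and heavily simplified from the driver code in the diff that follows), the hard-reset handler becomes a delayed work item on a dedicated workqueue that requeues itself while user processes still hold mappings:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define DEMO_RETRY_SEC 10                /* hypothetical retry period */

    struct demo_dev {
            struct workqueue_struct *reset_wq;
            struct delayed_work reset_work;
            u8 fini_pending;                 /* set when the device is being removed */
    };

    /* Stand-in for the real process-kill loop: returns -EBUSY while user
     * processes still hold memory mappings on the device.
     */
    static int demo_kill_open_processes(struct demo_dev *ddev)
    {
            return -EBUSY;
    }

    static void demo_hard_reset_fn(struct work_struct *work)
    {
            struct demo_dev *ddev =
                    container_of(work, struct demo_dev, reset_work.work);

            if (demo_kill_open_processes(ddev) == -EBUSY && !ddev->fini_pending)
                    /* Retry later; the device stays "in-reset" meanwhile. */
                    queue_delayed_work(ddev->reset_wq, &ddev->reset_work,
                                       msecs_to_jiffies(DEMO_RETRY_SEC * 1000));
    }

Compared with the previous one-shot, dynamically allocated work item, pre-allocating the delayed work inside the device structure also lets device teardown flush it and raise a fini-pending flag to stop further retries, which is what the patch below does.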
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/device.c | 128 ++++++++++++++++++++-------- drivers/misc/habanalabs/common/habanalabs.h | 16 +++- 2 files changed, 106 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index ce0a1270e5ff..c9011541c647 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -13,8 +13,6 @@ #include #include -#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10) - enum hl_device_status hl_device_status(struct hl_device *hdev) { enum hl_device_status status; @@ -256,6 +254,26 @@ static void device_cdev_sysfs_del(struct hl_device *hdev) cdev_device_del(&hdev->cdev, hdev->dev); } +static void device_hard_reset_pending(struct work_struct *work) +{ + struct hl_device_reset_work *device_reset_work = + container_of(work, struct hl_device_reset_work, + reset_work.work); + struct hl_device *hdev = device_reset_work->hdev; + int rc; + + rc = hl_device_reset(hdev, true, true); + if ((rc == -EBUSY) && !hdev->device_fini_pending) { + dev_info(hdev->dev, + "Could not reset device. will try again in %u seconds", + HL_PENDING_RESET_PER_SEC); + + queue_delayed_work(device_reset_work->wq, + &device_reset_work->reset_work, + msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + } +} + /* * device_early_init - do some early initialization for the habanalabs device * @@ -340,6 +358,19 @@ static int device_early_init(struct hl_device *hdev) hl_cb_mgr_init(&hdev->kernel_cb_mgr); + hdev->device_reset_work.wq = + create_singlethread_workqueue("hl_device_reset"); + if (!hdev->device_reset_work.wq) { + rc = -ENOMEM; + dev_err(hdev->dev, "Failed to create device reset WQ\n"); + goto free_cb_mgr; + } + + INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, + device_hard_reset_pending); + hdev->device_reset_work.hdev = hdev; + hdev->device_fini_pending = 0; + mutex_init(&hdev->send_cpu_message_lock); mutex_init(&hdev->debug_lock); mutex_init(&hdev->mmu_cache_lock); @@ -351,6 +382,8 @@ static int device_early_init(struct hl_device *hdev) return 0; +free_cb_mgr: + hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); free_idle_busy_ts_arr: kfree(hdev->idle_busy_ts_arr); free_chip_info: @@ -393,6 +426,7 @@ static void device_early_fini(struct hl_device *hdev) kfree(hdev->hl_chip_info); destroy_workqueue(hdev->eq_wq); + destroy_workqueue(hdev->device_reset_work.wq); for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) destroy_workqueue(hdev->cq_wq[i]); @@ -771,16 +805,12 @@ disable_device: return rc; } -static int device_kill_open_processes(struct hl_device *hdev) +static int device_kill_open_processes(struct hl_device *hdev, u32 timeout) { - u16 pending_total, pending_cnt; struct hl_fpriv *hpriv; struct task_struct *task = NULL; + u32 pending_cnt; - if (hdev->pldm) - pending_total = HL_PLDM_PENDING_RESET_PER_SEC; - else - pending_total = HL_PENDING_RESET_PER_SEC; /* Giving time for user to close FD, and for processes that are inside * hl_device_open to finish @@ -788,6 +818,19 @@ static int device_kill_open_processes(struct hl_device *hdev) if (!list_empty(&hdev->fpriv_list)) ssleep(1); + if (timeout) { + pending_cnt = timeout; + } else { + if (hdev->process_kill_trial_cnt) { + /* Processes have been already killed */ + pending_cnt = 1; + goto wait_for_processes; + } else { + /* Wait a small period after process kill */ + pending_cnt = HL_PENDING_RESET_PER_SEC; + } + 
} + mutex_lock(&hdev->fpriv_list_lock); /* This section must be protected because we are dereferencing @@ -816,29 +859,27 @@ static int device_kill_open_processes(struct hl_device *hdev) * continuing with the reset. */ - pending_cnt = pending_total; - +wait_for_processes: while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) { - dev_info(hdev->dev, - "Waiting for all user contexts to get closed before hard reset\n"); + dev_dbg(hdev->dev, + "Waiting for all unmap operations to finish before hard reset\n"); pending_cnt--; ssleep(1); } - return list_empty(&hdev->fpriv_list) ? 0 : -EBUSY; -} + /* All processes exited successfully */ + if (list_empty(&hdev->fpriv_list)) + return 0; -static void device_hard_reset_pending(struct work_struct *work) -{ - struct hl_device_reset_work *device_reset_work = - container_of(work, struct hl_device_reset_work, reset_work); - struct hl_device *hdev = device_reset_work->hdev; + /* Give up waiting for processes to exit */ + if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS) + return -ETIME; - hl_device_reset(hdev, true, true); + hdev->process_kill_trial_cnt++; - kfree(device_reset_work); + return -EBUSY; } /* @@ -875,6 +916,10 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, hard_reset = true; } + /* Re-entry of reset thread */ + if (from_hard_reset_thread && hdev->process_kill_trial_cnt) + goto kill_processes; + /* * Prevent concurrency in this function - only one reset should be * done at any given time. Only need to perform this if we didn't @@ -920,26 +965,17 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, again: if ((hard_reset) && (!from_hard_reset_thread)) { - struct hl_device_reset_work *device_reset_work; - hdev->hard_reset_pending = true; - device_reset_work = kzalloc(sizeof(*device_reset_work), - GFP_ATOMIC); - if (!device_reset_work) { - rc = -ENOMEM; - goto out_err; - } + hdev->process_kill_trial_cnt = 0; /* * Because the reset function can't run from interrupt or * from heartbeat work, we need to call the reset function * from a dedicated work */ - INIT_WORK(&device_reset_work->reset_work, - device_hard_reset_pending); - device_reset_work->hdev = hdev; - schedule_work(&device_reset_work->reset_work); + queue_delayed_work(hdev->device_reset_work.wq, + &hdev->device_reset_work.reset_work, 0); return 0; } @@ -965,12 +1001,25 @@ again: /* Go over all the queues, release all CS and their jobs */ hl_cs_rollback_all(hdev); +kill_processes: if (hard_reset) { /* Kill processes here after CS rollback. 
This is because the * process can't really exit until all its CSs are done, which * is what we do in cs rollback */ - rc = device_kill_open_processes(hdev); + rc = device_kill_open_processes(hdev, 0); + + if (rc == -EBUSY) { + if (hdev->device_fini_pending) { + dev_crit(hdev->dev, + "Failed to kill all open processes, stopping hard reset\n"); + goto out_err; + } + + /* signal reset thread to reschedule */ + return rc; + } + if (rc) { dev_crit(hdev->dev, "Failed to kill all open processes, stopping hard reset\n"); @@ -1408,11 +1457,14 @@ out_disabled: */ void hl_device_fini(struct hl_device *hdev) { - int i, rc; ktime_t timeout; + int i, rc; dev_info(hdev->dev, "Removing device\n"); + hdev->device_fini_pending = 1; + flush_delayed_work(&hdev->device_reset_work.reset_work); + /* * This function is competing with the reset function, so try to * take the reset atomic and if we are already in middle of reset, @@ -1468,7 +1520,11 @@ void hl_device_fini(struct hl_device *hdev) * can't really exit until all its CSs are done, which is what we * do in cs rollback */ - rc = device_kill_open_processes(hdev); + dev_info(hdev->dev, + "Waiting for all processes to exit (timeout of %u seconds)", + HL_PENDING_RESET_LONG_SEC); + + rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC); if (rc) dev_crit(hdev->dev, "Failed to kill all open processes\n"); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index eb968c30adb9..87060cd2c525 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -40,7 +40,9 @@ #define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT) #define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK) -#define HL_PENDING_RESET_PER_SEC 30 +#define HL_PENDING_RESET_PER_SEC 10 +#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */ +#define HL_PENDING_RESET_LONG_SEC 60 #define HL_HARD_RESET_MAX_TIMEOUT 120 @@ -1588,11 +1590,13 @@ struct hwmon_chip_info; /** * struct hl_device_reset_work - reset workqueue task wrapper. + * @wq: work queue for device reset procedure. * @reset_work: reset work to be done. * @hdev: habanalabs device structure. */ struct hl_device_reset_work { - struct work_struct reset_work; + struct workqueue_struct *wq; + struct delayed_work reset_work; struct hl_device *hdev; }; @@ -1691,6 +1695,7 @@ struct hl_mmu_funcs { * @dev_ctrl: related kernel device structure for the control device * @work_freq: delayed work to lower device frequency if possible. * @work_heartbeat: delayed work for CPU-CP is-alive check. + * @device_reset_work: delayed work which performs hard reset * @asic_name: ASIC specific name. * @asic_type: ASIC specific type. * @completion_queue: array of hl_cq. @@ -1790,6 +1795,10 @@ struct hl_mmu_funcs { * @supports_cb_mapping: is mapping a CB to the device's MMU supported. * @needs_reset: true if reset_on_lockup is false and device should be reset * due to lockup. 
+ * @process_kill_trial_cnt: number of trials reset thread tried killing + * user processes + * @device_fini_pending: true if device_fini was called and might be + * waiting for the reset thread to finish */ struct hl_device { struct pci_dev *pdev; @@ -1802,6 +1811,7 @@ struct hl_device { struct device *dev_ctrl; struct delayed_work work_freq; struct delayed_work work_heartbeat; + struct hl_device_reset_work device_reset_work; char asic_name[HL_STR_MAX]; char status[HL_DEV_STS_MAX][HL_STR_MAX]; enum hl_asic_type asic_type; @@ -1894,6 +1904,8 @@ struct hl_device { u8 supports_soft_reset; u8 supports_cb_mapping; u8 needs_reset; + u8 process_kill_trial_cnt; + u8 device_fini_pending; /* Parameters for bring-up */ u64 nic_ports_mask; -- cgit v1.2.3 From be91b91fa40f5d2b1c8b79dbc34c1130de16f9e7 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 22 Oct 2020 15:04:10 +0300 Subject: habanalabs: use host va range for internal pools Instead of using a dedicated va range for each internal pool, we introduce a new way for reserving a va block from an existing va range. This is a more generic way of reserving va blocks for future use. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 4 ++ drivers/misc/habanalabs/common/memory.c | 79 +++++++++++++++++++++++++++++ drivers/misc/habanalabs/gaudi/gaudi.c | 18 +++++-- drivers/misc/habanalabs/gaudi/gaudiP.h | 5 +- 4 files changed, 98 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 87060cd2c525..822f90087e04 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2109,6 +2109,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx); int hl_vm_init(struct hl_device *hdev); void hl_vm_fini(struct hl_device *hdev); +u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + enum hl_va_range_type type, u32 size); +int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + u64 start_addr, u64 size); int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, struct hl_userptr *userptr); void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 02233899336f..01e93e83d3af 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -620,6 +620,85 @@ out: return res_valid_start; } +/* + * hl_reserve_va_block() - reserve a virtual block of a given size. + * @hdev: pointer to the habanalabs device structure. + * @ctx: current context + * @type: virtual addresses range type. + * @size: requested block size. + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * given size. + * - Reserve the requested block and update the list. + * - Return the start address of the virtual block. + */ +u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + enum hl_va_range_type type, u32 size) +{ + return get_va_block(hdev, ctx->va_range[type], size, 0, + ctx->va_range[type]->page_size); +} + +/** + * hl_get_va_range_type() - get va_range type for the given address and size. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. 
+ * @type: returned va_range type + * + * Return: true if the area is inside a valid range, false otherwise. + */ +static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size, + enum hl_va_range_type *type) +{ + int i; + + for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) { + if (hl_mem_area_inside_range(address, size, + ctx->va_range[i]->start_addr, + ctx->va_range[i]->end_addr)) { + *type = i; + return 0; + } + } + + return -EINVAL; +} + +/* + * hl_unreserve_va_block - wrapper for add_va_block for unreserving a va block + * + * @hdev: pointer to the habanalabs device structure + * @ctx: current context + * @start: start virtual address + * @end: end virtual address + * + * This function does the following: + * - Takes the list lock and calls add_va_block_locked + */ +int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, + u64 start_addr, u64 size) +{ + enum hl_va_range_type type; + int rc; + + rc = hl_get_va_range_type(ctx, start_addr, size, &type); + if (rc) { + dev_err(hdev->dev, + "cannot find va_range for va %#llx size %llu", + start_addr, size); + return rc; + } + + rc = add_va_block(hdev, ctx->va_range[type], start_addr, + start_addr + size - 1); + if (rc) + dev_warn(hdev->dev, + "add va block failed for vaddr: 0x%llx\n", start_addr); + + return rc; +} + /* * get_sg_info - get number of pages and the DMA address from SG list * diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index eb47aff92e01..e2b5160130d7 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7757,7 +7757,11 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, goto destroy_internal_cb_pool; } - hdev->internal_cb_va_base = VA_HOST_SPACE_INTERNAL_CB_START; + hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, + HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ); + + if (!hdev->internal_cb_va_base) + goto destroy_internal_cb_pool; mutex_lock(&ctx->mmu_lock); @@ -7765,7 +7769,7 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, * is aligned to HOST_SPACE_INTERNAL_CB_SZ */ for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { - va = VA_HOST_SPACE_INTERNAL_CB_START + off; + va = hdev->internal_cb_va_base + off; pa = hdev->internal_cb_pool_dma_addr + off; flush_pte = (off + PAGE_SIZE_4KB) >= HOST_SPACE_INTERNAL_CB_SZ; rc = hl_mmu_map(ctx, va, pa, PAGE_SIZE_4KB, flush_pte); @@ -7785,13 +7789,16 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, unmap: for (; off >= 0 ; off -= PAGE_SIZE_4KB) { - va = VA_HOST_SPACE_INTERNAL_CB_START + off; + va = hdev->internal_cb_va_base + off; flush_pte = (off - (s32) PAGE_SIZE_4KB) < 0; if (hl_mmu_unmap(ctx, va, PAGE_SIZE_4KB, flush_pte)) dev_warn_ratelimited(hdev->dev, "failed to unmap va 0x%llx\n", va); } + hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); mutex_unlock(&ctx->mmu_lock); @@ -7821,7 +7828,7 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, mutex_lock(&ctx->mmu_lock); for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { - va = VA_HOST_SPACE_INTERNAL_CB_START + off; + va = hdev->internal_cb_va_base + off; if (off + PAGE_SIZE_4KB >= HOST_SPACE_INTERNAL_CB_SZ) flush_pte = true; @@ -7831,6 +7838,9 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, "failed to unmap va 0x%llx\n", va); } + hl_unreserve_va_block(hdev, ctx, 
hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); + hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); mutex_unlock(&ctx->mmu_lock); diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index e882cf7f8cf7..1a5e681c720d 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -153,12 +153,9 @@ /* Virtual address space */ #define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ -#define VA_HOST_SPACE_END 0x3FF7FFFE00000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 512GB */ #define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ VA_HOST_SPACE_START) /* 767TB */ - -#define VA_HOST_SPACE_INTERNAL_CB_START 0x3FF7FFFE00000ull /* 1PB - 1TB - 2MB */ -#define VA_HOST_SPACE_INTERNAL_CB_END 0x3FF8000000000ull /* 1PB - 1TB */ #define HOST_SPACE_INTERNAL_CB_SZ SZ_2M #define HW_CAP_PLL BIT(0) -- cgit v1.2.3 From 7f070c913c361ed79dd29d9256b486b1b105d7f0 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 9 Nov 2020 09:48:31 +0200 Subject: habanalabs: move asic property to correct structure Whether an ASIC has MMU towards its DRAM is an ASIC property, so move it to the asic fixed properties structure. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/debugfs.c | 2 +- drivers/misc/habanalabs/common/habanalabs.h | 4 ++-- drivers/misc/habanalabs/common/memory.c | 6 +++--- drivers/misc/habanalabs/common/mmu_v1.c | 4 ++-- drivers/misc/habanalabs/gaudi/gaudi.c | 3 +-- drivers/misc/habanalabs/goya/goya.c | 2 +- 6 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 104b9686e57b..3d9bd86312e2 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -598,7 +598,7 @@ static bool hl_is_device_va(struct hl_device *hdev, u64 addr) if (!hdev->mmu_enable) goto out; - if (hdev->dram_supports_virtual_memory && + if (prop->dram_supports_virtual_memory && (addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr)) return true; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 822f90087e04..a1d82de60ef6 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -411,6 +411,7 @@ struct hl_mmu_properties { * false otherwise * @fw_security_status_valid: security status bits are valid and can be fetched * from BOOT_DEV_STS0 + * @dram_supports_virtual_memory: is there an MMU towards the DRAM */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -467,6 +468,7 @@ struct asic_fixed_properties { u8 completion_queues_count; u8 fw_security_disabled; u8 fw_security_status_valid; + u8 dram_supports_virtual_memory; }; /** @@ -1772,7 +1774,6 @@ struct hl_mmu_funcs { * @heartbeat: is heartbeat sanity check towards CPU-CP enabled. * @reset_on_lockup: true if a reset should be done in case of stuck CS, false * otherwise. - * @dram_supports_virtual_memory: is MMU enabled towards DRAM. * @dram_default_page_mapping: is DRAM default page mapping enabled. * @memory_scrub: true to perform device memory scrub in various locations, * such as context-switch, context close, page free, etc. 
@@ -1886,7 +1887,6 @@ struct hl_device { u8 hard_reset_pending; u8 heartbeat; u8 reset_on_lockup; - u8 dram_supports_virtual_memory; u8 dram_default_page_mapping; u8 memory_scrub; u8 pmmu_huge_range; diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 01e93e83d3af..f27ca80d3c3c 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1351,7 +1351,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) * the user's input, the driver can't ensure the validity * of this accounting. */ - if (!hdev->dram_supports_virtual_memory) { + if (!hdev->asic_prop.dram_supports_virtual_memory) { atomic64_add(args->in.alloc.mem_size, &ctx->dram_phys_mem); atomic64_add(args->in.alloc.mem_size, @@ -1380,7 +1380,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) * the user's input, the driver can't ensure the validity * of this accounting. */ - if (!hdev->dram_supports_virtual_memory) { + if (!hdev->asic_prop.dram_supports_virtual_memory) { atomic64_sub(args->in.alloc.mem_size, &ctx->dram_phys_mem); atomic64_sub(args->in.alloc.mem_size, @@ -1915,7 +1915,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) * because the user notifies us on allocations. If the user is no more, * all DRAM is available */ - if (!ctx->hdev->dram_supports_virtual_memory) + if (!ctx->hdev->asic_prop.dram_supports_virtual_memory) atomic64_set(&ctx->hdev->dram_used_mem, 0); } diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index ac3784523baa..5f62cb158eef 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -262,7 +262,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx) hop2_pte_addr, hop3_pte_addr, pte_val; int rc, i, j, hop3_allocated = 0; - if ((!hdev->dram_supports_virtual_memory) || + if ((!prop->dram_supports_virtual_memory) || (!hdev->dram_default_page_mapping) || (ctx->asid == HL_KERNEL_ASID_ID)) return 0; @@ -362,7 +362,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx) hop2_pte_addr, hop3_pte_addr; int i, j; - if ((!hdev->dram_supports_virtual_memory) || + if ((!prop->dram_supports_virtual_memory) || (!hdev->dram_default_page_mapping) || (ctx->asid == HL_KERNEL_ASID_ID)) return; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index e2b5160130d7..fe2e2b3d2a04 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -462,6 +462,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE; prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; prop->dram_page_size = PAGE_SIZE_2MB; + prop->dram_supports_virtual_memory = false; prop->pmmu.hop0_shift = HOP0_SHIFT; prop->pmmu.hop1_shift = HOP1_SHIFT; @@ -3562,8 +3563,6 @@ static int gaudi_mmu_init(struct hl_device *hdev) if (gaudi->hw_cap_initialized & HW_CAP_MMU) return 0; - hdev->dram_supports_virtual_memory = false; - for (i = 0 ; i < prop->max_asid ; i++) { hop0_addr = prop->mmu_pgt_addr + (i * prop->mmu_hop_table_size); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 002fc53fb39d..bf21f05f7849 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -410,6 +410,7 @@ int goya_get_fixed_properties(struct hl_device *hdev) prop->mmu_hop_table_size = HOP_TABLE_SIZE; prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; prop->dram_page_size = 
PAGE_SIZE_2MB; + prop->dram_supports_virtual_memory = true; prop->dmmu.hop0_shift = HOP0_SHIFT; prop->dmmu.hop1_shift = HOP1_SHIFT; @@ -2481,7 +2482,6 @@ int goya_mmu_init(struct hl_device *hdev) if (goya->hw_cap_initialized & HW_CAP_MMU) return 0; - hdev->dram_supports_virtual_memory = true; hdev->dram_default_page_mapping = true; for (i = 0 ; i < prop->max_asid ; i++) { -- cgit v1.2.3 From d611b9f0b199ebfaca78ec4205a889831fdd45cd Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 8 Nov 2020 13:10:09 +0200 Subject: habanalabs: fetch hard reset capability from FW Driver must fetch FW hard reset capability during boot time, in order to skip the hard reset flow if necessary. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 10 +++++++- drivers/misc/habanalabs/common/habanalabs.h | 2 ++ drivers/misc/habanalabs/gaudi/gaudi.c | 1 + drivers/misc/habanalabs/goya/goya.c | 1 + .../misc/habanalabs/include/common/hl_boot_if.h | 30 ++++++++++++++-------- 5 files changed, 32 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 2fc12e529241..b5464cd34071 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -784,10 +784,18 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, } /* Read FW application security bits */ - if (hdev->asic_prop.fw_security_status_valid) + if (hdev->asic_prop.fw_security_status_valid) { hdev->asic_prop.fw_app_security_map = RREG32(cpu_security_boot_status_reg); + if (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_FW_HARD_RST_EN) + hdev->asic_prop.hard_reset_done_by_fw = true; + } + + dev_info(hdev->dev, "Firmware hard-reset is %s\n", + hdev->asic_prop.hard_reset_done_by_fw ? 
"enabled" : "disabled"); + dev_info(hdev->dev, "Successfully loaded firmware to device\n"); out: diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index a1d82de60ef6..eeb78381177b 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -412,6 +412,7 @@ struct hl_mmu_properties { * @fw_security_status_valid: security status bits are valid and can be fetched * from BOOT_DEV_STS0 * @dram_supports_virtual_memory: is there an MMU towards the DRAM + * @hard_reset_done_by_fw: true if firmware is handling hard reset flow */ struct asic_fixed_properties { struct hw_queue_properties *hw_queues_props; @@ -469,6 +470,7 @@ struct asic_fixed_properties { u8 fw_security_disabled; u8 fw_security_status_valid; u8 dram_supports_virtual_memory; + u8 hard_reset_done_by_fw; }; /** diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index fe2e2b3d2a04..856cb1be9506 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -518,6 +518,7 @@ static int gaudi_get_fixed_properties(struct hl_device *hdev) /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; + prop->hard_reset_done_by_fw = false; return 0; } diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index bf21f05f7849..3398b4cc1298 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -458,6 +458,7 @@ int goya_get_fixed_properties(struct hl_device *hdev) /* disable fw security for now, set it in a later stage */ prop->fw_security_disabled = true; prop->fw_security_status_valid = false; + prop->hard_reset_done_by_fw = false; return 0; } diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index d928ad93cd80..60916780df35 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -84,45 +84,52 @@ * device is indicated as security enabled, * registers are protected, and device * uses keys for image verification. - * Initialized at: preboot + * Initialized in: preboot * * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled. * Enabled when JTAG or DEBUG is enabled * in FW. - * Initialized at: preboot + * Initialized in: preboot * * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled. * Watchdog is enabled in FW. - * Initialized at: preboot + * Initialized in: preboot * * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled. * DRAM initialization has been done in FW. - * Initialized at: u-boot + * Initialized in: u-boot * * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled. * If set, it means that during boot, * FW waited for BMC data. - * Initialized at: u-boot + * Initialized in: u-boot * * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized. * FW initialized E2E credits. - * Initialized at: u-boot + * Initialized in: u-boot * * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized. * FW initialized HBM credits. - * Initialized at: u-boot + * Initialized in: u-boot * * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized. * FW initialized rate limiter. - * Initialized at: u-boot + * Initialized in: u-boot * * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled. * FW initialized SRAM scrambler. 
- * Initialized at: linux + * Initialized in: linux * * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled. * FW initialized DRAM scrambler. - * Initialized at: u-boot + * Initialized in: u-boot + * + * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled. + * FW has the hard reset procedure + * implemented. This means that FW will + * perform hard reset procedure on + * receiving the halt-machine event. + * Initialized in: linux * * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. * This is a main indication that the @@ -130,7 +137,7 @@ * register. Meaning the device status * bits are not garbage, but actual * statuses. - * Initialized at: preboot + * Initialized in: preboot */ #define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0) #define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1) @@ -142,6 +149,7 @@ #define CPU_BOOT_DEV_STS0_RL_EN (1 << 7) #define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8) #define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9) +#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10) #define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) enum cpu_boot_status { -- cgit v1.2.3 From 5a2998f46c8555815b1bf367daf0bf42b0440300 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Mon, 5 Oct 2020 13:44:59 +0300 Subject: habanalabs/gaudi: fetch HBM ecc info from FW Once FW security is enabled there is no access to HBM ecc registers, need to read values from FW using a dedicated interface. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 47 +++++++++++++++++++---- drivers/misc/habanalabs/include/common/cpucp_if.h | 32 +++++++++++++++ 2 files changed, 72 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 856cb1be9506..f93809f7e3a0 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -6839,10 +6839,41 @@ static int gaudi_soft_reset_late_init(struct hl_device *hdev) return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events)); } -static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) +static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, + struct hl_eq_hbm_ecc_data *hbm_ecc_data) { - int ch, err = 0; - u32 base, val, val2; + u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch; + int err = 0; + + if (!hdev->asic_prop.fw_security_disabled) { + if (!hbm_ecc_data) { + dev_err(hdev->dev, "No FW ECC data"); + return 0; + } + + wr_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + rd_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + ca_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + derr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_DERR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + serr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_SERR_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + type = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + ch = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK, + le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); + + dev_err(hdev->dev, + "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch, wr_par, rd_par, ca_par, serr, derr); + + err = 1; + + return 0; + } base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET; for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) { @@ -6858,7 +6889,7 @@ static int 
gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) val2 = RREG32(base + ch * 0x1000 + 0x060); dev_err(hdev->dev, - "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n", device, ch * 2, RREG32(base + ch * 0x1000 + 0x064), (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, @@ -6878,7 +6909,7 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device) val2 = RREG32(base + ch * 0x1000 + 0x070); dev_err(hdev->dev, - "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DED_CNT=%d\n", + "HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n", device, ch * 2 + 1, RREG32(base + ch * 0x1000 + 0x074), (val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10, @@ -7079,7 +7110,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_HBM3_SPI_0: gaudi_print_irq_info(hdev, event_type, false); gaudi_hbm_read_interrupts(hdev, - gaudi_hbm_event_to_dev(event_type)); + gaudi_hbm_event_to_dev(event_type), + &eq_entry->hbm_ecc_data); if (hdev->hard_reset_on_fw_events) hl_device_reset(hdev, true, false); break; @@ -7090,7 +7122,8 @@ static void gaudi_handle_eqe(struct hl_device *hdev, case GAUDI_EVENT_HBM3_SPI_1: gaudi_print_irq_info(hdev, event_type, false); gaudi_hbm_read_interrupts(hdev, - gaudi_hbm_event_to_dev(event_type)); + gaudi_hbm_event_to_dev(event_type), + &eq_entry->hbm_ecc_data); break; case GAUDI_EVENT_TPC0_DEC: diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 1c1e2b394457..759c068b2b7a 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -11,6 +11,37 @@ #include #include +#define NUM_HBM_PSEUDO_CH 2 +#define NUM_HBM_CH_PER_DEV 8 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0 +#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1 +#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2 +#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3 +#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4 +#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5 +#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6 +#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0 + +struct hl_eq_hbm_ecc_data { + /* SERR counter */ + __le32 sec_cnt; + /* DERR counter */ + __le32 dec_cnt; + /* Supplemental Information according to the mask bits */ + __le32 hbm_ecc_info; + /* Address in hbm where the ecc happened */ + __le32 first_addr; + /* SERR continuous address counter */ + __le32 sec_cont_cnt; + __le32 pad; +}; + /* * EVENT QUEUE */ @@ -31,6 +62,7 @@ struct hl_eq_entry { struct hl_eq_header hdr; union { struct hl_eq_ecc_data ecc_data; + struct hl_eq_hbm_ecc_data hbm_ecc_data; __le64 data[7]; }; }; -- cgit v1.2.3 From 92ede12a07d613540c966866039e7359f373d9ac Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 10 Nov 2020 22:03:43 +0200 Subject: habanalabs: print message with correct device During hard-reset, the driver rejects further IOCTL calls and prints an error message. 
That error message should be printed with the correct device instead of using only the control device. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 0729cd43f297..ba8217fc9425 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -573,7 +573,7 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg, int retcode; if (hdev->hard_reset_pending) { - dev_crit_ratelimited(hdev->dev_ctrl, + dev_crit_ratelimited(dev, "Device HARD reset pending! Please close FD\n"); return -ENODEV; } -- cgit v1.2.3 From b2d09622bec3f59bc51417ef672c105f3c5fc4d7 Mon Sep 17 00:00:00 2001 From: Guy Nisan Date: Thu, 12 Nov 2020 17:52:52 +0200 Subject: habanalabs: add boot errors prints Add log prints for security and eFuse boot error bits Signed-off-by: Guy Nisan Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index b5464cd34071..e37d451e6415 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -510,6 +510,13 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) dev_err(hdev->dev, "Device boot error - NIC F/W initialization failed\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) + dev_warn(hdev->dev, + "Device boot warning - security not ready\n"); + if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) + dev_err(hdev->dev, "Device boot error - security failure\n"); + if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) + dev_err(hdev->dev, "Device boot error - eFuse failure\n"); security_val = RREG32(cpu_security_boot_status_reg); if (security_val & CPU_BOOT_DEV_STS0_ENABLED) -- cgit v1.2.3 From 412c41fcd526b239472a24974591a05227bf5225 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Wed, 4 Nov 2020 15:18:55 +0200 Subject: habanalabs: support reserving aligned va block Add support for reserving va block with alignment different than page size. This is a pre-requisite for allocations needed in future ASICs Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/habanalabs.h | 4 +++- drivers/misc/habanalabs/common/memory.c | 8 +++++--- drivers/misc/habanalabs/gaudi/gaudi.c | 3 ++- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index eeb78381177b..76ab9741895d 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -303,6 +303,8 @@ enum hl_device_hw_state { HL_DEVICE_HW_STATE_DIRTY }; +#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0 + /** * struct hl_mmu_properties - ASIC specific MMU address translation properties. * @start_addr: virtual start address of the memory region. 
@@ -2112,7 +2114,7 @@ int hl_vm_init(struct hl_device *hdev); void hl_vm_fini(struct hl_device *hdev); u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, - enum hl_va_range_type type, u32 size); + enum hl_va_range_type type, u32 size, u32 alignment); int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, u64 start_addr, u64 size); int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index f27ca80d3c3c..351c9927151f 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -626,18 +626,20 @@ out: * @ctx: current context * @type: virtual addresses range type. * @size: requested block size. + * @alignment: required alignment in bytes of the virtual block start address, + * 0 means no alignment. * * This function does the following: * - Iterate on the virtual block list to find a suitable virtual block for the - * given size. + * given size and alignment. * - Reserve the requested block and update the list. * - Return the start address of the virtual block. */ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx, - enum hl_va_range_type type, u32 size) + enum hl_va_range_type type, u32 size, u32 alignment) { return get_va_block(hdev, ctx->va_range[type], size, 0, - ctx->va_range[type]->page_size); + max(alignment, ctx->va_range[type]->page_size)); } /** diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index f93809f7e3a0..4c884082be04 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7791,7 +7791,8 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, } hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, - HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ); + HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ, + HL_MMU_VA_ALIGNMENT_NOT_NEEDED); if (!hdev->internal_cb_va_base) goto destroy_internal_cb_pool; -- cgit v1.2.3 From fe2bc2d249539c117cc887dc1f960ffcbba52d1e Mon Sep 17 00:00:00 2001 From: Moti Haimovski Date: Tue, 27 Oct 2020 11:03:32 +0200 Subject: habanalabs: share a single ctx-mutex between all MMUs Multiple locks are usually a source of problems, which in the MMU case can be avoided since it is relatively rare that both MMU tables are updated at the same time. Therefore, use a single shared lock instead of two separate ones. 
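A minimal sketch of the resulting locking model (structure and function names here are invented; only the shared-mutex idea is taken from the patch):

    #include <linux/mutex.h>

    struct demo_ctx {
            /* one lock now covers both device- and host-resident page tables */
            struct mutex mmu_lock;
    };

    static void demo_mmu_ctx_init(struct demo_ctx *ctx)
    {
            mutex_init(&ctx->mmu_lock);      /* done once, in the common MMU code */
    }

    static void demo_mmu_map(struct demo_ctx *ctx)
    {
            mutex_lock(&ctx->mmu_lock);
            /* update either the device-resident or the host-resident hop
             * tables; both paths serialize on the same per-context lock
             */
            mutex_unlock(&ctx->mmu_lock);
    }

    static void demo_mmu_ctx_fini(struct demo_ctx *ctx)
    {
            mutex_destroy(&ctx->mmu_lock);
    }

The trade-off is slightly coarser locking when both page-table flavours are updated concurrently, which the commit message argues is rare enough not to matter.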
Signed-off-by: Moti Haimovski Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/mmu.c | 4 ++++ drivers/misc/habanalabs/common/mmu_v1.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 6f535c81478d..f4b3d02fe0d8 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -81,6 +81,8 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx) if (!hdev->mmu_enable) return 0; + mutex_init(&ctx->mmu_lock); + if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) { rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx); if (rc) @@ -115,6 +117,8 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL) hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx); + + mutex_destroy(&ctx->mmu_lock); } /* diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index 5f62cb158eef..92b22298bb5c 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -481,9 +481,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) */ static int hl_mmu_v1_ctx_init(struct hl_ctx *ctx) { - mutex_init(&ctx->mmu_lock); hash_init(ctx->mmu_shadow_hash); - return dram_default_mapping_init(ctx); } @@ -516,8 +514,6 @@ static void hl_mmu_v1_ctx_fini(struct hl_ctx *ctx) pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes); _free_hop(ctx, pgt_info); } - - mutex_destroy(&ctx->mmu_lock); } static int _hl_mmu_v1_unmap(struct hl_ctx *ctx, -- cgit v1.2.3 From 00e1b59c8b155d524cf416155607c3e52c54c32a Mon Sep 17 00:00:00 2001 From: Moti Haimovski Date: Tue, 27 Oct 2020 10:55:42 +0200 Subject: habanalabs: fix MMU debugfs operations After the MMU-code refactoring, the existing MMU debugfs operations are no longer working so we need to fix them. In addition, remove the duplicate code that was in the debugfs code and use the already existing MMU-code. 
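For reference, a hypothetical debugfs consumer built on the shared translation helper looks like the sketch below; it relies only on the hl_mmu_get_tlb_info()/struct hl_mmu_hop_info interface added later in this patch, while the demo_* wrapper and the seq_file plumbing are simplified for illustration:

    #include <linux/seq_file.h>
    #include "habanalabs.h"  /* hl_ctx, hl_mmu_hop_info, hl_mmu_get_tlb_info() */

    static int demo_show_translation(struct seq_file *s, struct hl_ctx *ctx,
                                     u64 virt_addr)
    {
            struct hl_mmu_hop_info hops;
            int i;

            /* one call replaces the hand-rolled hop walk duplicated in debugfs */
            if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops))
                    return -EINVAL;          /* virt_addr is not mapped */

            for (i = 0; i < hops.used_hops; i++)
                    seq_printf(s, "hop%d: addr 0x%llx pte_addr 0x%llx pte 0x%llx\n",
                               i, hops.hop_info[i].hop_addr,
                               hops.hop_info[i].hop_pte_addr,
                               hops.hop_info[i].hop_pte_val);

            return 0;
    }

Keeping the page-table walk behind one interface means debugfs and the address-translation path can no longer drift apart, which is what broke after the MMU refactoring.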
Signed-off-by: Moti Haimovski Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/debugfs.c | 282 ++-------------------------- drivers/misc/habanalabs/common/habanalabs.h | 35 ++++ drivers/misc/habanalabs/common/mmu.c | 50 +++++ drivers/misc/habanalabs/common/mmu_v1.c | 102 ++++++++++ 4 files changed, 206 insertions(+), 263 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 3d9bd86312e2..1db804de45ba 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -301,93 +301,15 @@ static int vm_show(struct seq_file *s, void *data) return 0; } -/* these inline functions are copied from mmu.c */ -static inline u64 get_hop0_addr(struct hl_ctx *ctx) -{ - return ctx->hdev->asic_prop.mmu_pgt_addr + - (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); -} - -static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, - u64 virt_addr, u64 mask, u64 shift) -{ - return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * - ((virt_addr & mask) >> shift); -} - -static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop0_mask, - mmu_specs->hop0_shift); -} - -static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop1_mask, - mmu_specs->hop1_shift); -} - -static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop2_mask, - mmu_specs->hop2_shift); -} - -static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop3_mask, - mmu_specs->hop3_shift); -} - -static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop4_mask, - mmu_specs->hop4_shift); -} - -static inline u64 get_hop5_pte_addr(struct hl_ctx *ctx, - struct hl_mmu_properties *mmu_specs, - u64 hop_addr, u64 vaddr) -{ - return get_hopN_pte_addr(ctx, hop_addr, vaddr, mmu_specs->hop5_mask, - mmu_specs->hop5_shift); -} - -static inline u64 get_next_hop_addr(u64 curr_pte) -{ - if (curr_pte & PAGE_PRESENT_MASK) - return curr_pte & HOP_PHYS_ADDR_MASK; - else - return ULLONG_MAX; -} - static int mmu_show(struct seq_file *s, void *data) { struct hl_debugfs_entry *entry = s->private; struct hl_dbg_device_entry *dev_entry = entry->dev_entry; struct hl_device *hdev = dev_entry->hdev; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; struct hl_ctx *ctx; - bool is_dram_addr; - - u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0, - hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0, - hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0, - hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0, - hop4_addr = 0, hop4_pte_addr = 0, hop4_pte = 0, - hop5_addr = 0, hop5_pte_addr = 0, hop5_pte = 0, - virt_addr = dev_entry->mmu_addr; + struct hl_mmu_hop_info hops_info; + u64 virt_addr = dev_entry->mmu_addr; + int i; if (!hdev->mmu_enable) return 0; @@ -402,132 +324,24 @@ static int mmu_show(struct seq_file *s, void *data) return 0; } - 
is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - - /* shifts and masks are the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; - - mutex_lock(&ctx->mmu_lock); - - /* the following lookup is copied from unmap() in mmu.c */ - - hop0_addr = get_hop0_addr(ctx); - hop0_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop0_addr, virt_addr); - hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); - hop1_addr = get_next_hop_addr(hop0_pte); - - if (hop1_addr == ULLONG_MAX) - goto not_mapped; - - hop1_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop1_addr, virt_addr); - hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); - hop2_addr = get_next_hop_addr(hop1_pte); - - if (hop2_addr == ULLONG_MAX) - goto not_mapped; - - hop2_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop2_addr, virt_addr); - hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); - hop3_addr = get_next_hop_addr(hop2_pte); - - if (hop3_addr == ULLONG_MAX) - goto not_mapped; - - hop3_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop3_addr, virt_addr); - hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); - - if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { - if (!(hop3_pte & LAST_MASK)) { - hop4_addr = get_next_hop_addr(hop3_pte); - - if (hop4_addr == ULLONG_MAX) - goto not_mapped; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, - hop4_addr, virt_addr); - hop4_pte = hdev->asic_funcs->read_pte(hdev, - hop4_pte_addr); - if (!(hop4_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } else { - if (!(hop3_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } - } else { - hop4_addr = get_next_hop_addr(hop3_pte); - - if (hop4_addr == ULLONG_MAX) - goto not_mapped; - - hop4_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, - hop4_addr, virt_addr); - hop4_pte = hdev->asic_funcs->read_pte(hdev, - hop4_pte_addr); - if (!(hop4_pte & LAST_MASK)) { - hop5_addr = get_next_hop_addr(hop4_pte); - - if (hop5_addr == ULLONG_MAX) - goto not_mapped; - - hop5_pte_addr = get_hop5_pte_addr(ctx, mmu_prop, - hop5_addr, virt_addr); - hop5_pte = hdev->asic_funcs->read_pte(hdev, - hop5_pte_addr); - if (!(hop5_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } else { - if (!(hop4_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - } + if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) { + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + return 0; } seq_printf(s, "asid: %u, virt_addr: 0x%llx\n", dev_entry->mmu_asid, dev_entry->mmu_addr); - seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr); - seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr); - seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte); - - seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr); - seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr); - seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte); - - seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr); - seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr); - seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte); - - seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr); - seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr); - seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte); - - if (mmu_prop->num_hops == MMU_ARCH_5_HOPS) { - if (!(hop3_pte & LAST_MASK)) { - seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); - seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); - seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); - } - } else { - seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); - seq_printf(s, "hop4_pte_addr: 0x%llx\n", 
hop4_pte_addr); - seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); - - if (!(hop4_pte & LAST_MASK)) { - seq_printf(s, "hop5_addr: 0x%llx\n", hop5_addr); - seq_printf(s, "hop5_pte_addr: 0x%llx\n", hop5_pte_addr); - seq_printf(s, "hop5_pte: 0x%llx\n", hop5_pte); - } + for (i = 0 ; i < hops_info.used_hops ; i++) { + seq_printf(s, "hop%d_addr: 0x%llx\n", + i, hops_info.hop_info[i].hop_addr); + seq_printf(s, "hop%d_pte_addr: 0x%llx\n", + i, hops_info.hop_info[i].hop_pte_addr); + seq_printf(s, "hop%d_pte: 0x%llx\n", + i, hops_info.hop_info[i].hop_pte_val); } - goto out; - -not_mapped: - dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", - virt_addr); -out: - mutex_unlock(&ctx->mmu_lock); - return 0; } @@ -617,78 +431,20 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u64 *phys_addr) { struct hl_ctx *ctx = hdev->compute_ctx; - struct asic_fixed_properties *prop = &hdev->asic_prop; - struct hl_mmu_properties *mmu_prop; - u64 hop_addr, hop_pte_addr, hop_pte; - u64 offset_mask = HOP4_MASK | FLAGS_MASK; int rc = 0; - bool is_dram_addr; if (!ctx) { dev_err(hdev->dev, "no ctx available\n"); return -EINVAL; } - is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, - prop->dmmu.start_addr, - prop->dmmu.end_addr); - - /* shifts and masks are the same in PMMU and HPMMU, use one of them */ - mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu; - - mutex_lock(&ctx->mmu_lock); - - /* hop 0 */ - hop_addr = get_hop0_addr(ctx); - hop_pte_addr = get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 1 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 2 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - /* hop 3 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - if (!(hop_pte & LAST_MASK)) { - /* hop 4 */ - hop_addr = get_next_hop_addr(hop_pte); - if (hop_addr == ULLONG_MAX) - goto not_mapped; - hop_pte_addr = get_hop4_pte_addr(ctx, mmu_prop, hop_addr, - virt_addr); - hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr); - - offset_mask = FLAGS_MASK; + rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr); + if (rc) { + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + rc = -EINVAL; } - if (!(hop_pte & PAGE_PRESENT_MASK)) - goto not_mapped; - - *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask); - - goto out; - -not_mapped: - dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", - virt_addr); - rc = -EINVAL; -out: - mutex_unlock(&ctx->mmu_lock); return rc; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 76ab9741895d..b8ad5bfa6359 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -10,6 +10,7 @@ #include "../include/common/cpucp_if.h" #include "../include/common/qman_if.h" +#include "../include/hw_ip/mmu/mmu_general.h" #include #include @@ -1661,6 +1662,32 @@ struct hl_mmu_priv { 
struct hl_mmu_hr_priv hr; }; +/** + * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry + * that was created in order to translate a virtual address to a + * physical one. + * @hop_addr: The address of the hop. + * @hop_pte_addr: The address of the hop entry. + * @hop_pte_val: The value in the hop entry. + */ +struct hl_mmu_per_hop_info { + u64 hop_addr; + u64 hop_pte_addr; + u64 hop_pte_val; +}; + +/** + * struct hl_mmu_hop_info - A structure describing the TLB hops and their + * hop-entries that were created in order to translate a virtual address to a + * physical one. + * @hop_info: Array holding the per-hop information used for the translation. + * @used_hops: The number of hops used for the translation. + */ +struct hl_mmu_hop_info { + struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS]; + u32 used_hops; +}; + /** * struct hl_mmu_funcs - Device related MMU functions. * @init: initialize the MMU module. @@ -1672,6 +1699,9 @@ struct hl_mmu_priv { * @flush: flush all writes from all cores to reach device MMU. * @swap_out: marks all mapping of the given context as swapped out. * @swap_in: marks all mapping of the given context as swapped in. + * @get_tlb_info: returns the list of hops and hop-entries used that were + * created in order to translate the giver virtual address to a + * physical one. */ struct hl_mmu_funcs { int (*init)(struct hl_device *hdev); @@ -1686,6 +1716,8 @@ struct hl_mmu_funcs { void (*flush)(struct hl_ctx *ctx); void (*swap_out)(struct hl_ctx *ctx); void (*swap_in)(struct hl_ctx *ctx); + int (*get_tlb_info)(struct hl_ctx *ctx, + u64 virt_addr, struct hl_mmu_hop_info *hops); }; /** @@ -2138,6 +2170,9 @@ void hl_mmu_swap_out(struct hl_ctx *ctx); void hl_mmu_swap_in(struct hl_ctx *ctx); int hl_mmu_if_set_funcs(struct hl_device *hdev); void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); +int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr); +int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, void __iomem *dst, u32 src_offset, u32 size); diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index f4b3d02fe0d8..7279c83cc081 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -342,6 +342,56 @@ void hl_mmu_swap_in(struct hl_ctx *ctx) hdev->mmu_func[MMU_HR_PGT].swap_in(ctx); } +int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr) +{ + struct hl_mmu_hop_info hops; + u64 tmp_addr; + int rc; + + rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops); + if (rc) + return rc; + + /* last hop holds the phys address and flags */ + tmp_addr = hops.hop_info[hops.used_hops - 1].hop_pte_val; + *phys_addr = (tmp_addr & HOP_PHYS_ADDR_MASK) | (virt_addr & FLAGS_MASK); + + return 0; +} + +int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + int rc; + bool is_dram_addr; + + if (!hdev->mmu_enable) + return -EOPNOTSUPP; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + + /* host-residency is the same in PMMU and HPMMU, use one of them */ + mmu_prop = is_dram_addr ? 
&prop->dmmu : &prop->pmmu; + + mutex_lock(&ctx->mmu_lock); + + if (mmu_prop->host_resident) + rc = hdev->mmu_func[MMU_HR_PGT].get_tlb_info(ctx, + virt_addr, hops); + else + rc = hdev->mmu_func[MMU_DR_PGT].get_tlb_info(ctx, + virt_addr, hops); + + mutex_unlock(&ctx->mmu_lock); + + return rc; +} + int hl_mmu_if_set_funcs(struct hl_device *hdev) { if (!hdev->mmu_enable) diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index 92b22298bb5c..2ce6ea89d4fa 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -837,6 +837,107 @@ static void hl_mmu_v1_swap_in(struct hl_ctx *ctx) } +static inline u64 get_hop_pte_addr(struct hl_ctx *ctx, + struct hl_mmu_properties *mmu_prop, + int hop_num, u64 hop_addr, u64 virt_addr) +{ + switch (hop_num) { + case 0: + return get_hop0_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 1: + return get_hop1_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 2: + return get_hop2_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 3: + return get_hop3_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + case 4: + return get_hop4_pte_addr(ctx, mmu_prop, hop_addr, virt_addr); + default: + break; + } + return U64_MAX; +} + +static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, + struct hl_mmu_hop_info *hops) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_mmu_properties *mmu_prop; + bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr, is_huge; + int i, used_hops; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size, + prop->dmmu.start_addr, + prop->dmmu.end_addr); + is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size, + prop->pmmu.start_addr, + prop->pmmu.end_addr); + is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr, + prop->pmmu_huge.page_size, + prop->pmmu_huge.start_addr, + prop->pmmu_huge.end_addr); + if (is_dram_addr) { + mmu_prop = &prop->dmmu; + is_huge = true; + } else if (is_pmmu_addr) { + mmu_prop = &prop->pmmu; + is_huge = false; + } else if (is_pmmu_h_addr) { + mmu_prop = &prop->pmmu_huge; + is_huge = true; + } else { + return -EINVAL; + } + + used_hops = mmu_prop->num_hops; + + /* huge pages use lesser hops */ + if (is_huge) + used_hops--; + + hops->hop_info[0].hop_addr = get_phys_hop0_addr(ctx); + hops->hop_info[0].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, 0, + hops->hop_info[0].hop_addr, virt_addr); + hops->hop_info[0].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[0].hop_pte_addr); + + for (i = 1 ; i < used_hops ; i++) { + hops->hop_info[i].hop_addr = + get_next_hop_addr(ctx, + hops->hop_info[i - 1].hop_pte_val); + if (hops->hop_info[i].hop_addr == ULLONG_MAX) + return -EFAULT; + + hops->hop_info[i].hop_pte_addr = + get_hop_pte_addr(ctx, mmu_prop, i, + hops->hop_info[i].hop_addr, + virt_addr); + hops->hop_info[i].hop_pte_val = + hdev->asic_funcs->read_pte(hdev, + hops->hop_info[i].hop_pte_addr); + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + if (hops->hop_info[i].hop_pte_val & LAST_MASK) + break; + } + + /* if passed over all hops then no last hop was found */ + if (i == mmu_prop->num_hops) + return -EFAULT; + + if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK)) + return -EFAULT; + + hops->used_hops = i + 1; + + return 0; +} + /* * hl_mmu_v1_prepare - prepare mmu for working with mmu v1 * @@ -853,4 +954,5 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu) 
mmu->flush = flush; mmu->swap_out = hl_mmu_v1_swap_out; mmu->swap_in = hl_mmu_v1_swap_in; + mmu->get_tlb_info = hl_mmu_v1_get_tlb_info; } -- cgit v1.2.3 From 439bc47b8e833db547c16369ac562f9364e6bc14 Mon Sep 17 00:00:00 2001 From: Alon Mizrahi Date: Tue, 10 Nov 2020 13:49:10 +0200 Subject: habanalabs: firmware returns 64bit argument F/W message returns 64bit value but up until now we casted it to a 32bit variable, instead of receiving 64bit in the first place. Signed-off-by: Alon Mizrahi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/debugfs.c | 5 ++++- drivers/misc/habanalabs/common/firmware_if.c | 26 +++++++++++++------------- drivers/misc/habanalabs/common/habanalabs.h | 4 ++-- drivers/misc/habanalabs/common/hwmon.c | 25 ++++++++++++++++++++----- drivers/misc/habanalabs/common/sysfs.c | 10 +++++----- drivers/misc/habanalabs/gaudi/gaudi.c | 2 +- drivers/misc/habanalabs/goya/goya.c | 6 +++--- drivers/misc/habanalabs/goya/goyaP.h | 2 +- 8 files changed, 49 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index 1db804de45ba..b47a62da0b41 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -22,6 +22,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, u8 i2c_reg, long *val) { struct cpucp_packet pkt; + u64 result; int rc; if (!hl_device_operational(hdev, NULL)) @@ -36,7 +37,9 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, pkt.i2c_reg = i2c_reg; rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, val); + 0, &result); + + *val = (long) result; if (rc) dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index e37d451e6415..8f70d0bbe5e1 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -88,7 +88,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode) } int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, - u16 len, u32 timeout, long *result) + u16 len, u32 timeout, u64 *result) { struct cpucp_packet *pkt; dma_addr_t pkt_dma_addr; @@ -143,7 +143,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, >> CPUCP_PKT_CTL_OPCODE_SHIFT); rc = -EIO; } else if (result) { - *result = (long) le64_to_cpu(pkt->result); + *result = le64_to_cpu(pkt->result); } out: @@ -157,7 +157,7 @@ out: int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -180,7 +180,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, { struct cpucp_unmask_irq_arr_packet *pkt; size_t total_pkt_size; - long result; + u64 result; int rc; total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) + @@ -219,7 +219,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, int hl_fw_test_cpu_queue(struct hl_device *hdev) { struct cpucp_packet test_pkt = {}; - long result; + u64 result; int rc; test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << @@ -232,7 +232,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev) if (!rc) { if (result != CPUCP_PACKET_FENCE_VAL) dev_err(hdev->dev, - "CPU queue test failed (0x%08lX)\n", result); + "CPU queue test failed (%#08llx)\n", result); } else { 
dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc); } @@ -263,7 +263,7 @@ void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, int hl_fw_send_heartbeat(struct hl_device *hdev) { struct cpucp_packet hb_pkt = {}; - long result; + u64 result; int rc; hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << @@ -285,7 +285,7 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev) struct cpucp_packet pkt = {}; void *cpucp_info_cpu_addr; dma_addr_t cpucp_info_dma_addr; - long result; + u64 result; int rc; cpucp_info_cpu_addr = @@ -336,7 +336,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size) struct cpucp_packet pkt = {}; void *eeprom_info_cpu_addr; dma_addr_t eeprom_info_dma_addr; - long result; + u64 result; int rc; eeprom_info_cpu_addr = @@ -379,7 +379,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters) { struct cpucp_packet pkt = {}; - long result; + u64 result; int rc; pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << @@ -426,7 +426,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) { struct cpucp_packet pkt = {}; - long result; + u64 result; int rc; pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET << @@ -452,7 +452,7 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 *pll_info) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -467,7 +467,7 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, if (rc) dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc); - *pll_info = result; + *pll_info = (u32) result; return rc; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index b8ad5bfa6359..43aa8cbd8969 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -925,7 +925,7 @@ struct hl_asic_funcs { int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size); int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, - u16 len, u32 timeout, long *result); + u16 len, u32 timeout, u64 *result); int (*pci_bars_map)(struct hl_device *hdev); int (*init_iatu)(struct hl_device *hdev); u32 (*rreg)(struct hl_device *hdev, u32 reg); @@ -2178,7 +2178,7 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, void __iomem *dst, u32 src_offset, u32 size); int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode); int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, - u16 len, u32 timeout, long *result); + u16 len, u32 timeout, u64 *result); int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type); int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr, size_t irq_arr_size); diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c index ab96401c3752..6b421d76b311 100644 --- a/drivers/misc/habanalabs/common/hwmon.c +++ b/drivers/misc/habanalabs/common/hwmon.c @@ -312,6 +312,7 @@ int hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -322,7 +323,9 @@ int hl_get_temperature(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -363,6 +366,7 @@ int 
hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -373,7 +377,9 @@ int hl_get_voltage(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -389,6 +395,7 @@ int hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -399,7 +406,9 @@ int hl_get_current(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -415,6 +424,7 @@ int hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -425,7 +435,9 @@ int hl_get_fan_speed(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, @@ -441,6 +453,7 @@ int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *value) { struct cpucp_packet pkt; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -451,7 +464,9 @@ int hl_get_pwm_info(struct hl_device *hdev, pkt.type = __cpu_to_le16(attr); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), - 0, value); + 0, &result); + + *value = (long) result; if (rc) { dev_err(hdev->dev, diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c index 94ca68e62000..4366d8f93842 100644 --- a/drivers/misc/habanalabs/common/sysfs.c +++ b/drivers/misc/habanalabs/common/sysfs.c @@ -12,7 +12,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -32,10 +32,10 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n", pll_index, rc); - result = rc; + return rc; } - return result; + return (long) result; } void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) @@ -62,7 +62,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) u64 hl_get_max_power(struct hl_device *hdev) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); @@ -75,7 +75,7 @@ u64 hl_get_max_power(struct hl_device *hdev) if (rc) { dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); - result = rc; + return (u64) rc; } return result; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 4c884082be04..4fab90e33170 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -4561,7 +4561,7 @@ static void *gaudi_get_int_queue_base(struct hl_device *hdev, } static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg, - u16 len, u32 timeout, long *result) + u16 len, u32 timeout, u64 *result) { struct gaudi_device *gaudi = hdev->asic_specific; diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 3398b4cc1298..55d174d3cac8 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ 
b/drivers/misc/habanalabs/goya/goya.c @@ -2954,7 +2954,7 @@ free_fence_ptr: } int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, - u32 timeout, long *result) + u32 timeout, u64 *result) { struct goya_device *goya = hdev->asic_specific; @@ -4540,7 +4540,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, { struct cpucp_unmask_irq_arr_packet *pkt; size_t total_pkt_size; - long result; + u64 result; int rc; int irq_num_entries, irq_arr_index; __le32 *goya_irq_arr; @@ -4599,7 +4599,7 @@ static int goya_soft_reset_late_init(struct hl_device *hdev) static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) { struct cpucp_packet pkt; - long result; + u64 result; int rc; memset(&pkt, 0, sizeof(pkt)); diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index ef4298f84a0a..8b3408211af6 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -192,7 +192,7 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id); int goya_test_queues(struct hl_device *hdev); int goya_test_cpu_queue(struct hl_device *hdev); int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, - u32 timeout, long *result); + u32 timeout, u64 *result); long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); -- cgit v1.2.3 From b90c8944346217d82f04acdc69375a4860451484 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Sun, 8 Nov 2020 12:59:04 +0200 Subject: habanalabs/gaudi: align to new FW reset scheme As part of the security effort in which FW will be handling sensitive HW registers, hard reset flow will be done by FW and will be triggered by driver. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 4fab90e33170..fda3d8a85ada 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -65,7 +65,7 @@ #define GAUDI_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ -#define GAUDI_RESET_TIMEOUT_MSEC 1000 /* 1000ms */ +#define GAUDI_RESET_TIMEOUT_MSEC 2000 /* 2000ms */ #define GAUDI_RESET_WAIT_MSEC 1 /* 1ms */ #define GAUDI_CPU_RESET_WAIT_MSEC 200 /* 200ms */ #define GAUDI_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ @@ -3523,7 +3523,6 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset) wait_timeout_ms = GAUDI_RESET_WAIT_MSEC; gaudi_stop_nic_qmans(hdev); - gaudi_stop_mme_qmans(hdev); gaudi_stop_tpc_qmans(hdev); gaudi_stop_hbm_dma_qmans(hdev); @@ -3900,26 +3899,31 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) /* Set device to handle FLR by H/W as we will put the device CPU to * halt mode */ - WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | + if (!hdev->asic_prop.hard_reset_done_by_fw) + WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK)); /* I don't know what is the state of the CPU so make sure it is * stopped in any means necessary */ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE); - msleep(cpu_timeout_ms); + if (!hdev->asic_prop.hard_reset_done_by_fw) { + msleep(cpu_timeout_ms); - /* Tell ASIC not to re-initialize PCIe */ - 
WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); + /* Tell ASIC not to re-initialize PCIe */ + WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC); - /* Restart BTL/BLR upon hard-reset */ - if (hdev->asic_prop.fw_security_disabled) - WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); + /* Restart BTL/BLR upon hard-reset */ + if (hdev->asic_prop.fw_security_disabled) + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1); - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST, 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT); + } + dev_info(hdev->dev, "Issued HARD reset command, going to wait %dms\n", reset_timeout_ms); -- cgit v1.2.3 From 5e5867e51d6eb0a086ead83796efbae4234eab1a Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Mon, 16 Nov 2020 10:25:53 +0200 Subject: habanalabs: print CS type when it is stuck We have several types of command submissions and the user wants to know which type of command submission has not finished in time when that event occurs. This is very helpful for debug. Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 28 +++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 749ec6c95fc2..7309dd2b88a9 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -418,9 +418,31 @@ static void cs_timedout(struct work_struct *work) hdev = cs->ctx->hdev; - dev_err(hdev->dev, - "Command submission %llu has not finished in time!\n", - cs->sequence); + switch (cs->type) { + case CS_TYPE_SIGNAL: + dev_err(hdev->dev, + "Signal command submission %llu has not finished in time!\n", + cs->sequence); + break; + + case CS_TYPE_WAIT: + dev_err(hdev->dev, + "Wait command submission %llu has not finished in time!\n", + cs->sequence); + break; + + case CS_TYPE_COLLECTIVE_WAIT: + dev_err(hdev->dev, + "Collective Wait command submission %llu has not finished in time!\n", + cs->sequence); + break; + + default: + dev_err(hdev->dev, + "Command submission %llu has not finished in time!\n", + cs->sequence); + break; + } cs_put(cs); -- cgit v1.2.3 From 5c05487f15509320572c13fce8f490fb914cf7d4 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 22 Oct 2020 15:13:10 +0300 Subject: habanalabs: mmu map wrapper for sizes larger than a page We introduce a new wrapper which allows us to mmu map any size to any host va_range available. In addition we remove duplicated code from various places in driver and using this new wrapper instead. This wrapper supports mapping only contiguous physical memory blocks and will be used for mappings that are done to the driver ASID. 
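As an illustration of the intended call pattern (not part of this patch), here is a minimal caller-side sketch; the function names and the pre-reserved VA block are placeholders, and a real caller would also invalidate the MMU cache as gaudi_internal_cb_pool_init() does further down:

	/*
	 * Sketch: map a physically contiguous DMA buffer of 'size' bytes
	 * into an already reserved device VA block and tear it down again.
	 * hl_mmu_map_contiguous() derives the page size from the MMU
	 * region (dmmu/pmmu/pmmu_huge) that contains 'va_base' and walks
	 * the range page by page internally.
	 */
	static int example_map_pool(struct hl_ctx *ctx, u64 va_base,
					dma_addr_t dma_addr, u32 size)
	{
		int rc;

		mutex_lock(&ctx->mmu_lock);
		rc = hl_mmu_map_contiguous(ctx, va_base, (u64) dma_addr, size);
		mutex_unlock(&ctx->mmu_lock);

		return rc;
	}

	static void example_unmap_pool(struct hl_ctx *ctx, u64 va_base,
					u32 size)
	{
		mutex_lock(&ctx->mmu_lock);
		hl_mmu_unmap_contiguous(ctx, va_base, size);
		mutex_unlock(&ctx->mmu_lock);
	}

This is the pattern that replaces the open-coded PAGE_SIZE_4KB loops in gaudi_internal_cb_pool_init()/fini() in the diff below.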
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_buffer.c | 10 +-- drivers/misc/habanalabs/common/habanalabs.h | 7 +- drivers/misc/habanalabs/common/memory.c | 6 +- drivers/misc/habanalabs/common/mmu.c | 112 ++++++++++++++++++++++-- drivers/misc/habanalabs/gaudi/gaudi.c | 60 ++----------- drivers/misc/habanalabs/goya/goya.c | 26 +++--- 6 files changed, 144 insertions(+), 77 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 0c482358f350..2856bb3423ee 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -67,9 +67,9 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) bus_addr = cb->bus_address; offset = 0; list_for_each_entry(va_block, &cb->va_block_list, node) { - rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size, - list_is_last(&va_block->node, - &cb->va_block_list)); + rc = hl_mmu_map_page(ctx, va_block->start, bus_addr, + va_block->size, list_is_last(&va_block->node, + &cb->va_block_list)); if (rc) { dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", va_block->start); @@ -92,7 +92,7 @@ err_va_umap: list_for_each_entry(va_block, &cb->va_block_list, node) { if (offset <= 0) break; - hl_mmu_unmap(ctx, va_block->start, va_block->size, + hl_mmu_unmap_page(ctx, va_block->start, va_block->size, offset <= va_block->size); offset -= va_block->size; } @@ -119,7 +119,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) mutex_lock(&ctx->mmu_lock); list_for_each_entry(va_block, &cb->va_block_list, node) - if (hl_mmu_unmap(ctx, va_block->start, va_block->size, + if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size, list_is_last(&va_block->node, &cb->va_block_list))) dev_warn_ratelimited(hdev->dev, diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 43aa8cbd8969..e1db8301ecbd 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2162,10 +2162,13 @@ int hl_mmu_init(struct hl_device *hdev); void hl_mmu_fini(struct hl_device *hdev); int hl_mmu_ctx_init(struct hl_ctx *ctx); void hl_mmu_ctx_fini(struct hl_ctx *ctx); -int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, +int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, bool flush_pte); -int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte); +int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, + u64 phys_addr, u32 size); +int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size); void hl_mmu_swap_out(struct hl_ctx *ctx); void hl_mmu_swap_in(struct hl_ctx *ctx); int hl_mmu_if_set_funcs(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 351c9927151f..744275dd6410 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -843,7 +843,7 @@ static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, for (i = 0 ; i < phys_pg_pack->npages ; i++) { paddr = phys_pg_pack->pages[i]; - rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size, + rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size, (i + 1) == phys_pg_pack->npages); if (rc) { dev_err(hdev->dev, @@ -862,7 +862,7 @@ static int 
map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, err: next_vaddr = vaddr; for (i = 0 ; i < mapped_pg_cnt ; i++) { - if (hl_mmu_unmap(ctx, next_vaddr, page_size, + if (hl_mmu_unmap_page(ctx, next_vaddr, page_size, (i + 1) == mapped_pg_cnt)) dev_warn_ratelimited(hdev->dev, "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", @@ -892,7 +892,7 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, next_vaddr = vaddr; for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) { - if (hl_mmu_unmap(ctx, next_vaddr, page_size, + if (hl_mmu_unmap_page(ctx, next_vaddr, page_size, (i + 1) == phys_pg_pack->npages)) dev_warn_ratelimited(hdev->dev, "unmap failed for vaddr: 0x%llx\n", next_vaddr); diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 7279c83cc081..33ae953d3a36 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -122,7 +122,7 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) } /* - * hl_mmu_unmap - unmaps a virtual addr + * hl_mmu_unmap_page - unmaps a virtual addr * * @ctx: pointer to the context structure * @virt_addr: virt addr to map from @@ -142,7 +142,7 @@ void hl_mmu_ctx_fini(struct hl_ctx *ctx) * For optimization reasons PCI flush may be requested once after unmapping of * large area. */ -int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, +int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte) { struct hl_device *hdev = ctx->hdev; @@ -200,7 +200,7 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, } /* - * hl_mmu_map - maps a virtual addr to physical addr + * hl_mmu_map_page - maps a virtual addr to physical addr * * @ctx: pointer to the context structure * @virt_addr: virt addr to map from @@ -221,8 +221,8 @@ int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, * For optimization reasons PCI flush may be requested once after mapping of * large area. 
*/ -int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size, - bool flush_pte) +int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, + u32 page_size, bool flush_pte) { struct hl_device *hdev = ctx->hdev; struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -302,6 +302,108 @@ err: return rc; } +/* + * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page + * for mapping contiguous physical memory + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @phys_addr: phys addr to map to + * @size: size to map + * + */ +int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr, + u64 phys_addr, u32 size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 curr_va, curr_pa; + u32 page_size; + bool flush_pte; + int rc = 0, off; + + if (hl_mem_area_inside_range(virt_addr, size, + prop->dmmu.start_addr, prop->dmmu.end_addr)) + page_size = prop->dmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu.start_addr, prop->pmmu.end_addr)) + page_size = prop->pmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) + page_size = prop->pmmu_huge.page_size; + else + return -EINVAL; + + for (off = 0 ; off < size ; off += page_size) { + curr_va = virt_addr + off; + curr_pa = phys_addr + off; + flush_pte = (off + page_size) >= size; + rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size, + flush_pte); + if (rc) { + dev_err(hdev->dev, + "Map failed for va 0x%llx to pa 0x%llx\n", + curr_va, curr_pa); + goto unmap; + } + } + + return rc; + +unmap: + for (; off >= 0 ; off -= page_size) { + curr_va = virt_addr + off; + flush_pte = (off - (s32) page_size) < 0; + if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va 0x%llx\n", curr_va); + } + + return rc; +} + +/* + * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page + * for unmapping contiguous physical memory + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to unmap + * @size: size to unmap + * + */ +int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 curr_va; + u32 page_size; + bool flush_pte; + int rc = 0, off; + + if (hl_mem_area_inside_range(virt_addr, size, + prop->dmmu.start_addr, prop->dmmu.end_addr)) + page_size = prop->dmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu.start_addr, prop->pmmu.end_addr)) + page_size = prop->pmmu.page_size; + else if (hl_mem_area_inside_range(virt_addr, size, + prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr)) + page_size = prop->pmmu_huge.page_size; + else + return -EINVAL; + + for (off = 0 ; off < size ; off += page_size) { + curr_va = virt_addr + off; + flush_pte = (off + page_size) >= size; + rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte); + if (rc) + dev_warn_ratelimited(hdev->dev, + "Unmap failed for va 0x%llx\n", curr_va); + } + + return rc; +} + /* * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out * diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index fda3d8a85ada..49d4b5dda115 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7755,9 +7755,6 @@ static int gaudi_internal_cb_pool_init(struct hl_device 
*hdev, struct hl_ctx *ctx) { struct gaudi_device *gaudi = hdev->asic_specific; - bool flush_pte; - u64 va, pa; - s64 off; int min_alloc_order, rc, collective_cb_size; if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) @@ -7802,48 +7799,23 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev, goto destroy_internal_cb_pool; mutex_lock(&ctx->mmu_lock); - - /* The mapping is done page by page since we can't assure allocated ptr - * is aligned to HOST_SPACE_INTERNAL_CB_SZ - */ - for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { - va = hdev->internal_cb_va_base + off; - pa = hdev->internal_cb_pool_dma_addr + off; - flush_pte = (off + PAGE_SIZE_4KB) >= HOST_SPACE_INTERNAL_CB_SZ; - rc = hl_mmu_map(ctx, va, pa, PAGE_SIZE_4KB, flush_pte); - if (rc) { - dev_err(hdev->dev, - "Map failed for va 0x%llx to pa 0x%llx\n", - va, pa); - goto unmap; - } - } + rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, + hdev->internal_cb_pool_dma_addr, + HOST_SPACE_INTERNAL_CB_SZ); hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR); - mutex_unlock(&ctx->mmu_lock); - return 0; + if (rc) + goto unreserve_internal_cb_pool; -unmap: - for (; off >= 0 ; off -= PAGE_SIZE_4KB) { - va = hdev->internal_cb_va_base + off; - flush_pte = (off - (s32) PAGE_SIZE_4KB) < 0; - if (hl_mmu_unmap(ctx, va, PAGE_SIZE_4KB, flush_pte)) - dev_warn_ratelimited(hdev->dev, - "failed to unmap va 0x%llx\n", va); - } + return 0; +unreserve_internal_cb_pool: hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ); - - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); - - mutex_unlock(&ctx->mmu_lock); - destroy_internal_cb_pool: gen_pool_destroy(hdev->internal_cb_pool); - free_internal_cb_pool: hdev->asic_funcs->asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, @@ -7857,30 +7829,16 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *ctx) { struct gaudi_device *gaudi = hdev->asic_specific; - bool flush_pte = false; - u64 va, off; if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) return; mutex_lock(&ctx->mmu_lock); - - for (off = 0 ; off < HOST_SPACE_INTERNAL_CB_SZ ; off += PAGE_SIZE_4KB) { - va = hdev->internal_cb_va_base + off; - - if (off + PAGE_SIZE_4KB >= HOST_SPACE_INTERNAL_CB_SZ) - flush_pte = true; - - if (hl_mmu_unmap(ctx, va, PAGE_SIZE_4KB, flush_pte)) - dev_warn_ratelimited(hdev->dev, - "failed to unmap va 0x%llx\n", va); - } - + hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, + HOST_SPACE_INTERNAL_CB_SZ); hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ); - hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR); - mutex_unlock(&ctx->mmu_lock); gen_pool_destroy(hdev->internal_cb_pool); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 55d174d3cac8..342227b93778 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4906,9 +4906,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) return 0; for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) { - rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off, - prop->dram_base_address + off, PAGE_SIZE_2MB, - (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE); + rc = hl_mmu_map_page(hdev->kernel_ctx, + prop->dram_base_address + off, + prop->dram_base_address + off, PAGE_SIZE_2MB, + (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE); if (rc) { dev_err(hdev->dev, "Map failed for address 0x%llx\n", 
prop->dram_base_address + off); @@ -4917,8 +4918,10 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) } if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { - rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, - hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true); + rc = hl_mmu_map_page(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR, + hdev->cpu_accessible_dma_address, + PAGE_SIZE_2MB, true); if (rc) { dev_err(hdev->dev, @@ -4928,7 +4931,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) } } else { for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) { - rc = hl_mmu_map(hdev->kernel_ctx, + rc = hl_mmu_map_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, hdev->cpu_accessible_dma_address + cpu_off, PAGE_SIZE_4KB, true); @@ -4955,7 +4958,7 @@ static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev) unmap_cpu: for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, PAGE_SIZE_4KB, true)) dev_warn_ratelimited(hdev->dev, @@ -4963,7 +4966,7 @@ unmap_cpu: VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off); unmap: for (; off >= 0 ; off -= PAGE_SIZE_2MB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, prop->dram_base_address + off, PAGE_SIZE_2MB, true)) dev_warn_ratelimited(hdev->dev, @@ -4989,13 +4992,14 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev) WREG32(mmCPU_IF_AWUSER_OVR_EN, 0); if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) { - if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR, + if (hl_mmu_unmap_page(hdev->kernel_ctx, + VA_CPU_ACCESSIBLE_MEM_ADDR, PAGE_SIZE_2MB, true)) dev_warn(hdev->dev, "Failed to unmap CPU accessible memory\n"); } else { for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off, PAGE_SIZE_4KB, (cpu_off + PAGE_SIZE_4KB) >= SZ_2M)) @@ -5005,7 +5009,7 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev) } for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) - if (hl_mmu_unmap(hdev->kernel_ctx, + if (hl_mmu_unmap_page(hdev->kernel_ctx, prop->dram_base_address + off, PAGE_SIZE_2MB, (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE)) dev_warn_ratelimited(hdev->dev, -- cgit v1.2.3 From 4147864e8d65a0d57dd8573cf306382653616ac2 Mon Sep 17 00:00:00 2001 From: Alon Mizrahi Date: Tue, 17 Nov 2020 14:25:14 +0200 Subject: habanalabs: fetch pll frequency from firmware Once firmware security is enabled, driver must fetch pll frequencies through the firmware message interface instead of reading the registers directly. 
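For context, a rough user-space sketch of the new query path follows; it is a sketch only, assuming the HL_IOCTL_INFO ioctl, the installed uapi header path and a /dev/hl0 device node, none of which are spelled out in this patch:

	/*
	 * Sketch: query the four 16-bit output frequencies of one PLL via
	 * the new HL_INFO_PLL_FREQUENCY op.  'fd' is an open habanalabs
	 * device node (e.g. /dev/hl0); HL_IOCTL_INFO and the structures
	 * are assumed to come from the installed uapi header.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <misc/habanalabs.h>

	static int read_pll_freq(int fd, __u32 pll_index)
	{
		struct hl_pll_frequency_info freq = {0};
		struct hl_info_args args = {0};
		int i, rc;

		args.op = HL_INFO_PLL_FREQUENCY;
		args.pll_index = pll_index;
		args.return_pointer = (__u64) (uintptr_t) &freq;
		args.return_size = sizeof(freq);

		rc = ioctl(fd, HL_IOCTL_INFO, &args);
		if (rc)
			return rc;

		for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++)
			printf("PLL %u output %d: %u MHz\n", pll_index, i,
			       freq.output[i]);

		return 0;
	}

The kernel side simply forwards pll_index to hl_fw_cpucp_pll_info_get() and unpacks the 64-bit firmware result through the CPUCP_PKT_RES_PLL_OUT* masks, as the diff below shows.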
Signed-off-by: Alon Mizrahi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 24 ++-- drivers/misc/habanalabs/common/habanalabs.h | 9 +- drivers/misc/habanalabs/common/habanalabs_ioctl.c | 22 ++++ drivers/misc/habanalabs/gaudi/gaudi.c | 135 ++++++++++++++------- drivers/misc/habanalabs/gaudi/gaudiP.h | 8 ++ drivers/misc/habanalabs/goya/goya.c | 2 +- drivers/misc/habanalabs/include/common/cpucp_if.h | 40 +++++- .../misc/habanalabs/include/common/hl_boot_if.h | 4 + .../habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 14 ++- .../include/gaudi/asic_reg/psoc_hbm_pll_regs.h | 114 ----------------- .../include/gaudi/asic_reg/psoc_pci_pll_regs.h | 114 ----------------- include/uapi/misc/habanalabs.h | 9 ++ 12 files changed, 199 insertions(+), 296 deletions(-) delete mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h delete mode 100644 drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 8f70d0bbe5e1..c4a8d6ca34bb 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -279,7 +279,8 @@ int hl_fw_send_heartbeat(struct hl_device *hdev) return rc; } -int hl_fw_cpucp_info_get(struct hl_device *hdev) +int hl_fw_cpucp_info_get(struct hl_device *hdev, + u32 cpu_security_boot_status_reg) { struct asic_fixed_properties *prop = &hdev->asic_prop; struct cpucp_packet pkt = {}; @@ -324,6 +325,11 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev) goto out; } + /* Read FW application security bits again */ + if (hdev->asic_prop.fw_security_status_valid) + hdev->asic_prop.fw_app_security_map = + RREG32(cpu_security_boot_status_reg); + out: hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr); @@ -446,10 +452,8 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy) return rc; } -int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, - enum cpucp_pll_type_attributes pll_type, - enum cpucp_pll_reg_attributes pll_reg, - u32 *pll_info) +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index, + u16 *pll_freq_arr) { struct cpucp_packet pkt; u64 result; @@ -457,17 +461,19 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, memset(&pkt, 0, sizeof(pkt)); - pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_REG_GET << + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET << CPUCP_PKT_CTL_OPCODE_SHIFT); - pkt.pll_type = __cpu_to_le16(pll_type); - pkt.pll_reg = __cpu_to_le16(pll_reg); + pkt.pll_type = __cpu_to_le16(pll_index); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), HL_CPUCP_INFO_TIMEOUT_USEC, &result); if (rc) dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc); - *pll_info = (u32) result; + pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result); + pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result); + pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result); + pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result); return rc; } diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index e1db8301ecbd..9c9c8b24c47a 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2191,16 +2191,15 @@ void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, void 
hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr); int hl_fw_send_heartbeat(struct hl_device *hdev); -int hl_fw_cpucp_info_get(struct hl_device *hdev); +int hl_fw_cpucp_info_get(struct hl_device *hdev, + u32 cpu_security_boot_status_reg); int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size); int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, struct hl_info_pci_counters *counters); int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy); -int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, - enum cpucp_pll_type_attributes pll_type, - enum cpucp_pll_reg_attributes pll_reg, - u32 *pll_info); +int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u16 pll_index, + u16 *pll_freq_arr); int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, u32 msg_to_cpu_reg, u32 cpu_msg_status_reg, u32 cpu_security_boot_status_reg, u32 boot_err0_reg, diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index ba8217fc9425..32e6af1db4e3 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -403,6 +403,25 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv, min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0; } +static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_pll_frequency_info freq_info = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + int rc; + + if ((!max_size) || (!out)) + return -EINVAL; + + rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output); + if (rc) + return rc; + + return copy_to_user(out, &freq_info, + min((size_t) max_size, sizeof(freq_info))) ? 
-EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -480,6 +499,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, case HL_INFO_TOTAL_ENERGY: return total_energy_consumption_info(hpriv, args); + case HL_INFO_PLL_FREQUENCY: + return pll_frequency_info(hpriv, args); + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -ENOTTY; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 49d4b5dda115..732559053133 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -103,6 +103,8 @@ #define HBM_SCRUBBING_TIMEOUT_US 1000000 /* 1s */ +#define GAUDI_PLL_MAX 10 + static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = { "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3", "gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3", @@ -149,6 +151,19 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = { [PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe) }; +static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = { + [CPU_PLL] = mmPSOC_CPU_PLL_NR, + [PCI_PLL] = mmPSOC_PCI_PLL_NR, + [SRAM_PLL] = mmSRAM_W_PLL_NR, + [HBM_PLL] = mmPSOC_HBM_PLL_NR, + [NIC_PLL] = mmNIC0_PLL_NR, + [DMA_PLL] = mmDMA_W_PLL_NR, + [MESH_PLL] = mmMESH_W_PLL_NR, + [MME_PLL] = mmPSOC_MME_PLL_NR, + [TPC_PLL] = mmPSOC_TPC_PLL_NR, + [IF_PLL] = mmIF_W_PLL_NR +}; + static inline bool validate_packet_id(enum packet_id id) { switch (id) { @@ -688,61 +703,93 @@ static int gaudi_early_fini(struct hl_device *hdev) } /** - * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values + * gaudi_fetch_pll_frequency - Fetch PLL frequency values * * @hdev: pointer to hl_device structure + * @pll_index: index of the pll to fetch frequency from + * @pll_freq: pointer to store the pll frequency in MHz in each of the available + * outputs. 
if a certain output is not available a 0 will be set * */ -static int gaudi_fetch_psoc_frequency(struct hl_device *hdev) +static int gaudi_fetch_pll_frequency(struct hl_device *hdev, + enum gaudi_pll_index pll_index, + u16 *pll_freq_arr) { - struct asic_fixed_properties *prop = &hdev->asic_prop; - u32 trace_freq = 0, pll_clk = 0; - u32 div_fctr, div_sel, nr, nf, od; - int rc; + u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel, + pll_base_addr = gaudi_pll_base_addresses[pll_index]; + u16 freq = 0; + int i, rc; + + if (hdev->asic_prop.fw_security_status_valid && + (hdev->asic_prop.fw_app_security_map & + CPU_BOOT_DEV_STS0_PLL_INFO_EN)) { + rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr); - if (hdev->asic_prop.fw_security_disabled) { - div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2); - div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2); - nr = RREG32(mmPSOC_CPU_PLL_NR); - nf = RREG32(mmPSOC_CPU_PLL_NF); - od = RREG32(mmPSOC_CPU_PLL_OD); - } else { - rc = hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, - cpucp_pll_div_factor_reg, &div_fctr); - rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, - cpucp_pll_div_sel_reg, &div_sel); - rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, - cpucp_pll_nr_reg, &nr); - rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, - cpucp_pll_nf_reg, &nf); - rc |= hl_fw_cpucp_pll_info_get(hdev, cpucp_pll_cpu, - cpucp_pll_od_reg, &od); if (rc) return rc; - } - - if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) { - if (div_sel == DIV_SEL_REF_CLK) - trace_freq = PLL_REF_CLK; - else - trace_freq = PLL_REF_CLK / (div_fctr + 1); - } else if (div_sel == DIV_SEL_PLL_CLK || + } else if (hdev->asic_prop.fw_security_disabled) { + /* Backward compatibility */ + nr = RREG32(pll_base_addr + PLL_NR_OFFSET); + nf = RREG32(pll_base_addr + PLL_NF_OFFSET); + od = RREG32(pll_base_addr + PLL_OD_OFFSET); + + for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) { + div_fctr = RREG32(pll_base_addr + + PLL_DIV_FACTOR_0_OFFSET + i * 4); + div_sel = RREG32(pll_base_addr + + PLL_DIV_SEL_0_OFFSET + i * 4); + + if (div_sel == DIV_SEL_REF_CLK || + div_sel == DIV_SEL_DIVIDED_REF) { + if (div_sel == DIV_SEL_REF_CLK) + freq = PLL_REF_CLK; + else + freq = PLL_REF_CLK / (div_fctr + 1); + } else if (div_sel == DIV_SEL_PLL_CLK || div_sel == DIV_SEL_DIVIDED_PLL) { - pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)); - if (div_sel == DIV_SEL_PLL_CLK) - trace_freq = pll_clk; - else - trace_freq = pll_clk / (div_fctr + 1); + pll_clk = PLL_REF_CLK * (nf + 1) / + ((nr + 1) * (od + 1)); + if (div_sel == DIV_SEL_PLL_CLK) + freq = pll_clk; + else + freq = pll_clk / (div_fctr + 1); + } else { + dev_warn(hdev->dev, + "Received invalid div select value: %d", + div_sel); + } + + pll_freq_arr[i] = freq; + } } else { - dev_warn(hdev->dev, - "Received invalid div select value: %d", div_sel); + dev_err(hdev->dev, "Failed to fetch PLL frequency values\n"); + return -EIO; } - prop->psoc_timestamp_frequency = trace_freq; - prop->psoc_pci_pll_nr = nr; - prop->psoc_pci_pll_nf = nf; - prop->psoc_pci_pll_od = od; - prop->psoc_pci_pll_div_factor = div_fctr; + return 0; +} + +/** + * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values + * + * @hdev: pointer to hl_device structure + * + */ +static int gaudi_fetch_psoc_frequency(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u16 pll_freq[HL_PLL_NUM_OUTPUTS]; + int rc; + + rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq); + if (rc) + return rc; + + prop->psoc_timestamp_frequency = pll_freq[2]; + 
prop->psoc_pci_pll_nr = 0; + prop->psoc_pci_pll_nf = 0; + prop->psoc_pci_pll_od = 0; + prop->psoc_pci_pll_div_factor = 0; return 0; } @@ -7438,7 +7485,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev) if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) return 0; - rc = hl_fw_cpucp_info_get(hdev); + rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0); if (rc) return rc; diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h index 1a5e681c720d..f2d91f4fcffe 100644 --- a/drivers/misc/habanalabs/gaudi/gaudiP.h +++ b/drivers/misc/habanalabs/gaudi/gaudiP.h @@ -14,6 +14,7 @@ #include "../include/gaudi/gaudi_packets.h" #include "../include/gaudi/gaudi.h" #include "../include/gaudi/gaudi_async_events.h" +#include "../include/gaudi/gaudi_fw_if.h" #define NUMBER_OF_EXT_HW_QUEUES 8 #define NUMBER_OF_CMPLT_QUEUES NUMBER_OF_EXT_HW_QUEUES @@ -104,6 +105,13 @@ #define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE) #define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE) +#define PLL_NR_OFFSET 0 +#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR) +#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR) +#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \ + mmPSOC_CPU_PLL_NR) +#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR) + #define NUM_OF_SOB_IN_BLOCK \ (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \ mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2) diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 342227b93778..d91f553b8595 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5156,7 +5156,7 @@ int goya_cpucp_info_get(struct hl_device *hdev) if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) return 0; - rc = hl_fw_cpucp_info_get(hdev); + rc = hl_fw_cpucp_info_get(hdev, mmCPU_BOOT_DEV_STS0); if (rc) return rc; diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 759c068b2b7a..554f82271d5f 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -252,10 +252,26 @@ enum pq_init_status { * The packet's arguments specify the desired sensor and the field to * set. * - * CPUCP_PACKET_PLL_REG_GET - * Fetch register of PLL from the required PLL IP. - * The packet's arguments specify the PLL IP and the register to get. - * Each register is 32-bit value which is returned in result field. + * CPUCP_PACKET_PCIE_THROUGHPUT_GET + * Get throughput of PCIe. + * The packet's arguments specify the transaction direction (TX/RX). + * The window measurement is 10[msec], and the return value is in KB/sec. + * + * CPUCP_PACKET_PCIE_REPLAY_CNT_GET + * Replay count measures number of "replay" events, which is basicly + * number of retries done by PCIe. + * + * CPUCP_PACKET_TOTAL_ENERGY_GET + * Total Energy is measurement of energy from the time FW Linux + * is loaded. It is calculated by multiplying the average power + * by time (passed from armcp start). The units are in MilliJouls. + * + * CPUCP_PACKET_PLL_INFO_GET + * Fetch frequencies of PLL from the required PLL IP. + * The packet's arguments specify the device PLL type + * Pll type is the PLL from device pll_index enum. + * The result is composed of 4 outputs, each is 16-bit + * frequency in MHz. 
* */ @@ -289,7 +305,7 @@ enum cpucp_packet_id { CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */ - CPUCP_PACKET_PLL_REG_GET, /* internal */ + CPUCP_PACKET_PLL_INFO_GET, /* internal */ }; #define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5 @@ -300,6 +316,15 @@ enum cpucp_packet_id { #define CPUCP_PKT_CTL_OPCODE_SHIFT 16 #define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 +#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0 +#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFF +#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16 +#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000 +#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32 +#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000 +#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 +#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000 + struct cpucp_packet { union { __le64 value; /* For SET packets */ @@ -324,8 +349,9 @@ struct cpucp_packet { __u8 pad; /* unused */ }; - struct {/* For PLL register fetch */ + struct {/* For PLL info fetch */ __le16 pll_type; + /* TODO pll_reg is kept temporary before removal */ __le16 pll_reg; }; @@ -404,6 +430,7 @@ enum cpucp_pcie_throughput_attributes { cpucp_pcie_throughput_rx }; +/* TODO temporary kept before removal */ enum cpucp_pll_reg_attributes { cpucp_pll_nr_reg, cpucp_pll_nf_reg, @@ -412,6 +439,7 @@ enum cpucp_pll_reg_attributes { cpucp_pll_div_sel_reg }; +/* TODO temporary kept before removal */ enum cpucp_pll_type_attributes { cpucp_pll_cpu, cpucp_pll_pci, diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index 60916780df35..68ac15c53f37 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -131,6 +131,9 @@ * receiving the halt-machine event. * Initialized in: linux * + * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled. + * Initialized in: linux + * * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled. 
* This is a main indication that the * running FW populates the device status @@ -150,6 +153,7 @@ #define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8) #define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9) #define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10) +#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11) #define CPU_BOOT_DEV_STS0_ENABLED (1 << 31) enum cpu_boot_status { diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h index df21a40691e5..5bb54b34a8ae 100644 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h +++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h @@ -81,6 +81,7 @@ #include "sif_rtr_ctrl_6_regs.h" #include "sif_rtr_ctrl_7_regs.h" #include "psoc_etr_regs.h" +#include "psoc_cpu_pll_regs.h" #include "dma0_qm_masks.h" #include "mme0_qm_masks.h" @@ -102,9 +103,6 @@ #include "nic0_qm0_masks.h" -#include "psoc_hbm_pll_regs.h" -#include "psoc_cpu_pll_regs.h" - #define GAUDI_ECC_MEM_SEL_OFFSET 0xF18 #define GAUDI_ECC_ADDRESS_OFFSET 0xF1C #define GAUDI_ECC_SYNDROME_OFFSET 0xF20 @@ -307,4 +305,14 @@ #define mmPCIE_AUX_FLR_CTRL 0xC07394 #define mmPCIE_AUX_DBI 0xC07490 +#define mmPSOC_PCI_PLL_NR 0xC72100 +#define mmSRAM_W_PLL_NR 0x4C8100 +#define mmPSOC_HBM_PLL_NR 0xC74100 +#define mmNIC0_PLL_NR 0xCF9100 +#define mmDMA_W_PLL_NR 0x487100 +#define mmMESH_W_PLL_NR 0x4C7100 +#define mmPSOC_MME_PLL_NR 0xC71100 +#define mmPSOC_TPC_PLL_NR 0xC73100 +#define mmIF_W_PLL_NR 0x488100 + #endif /* ASIC_REG_GAUDI_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h deleted file mode 100644 index 687e2255cb19..000000000000 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_hbm_pll_regs.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright 2016-2018 HabanaLabs, Ltd. - * All Rights Reserved. 
- * - */ - -/************************************ - ** This is an auto-generated file ** - ** DO NOT EDIT BELOW ** - ************************************/ - -#ifndef ASIC_REG_PSOC_HBM_PLL_REGS_H_ -#define ASIC_REG_PSOC_HBM_PLL_REGS_H_ - -/* - ***************************************** - * PSOC_HBM_PLL (Prototype: PLL) - ***************************************** - */ - -#define mmPSOC_HBM_PLL_NR 0xC74100 - -#define mmPSOC_HBM_PLL_NF 0xC74104 - -#define mmPSOC_HBM_PLL_OD 0xC74108 - -#define mmPSOC_HBM_PLL_NB 0xC7410C - -#define mmPSOC_HBM_PLL_CFG 0xC74110 - -#define mmPSOC_HBM_PLL_LOSE_MASK 0xC74120 - -#define mmPSOC_HBM_PLL_LOCK_INTR 0xC74128 - -#define mmPSOC_HBM_PLL_LOCK_BYPASS 0xC7412C - -#define mmPSOC_HBM_PLL_DATA_CHNG 0xC74130 - -#define mmPSOC_HBM_PLL_RST 0xC74134 - -#define mmPSOC_HBM_PLL_SLIP_WD_CNTR 0xC74150 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_0 0xC74200 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_1 0xC74204 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_2 0xC74208 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_3 0xC7420C - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_0 0xC74220 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_1 0xC74224 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_2 0xC74228 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_CMD_3 0xC7422C - -#define mmPSOC_HBM_PLL_DIV_SEL_0 0xC74280 - -#define mmPSOC_HBM_PLL_DIV_SEL_1 0xC74284 - -#define mmPSOC_HBM_PLL_DIV_SEL_2 0xC74288 - -#define mmPSOC_HBM_PLL_DIV_SEL_3 0xC7428C - -#define mmPSOC_HBM_PLL_DIV_EN_0 0xC742A0 - -#define mmPSOC_HBM_PLL_DIV_EN_1 0xC742A4 - -#define mmPSOC_HBM_PLL_DIV_EN_2 0xC742A8 - -#define mmPSOC_HBM_PLL_DIV_EN_3 0xC742AC - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_0 0xC742C0 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_1 0xC742C4 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_2 0xC742C8 - -#define mmPSOC_HBM_PLL_DIV_FACTOR_BUSY_3 0xC742CC - -#define mmPSOC_HBM_PLL_CLK_GATER 0xC74300 - -#define mmPSOC_HBM_PLL_CLK_RLX_0 0xC74310 - -#define mmPSOC_HBM_PLL_CLK_RLX_1 0xC74314 - -#define mmPSOC_HBM_PLL_CLK_RLX_2 0xC74318 - -#define mmPSOC_HBM_PLL_CLK_RLX_3 0xC7431C - -#define mmPSOC_HBM_PLL_REF_CNTR_PERIOD 0xC74400 - -#define mmPSOC_HBM_PLL_REF_LOW_THRESHOLD 0xC74410 - -#define mmPSOC_HBM_PLL_REF_HIGH_THRESHOLD 0xC74420 - -#define mmPSOC_HBM_PLL_PLL_NOT_STABLE 0xC74430 - -#define mmPSOC_HBM_PLL_FREQ_CALC_EN 0xC74440 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_CFG 0xC74500 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_0 0xC74510 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_1 0xC74514 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_2 0xC74518 - -#define mmPSOC_HBM_PLL_RLX_BITMAP_3 0xC7451C - -#endif /* ASIC_REG_PSOC_HBM_PLL_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h deleted file mode 100644 index 3dc9bb4542dd..000000000000 --- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_pci_pll_regs.h +++ /dev/null @@ -1,114 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 - * - * Copyright 2016-2018 HabanaLabs, Ltd. - * All Rights Reserved. 
- * - */ - -/************************************ - ** This is an auto-generated file ** - ** DO NOT EDIT BELOW ** - ************************************/ - -#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_ -#define ASIC_REG_PSOC_PCI_PLL_REGS_H_ - -/* - ***************************************** - * PSOC_PCI_PLL (Prototype: PLL) - ***************************************** - */ - -#define mmPSOC_PCI_PLL_NR 0xC72100 - -#define mmPSOC_PCI_PLL_NF 0xC72104 - -#define mmPSOC_PCI_PLL_OD 0xC72108 - -#define mmPSOC_PCI_PLL_NB 0xC7210C - -#define mmPSOC_PCI_PLL_CFG 0xC72110 - -#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120 - -#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128 - -#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C - -#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130 - -#define mmPSOC_PCI_PLL_RST 0xC72134 - -#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C - -#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280 - -#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284 - -#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288 - -#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C - -#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0 - -#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4 - -#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8 - -#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8 - -#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC - -#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300 - -#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310 - -#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314 - -#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318 - -#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C - -#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400 - -#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410 - -#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420 - -#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430 - -#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_CFG 0xC72500 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_0 0xC72510 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_1 0xC72514 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_2 0xC72518 - -#define mmPSOC_PCI_PLL_RLX_BITMAP_3 0xC7251C - -#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index d9cc782aba21..96eea49f48bc 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -295,6 +295,7 @@ enum hl_device_status { #define HL_INFO_CLK_THROTTLE_REASON 13 #define HL_INFO_SYNC_MANAGER 14 #define HL_INFO_TOTAL_ENERGY 15 +#define HL_INFO_PLL_FREQUENCY 16 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -396,6 +397,12 @@ struct hl_info_energy { __u64 total_energy_consumption; }; +#define HL_PLL_NUM_OUTPUTS 4 + +struct hl_pll_frequency_info { + __u16 output[HL_PLL_NUM_OUTPUTS]; +}; + /** * struct hl_info_sync_manager - sync manager information * @first_available_sync_object: first available sob @@ -465,6 +472,8 @@ struct hl_info_args { * resolution. 
*/ __u32 period_ms; + /* PLL frequency retrieval */ + __u32 pll_index; }; __u32 pad; -- cgit v1.2.3 From 2a570736ef3977a8c4d53e2da1861d4f88c0ecdd Mon Sep 17 00:00:00 2001 From: kernel test robot Date: Thu, 19 Nov 2020 11:18:30 +0800 Subject: habanalabs: goya_reset_sob_group() can be static Make some functions static Reported-by: kernel test robot Signed-off-by: kernel test robot Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/goya/goya.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index d91f553b8595..5e151610c1c8 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5340,7 +5340,7 @@ static void goya_reset_sob(struct hl_device *hdev, void *data) } -void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group) +static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group) { } @@ -5366,12 +5366,12 @@ u64 goya_get_device_time(struct hl_device *hdev) return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); } -void goya_collective_wait_init_cs(struct hl_cs *cs) +static void goya_collective_wait_init_cs(struct hl_cs *cs) { } -int goya_collective_wait_create_jobs(struct hl_device *hdev, +static int goya_collective_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id, u32 collective_engine_id) { -- cgit v1.2.3 From 293744d92c87d50d0b4af89a2459fbd61741ff5e Mon Sep 17 00:00:00 2001 From: kernel test robot Date: Thu, 19 Nov 2020 12:25:43 +0800 Subject: habanalabs: gaudi_ctx_fini() can be static Make a function in gaudi.c to be static Reported-by: kernel test robot Signed-off-by: kernel test robot Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 732559053133..c99ad8d65dea 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -7902,7 +7902,7 @@ static int gaudi_ctx_init(struct hl_ctx *ctx) return gaudi_internal_cb_pool_init(ctx->hdev, ctx); } -void gaudi_ctx_fini(struct hl_ctx *ctx) +static void gaudi_ctx_fini(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; -- cgit v1.2.3 From 051504d9f6048509c27a994848ac02796999d6c5 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Fri, 20 Nov 2020 21:39:09 +0200 Subject: habanalabs: update firmware files Update various firmware header files with new defines. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/include/common/hl_boot_if.h | 17 +++++++++++++++++ drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h | 3 ++- drivers/misc/habanalabs/include/goya/goya_fw_if.h | 3 ++- 3 files changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h index 68ac15c53f37..e5801ecf0cb2 100644 --- a/drivers/misc/habanalabs/include/common/hl_boot_if.h +++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h @@ -56,6 +56,20 @@ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed. * The PCI device ID might be wrong. * + * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed. + * It mean that ppboot checksum + * verification for the preboot primary + * image has failed to match expected + * checksum. 
Trying to program image again + * might solve this. + * + * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed. + * It mean that ppboot checksum + * verification for the preboot secondary + * image has failed to match expected + * checksum. Trying to program image again + * might solve this. + * * CPU_BOOT_ERR0_ENABLED Error registers enabled. * This is a main indication that the * running FW populates the error @@ -72,6 +86,8 @@ #define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7) #define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8) #define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9) +#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10) +#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11) #define CPU_BOOT_ERR0_ENABLED (1 << 31) /* @@ -141,6 +157,7 @@ * bits are not garbage, but actual * statuses. * Initialized in: preboot + * */ #define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0) #define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1) diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h index d61a4c87b765..25acd9e87e20 100644 --- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h +++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h @@ -30,7 +30,8 @@ enum gaudi_pll_index { MESH_PLL, MME_PLL, TPC_PLL, - IF_PLL + IF_PLL, + PLL_MAX }; enum gaudi_nic_axi_error { diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h index 0fa80fe9f6cc..daf8d8cd14be 100644 --- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h +++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h @@ -22,7 +22,8 @@ enum goya_pll_index { MME_PLL, PCI_PLL, EMMC_PLL, - TPC_PLL + TPC_PLL, + PLL_MAX }; #define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */ -- cgit v1.2.3 From 64a9d5ab2c1cccf7aa902039887d5911a6060141 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sat, 21 Nov 2020 14:29:25 +0200 Subject: habanalabs/gaudi: print ECC type field We have the ECC type field from the firmware but the driver didn't print it, so we need to add that field to the ECC print message. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index c99ad8d65dea..cd18456fa523 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -6918,8 +6918,8 @@ static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device, le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); dev_err(hdev->dev, - "HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", - device, ch, wr_par, rd_par, ca_par, serr, derr); + "HBM%d pc%d ECC: TYPE=%d, WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n", + device, ch, type, wr_par, rd_par, ca_par, serr, derr); err = 1; -- cgit v1.2.3 From 9d127ad5719a865bac668a506dfe924ac11cd9bb Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Tue, 10 Nov 2020 16:30:53 +0200 Subject: habanalabs: indicate to user that a cs is gone We want to indicate to the user that a certain command submission is finished long time ago and it is no longer in database. This means no further information regarding this cs can be obtained. 
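From user space, the new state is visible through the wait-CS ioctl output: the status still reads HL_WAIT_CS_STATUS_COMPLETED, and the new HL_WAIT_CS_STATUS_FLAG_GONE flag marks that the fence has already been released. A minimal illustrative sketch of a caller follows; the HL_IOCTL_WAIT_CS name and the <misc/habanalabs.h> install path are taken from the existing uAPI rather than this patch, and the helper itself is hypothetical:

#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* installed uAPI header, assumed path */

/* Hypothetical helper: poll a CS once and report its state. */
static int query_cs(int fd, __u64 seq)
{
	union hl_wait_cs_args args = {0};

	args.in.seq = seq;
	args.in.timeout_us = 0;		/* poll, do not block */

	if (ioctl(fd, HL_IOCTL_WAIT_CS, &args))
		return -1;

	if (args.out.status != HL_WAIT_CS_STATUS_COMPLETED)
		printf("CS %llu not completed (status %u)\n",
		       (unsigned long long)seq, args.out.status);
	else if (args.out.flags & HL_WAIT_CS_STATUS_FLAG_GONE)
		/* Finished long ago; the driver no longer tracks it. */
		printf("CS %llu completed, fence already gone\n",
		       (unsigned long long)seq);
	else
		printf("CS %llu completed\n", (unsigned long long)seq);

	return 0;
}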
Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 68 ++++++++++++++++------ include/uapi/misc/habanalabs.h | 5 +- 2 files changed, 54 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index 7309dd2b88a9..f91b17480588 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -11,9 +11,22 @@ #include #include +/** + * enum hl_cs_wait_status - cs wait status + * @CS_WAIT_STATUS_BUSY: cs was not completed yet + * @CS_WAIT_STATUS_COMPLETED: cs completed + * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone + */ +enum hl_cs_wait_status { + CS_WAIT_STATUS_BUSY, + CS_WAIT_STATUS_COMPLETED, + CS_WAIT_STATUS_GONE +}; + static void job_wq_completion(struct work_struct *work); -static long _hl_cs_wait_ioctl(struct hl_device *hdev, - struct hl_ctx *ctx, u64 timeout_us, u64 seq); +static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, + u64 timeout_us, u64 seq, + enum hl_cs_wait_status *status); static void cs_do_release(struct kref *ref); static void hl_sob_reset(struct kref *ref) @@ -942,7 +955,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, int rc = 0, do_ctx_switch; void __user *chunks; u32 num_chunks, tmp; - long ret; + int ret; do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0); @@ -996,18 +1009,19 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, /* Need to wait for restore completion before execution phase */ if (num_chunks) { + enum hl_cs_wait_status status; wait_again: ret = _hl_cs_wait_ioctl(hdev, ctx, jiffies_to_usecs(hdev->timeout_jiffies), - *cs_seq); - if (ret <= 0) { + *cs_seq, &status); + if (ret) { if (ret == -ERESTARTSYS) { usleep_range(100, 200); goto wait_again; } dev_err(hdev->dev, - "Restore CS for context %d failed to complete %ld\n", + "Restore CS for context %d failed to complete %d\n", ctx->asid, ret); rc = -ENOEXEC; goto out; @@ -1337,12 +1351,14 @@ out: return rc; } -static long _hl_cs_wait_ioctl(struct hl_device *hdev, - struct hl_ctx *ctx, u64 timeout_us, u64 seq) +static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, + u64 timeout_us, u64 seq, + enum hl_cs_wait_status *status) { struct hl_fence *fence; unsigned long timeout; - long rc; + int rc = 0; + long completion_rc; if (timeout_us == MAX_SCHEDULE_TIMEOUT) timeout = timeout_us; @@ -1360,11 +1376,17 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev, seq, ctx->cs_sequence); } else if (fence) { if (!timeout_us) - rc = completion_done(&fence->completion); + completion_rc = completion_done(&fence->completion); else - rc = wait_for_completion_interruptible_timeout( + completion_rc = + wait_for_completion_interruptible_timeout( &fence->completion, timeout); + if (completion_rc > 0) + *status = CS_WAIT_STATUS_COMPLETED; + else + *status = CS_WAIT_STATUS_BUSY; + if (fence->error == -ETIMEDOUT) rc = -ETIMEDOUT; else if (fence->error == -EIO) @@ -1375,7 +1397,7 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev, dev_dbg(hdev->dev, "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", seq, ctx->cs_sequence); - rc = 1; + *status = CS_WAIT_STATUS_GONE; } hl_ctx_put(ctx); @@ -1387,14 +1409,16 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) { struct hl_device *hdev = hpriv->hdev; union 
hl_wait_cs_args *args = data; + enum hl_cs_wait_status status; u64 seq = args->in.seq; - long rc; + int rc; - rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq); + rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, + &status); memset(args, 0, sizeof(*args)); - if (rc < 0) { + if (rc) { if (rc == -ERESTARTSYS) { dev_err_ratelimited(hdev->dev, "user process got signal while waiting for CS handle %llu\n", @@ -1415,10 +1439,18 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) return rc; } - if (rc == 0) - args->out.status = HL_WAIT_CS_STATUS_BUSY; - else + switch (status) { + case CS_WAIT_STATUS_GONE: + args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; + fallthrough; + case CS_WAIT_STATUS_COMPLETED: args->out.status = HL_WAIT_CS_STATUS_COMPLETED; + break; + case CS_WAIT_STATUS_BUSY: + default: + args->out.status = HL_WAIT_CS_STATUS_BUSY; + break; + } return 0; } diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 96eea49f48bc..808d20da024a 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -662,10 +662,13 @@ struct hl_wait_cs_in { #define HL_WAIT_CS_STATUS_ABORTED 3 #define HL_WAIT_CS_STATUS_INTERRUPTED 4 +#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1 + struct hl_wait_cs_out { /* HL_WAIT_CS_STATUS_* */ __u32 status; - __u32 pad; + /* HL_WAIT_CS_STATUS_FLAG* */ + __u32 flags; }; union hl_wait_cs_args { -- cgit v1.2.3 From bd2f477f2037d2638464dc105f16994994308c20 Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Tue, 10 Nov 2020 17:26:22 +0200 Subject: habanalabs: add support for cs with timestamp add support for user to request a timestamp upon cs completion. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 44 ++++++++++++++++------ drivers/misc/habanalabs/common/habanalabs.h | 4 ++ include/uapi/misc/habanalabs.h | 6 ++- 3 files changed, 41 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index f91b17480588..bd2f54399020 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -26,7 +26,7 @@ enum hl_cs_wait_status { static void job_wq_completion(struct work_struct *work); static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq, - enum hl_cs_wait_status *status); + enum hl_cs_wait_status *status, s64 *timestamp); static void cs_do_release(struct kref *ref); static void hl_sob_reset(struct kref *ref) @@ -150,6 +150,7 @@ static void hl_fence_init(struct hl_fence *fence) { kref_init(&fence->refcount); fence->error = 0; + fence->timestamp = ktime_set(0, 0); init_completion(&fence->completion); } @@ -404,6 +405,8 @@ out: else if (!cs->submitted) cs->fence->error = -EBUSY; + if (cs->timestamp) + cs->fence->timestamp = ktime_get(); complete_all(&cs->fence->completion); hl_fence_put(cs->fence); @@ -734,7 +737,8 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) return -EBUSY; } - cs_type_flags = args->in.cs_flags & ~HL_CS_FLAGS_FORCE_RESTORE; + cs_type_flags = args->in.cs_flags & + ~(HL_CS_FLAGS_FORCE_RESTORE | HL_CS_FLAGS_TIMESTAMP); if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { dev_err(hdev->dev, @@ -798,7 +802,7 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev, } static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, 
- u32 num_chunks, u64 *cs_seq) + u32 num_chunks, u64 *cs_seq, bool timestamp) { bool int_queues_only = true; struct hl_device *hdev = hpriv->hdev; @@ -825,6 +829,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto free_cs_chunk_array; } + cs->timestamp = !!timestamp; *cs_seq = cs->sequence; hl_debugfs_add_cs(cs); @@ -995,7 +1000,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, rc = 0; } else { rc = cs_ioctl_default(hpriv, chunks, num_chunks, - cs_seq); + cs_seq, false); } mutex_unlock(&hpriv->restore_phase_mutex); @@ -1013,7 +1018,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args, wait_again: ret = _hl_cs_wait_ioctl(hdev, ctx, jiffies_to_usecs(hdev->timeout_jiffies), - *cs_seq, &status); + *cs_seq, &status, NULL); if (ret) { if (ret == -ERESTARTSYS) { usleep_range(100, 200); @@ -1154,7 +1159,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, void __user *chunks, u32 num_chunks, - u64 *cs_seq) + u64 *cs_seq, bool timestamp) { struct hl_cs_chunk *cs_chunk_array, *chunk; struct hw_queue_properties *hw_queue_prop; @@ -1259,6 +1264,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type, goto free_cs_chunk_array; } + cs->timestamp = !!timestamp; + /* * Save the signal CS fence for later initialization right before * hanging the wait CS on the queue. @@ -1334,10 +1341,11 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) case CS_TYPE_WAIT: case CS_TYPE_COLLECTIVE_WAIT: rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks, - &cs_seq); + &cs_seq, args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP); break; default: - rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq); + rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq, + args->in.cs_flags & HL_CS_FLAGS_TIMESTAMP); break; } @@ -1353,13 +1361,16 @@ out: static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq, - enum hl_cs_wait_status *status) + enum hl_cs_wait_status *status, s64 *timestamp) { struct hl_fence *fence; unsigned long timeout; int rc = 0; long completion_rc; + if (timestamp) + *timestamp = 0; + if (timeout_us == MAX_SCHEDULE_TIMEOUT) timeout = timeout_us; else @@ -1382,10 +1393,13 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, wait_for_completion_interruptible_timeout( &fence->completion, timeout); - if (completion_rc > 0) + if (completion_rc > 0) { *status = CS_WAIT_STATUS_COMPLETED; - else + if (timestamp) + *timestamp = ktime_to_ns(fence->timestamp); + } else { *status = CS_WAIT_STATUS_BUSY; + } if (fence->error == -ETIMEDOUT) rc = -ETIMEDOUT; @@ -1411,10 +1425,11 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) union hl_wait_cs_args *args = data; enum hl_cs_wait_status status; u64 seq = args->in.seq; + s64 timestamp; int rc; rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, - &status); + &status, ×tamp); memset(args, 0, sizeof(*args)); @@ -1439,6 +1454,11 @@ int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) return rc; } + if (timestamp) { + args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD; + args->out.timestamp_nsec = timestamp; + } + switch (status) { case CS_WAIT_STATUS_GONE: args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE; diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 9c9c8b24c47a..8e2d164d97e8 100644 --- 
a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -481,12 +481,14 @@ struct asic_fixed_properties { * @completion: fence is implemented using completion * @refcount: refcount for this fence * @error: mark this fence with error + * @timestamp: timestamp upon completion * */ struct hl_fence { struct completion completion; struct kref refcount; int error; + ktime_t timestamp; }; /** @@ -1127,6 +1129,7 @@ struct hl_userptr { * @tdr_active: true if TDR was activated for this CS (to prevent * double TDR activation). * @aborted: true if CS was aborted due to some device error. + * @timestamp: true if a timestmap must be captured upon completion */ struct hl_cs { u16 *jobs_in_queue_cnt; @@ -1147,6 +1150,7 @@ struct hl_cs { u8 timedout; u8 tdr_active; u8 aborted; + u8 timestamp; }; /** diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 808d20da024a..6eff4e05eccb 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -592,6 +592,7 @@ struct hl_cs_chunk { #define HL_CS_FLAGS_SIGNAL 0x2 #define HL_CS_FLAGS_WAIT 0x4 #define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 +#define HL_CS_FLAGS_TIMESTAMP 0x20 #define HL_CS_STATUS_SUCCESS 0 @@ -662,13 +663,16 @@ struct hl_wait_cs_in { #define HL_WAIT_CS_STATUS_ABORTED 3 #define HL_WAIT_CS_STATUS_INTERRUPTED 4 -#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1 +#define HL_WAIT_CS_STATUS_FLAG_GONE 0x1 +#define HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD 0x2 struct hl_wait_cs_out { /* HL_WAIT_CS_STATUS_* */ __u32 status; /* HL_WAIT_CS_STATUS_FLAG* */ __u32 flags; + /* valid only if HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD is set */ + __s64 timestamp_nsec; }; union hl_wait_cs_args { -- cgit v1.2.3 From d2bbf2ca33440b8e8cab2387802a6d5694611227 Mon Sep 17 00:00:00 2001 From: Alon Mizrahi Date: Sun, 22 Nov 2020 22:09:52 +0200 Subject: habanalabs: add ull to PLL masks These defines are 64-bit defines so they need ull suffix. Signed-off-by: Alon Mizrahi Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/include/common/cpucp_if.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h index 554f82271d5f..00bd9b392f93 100644 --- a/drivers/misc/habanalabs/include/common/cpucp_if.h +++ b/drivers/misc/habanalabs/include/common/cpucp_if.h @@ -317,13 +317,13 @@ enum cpucp_packet_id { #define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 #define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0 -#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFF +#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull #define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16 -#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000 +#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull #define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32 -#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000 +#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull #define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48 -#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000 +#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull struct cpucp_packet { union { -- cgit v1.2.3 From ee3287798d49bbe5a9597d51ec914144179c03e8 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 25 Nov 2020 08:02:40 +0200 Subject: habanalabs: add missing counter update The global CS drop-on-reset counter wasn't updated together with the context counter. 
Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/hw_queue.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c index d1d30fb36410..7caf868d1585 100644 --- a/drivers/misc/habanalabs/common/hw_queue.c +++ b/drivers/misc/habanalabs/common/hw_queue.c @@ -529,6 +529,7 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) hdev->asic_funcs->hw_queues_lock(hdev); if (!hl_device_operational(hdev, &status)) { + atomic64_inc(&cntr->device_in_reset_drop_cnt); atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt); dev_err(hdev->dev, "device is %s, CS rejected!\n", hdev->status[status]); -- cgit v1.2.3 From a63c3fb37b157b56d713a386dc024bf6deb9c7cf Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Thu, 26 Nov 2020 18:11:05 +0200 Subject: habanalabs/gaudi: handle reset when f/w is in preboot Currently, if the f/w is in preboot/u-boot they don't perform the new reset mechanism. Therefore, the driver needs to reset the device. To prevent reset of PCI_IF, the driver needs to first configure the reset units. If the security is enabled, the driver can't configure the reset units. In that situation, don't reset the card. Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/gaudi/gaudi.c | 60 ++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index cd18456fa523..568afe1b6849 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -3829,33 +3829,6 @@ static void gaudi_pre_hw_init(struct hl_device *hdev) * cleared by the H/W upon H/W reset */ WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY); - if (hdev->asic_prop.fw_security_disabled) { - /* Configure the reset registers. Must be done as early as - * possible in case we fail during H/W initialization - */ - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, - (CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_TPC_7_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, - (CFG_RST_H_HBM_MASK | - CFG_RST_H_TPC_7_MASK | - CFG_RST_H_NIC_MASK | - CFG_RST_H_SM_MASK | - CFG_RST_H_DMA_MASK | - CFG_RST_H_MME_MASK | - CFG_RST_H_CPU_MASK | - CFG_RST_H_MMU_MASK)); - - WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, - (CFG_RST_L_IF_MASK | - CFG_RST_L_PSOC_MASK | - CFG_RST_L_TPC_MASK)); - } } static int gaudi_hw_init(struct hl_device *hdev) @@ -3946,7 +3919,8 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) /* Set device to handle FLR by H/W as we will put the device CPU to * halt mode */ - if (!hdev->asic_prop.hard_reset_done_by_fw) + if (hdev->asic_prop.fw_security_disabled && + !hdev->asic_prop.hard_reset_done_by_fw) WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK)); @@ -3957,7 +3931,35 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset) WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE); - if (!hdev->asic_prop.hard_reset_done_by_fw) { + if (hdev->asic_prop.fw_security_disabled && + !hdev->asic_prop.hard_reset_done_by_fw) { + + /* Configure the reset registers. 
Must be done as early as + * possible in case we fail during H/W initialization + */ + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H, + (CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_TPC_7_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H, + (CFG_RST_H_HBM_MASK | + CFG_RST_H_TPC_7_MASK | + CFG_RST_H_NIC_MASK | + CFG_RST_H_SM_MASK | + CFG_RST_H_DMA_MASK | + CFG_RST_H_MME_MASK | + CFG_RST_H_CPU_MASK | + CFG_RST_H_MMU_MASK)); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L, + (CFG_RST_L_IF_MASK | + CFG_RST_L_PSOC_MASK | + CFG_RST_L_TPC_MASK)); + msleep(cpu_timeout_ms); /* Tell ASIC not to re-initialize PCIe */ -- cgit v1.2.3 From 8e718f2eda8d4d2268f1872bd4deddd54900283a Mon Sep 17 00:00:00 2001 From: Ofir Bitton Date: Thu, 26 Nov 2020 13:01:11 +0200 Subject: habanalabs: free host huge va_range if not used If huge range is not valid, driver uses the host range also for huge page allocations, but driver never frees its allocation. This introduces a memory leak every time a user closes its context. Signed-off-by: Ofir Bitton Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/memory.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index 744275dd6410..cbe9da4e0211 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -1761,6 +1761,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx, goto clear_host_va_range; } } else { + kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] = ctx->va_range[HL_VA_RANGE_TYPE_HOST]; } @@ -1906,9 +1907,10 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx) spin_unlock(&vm->idr_lock); va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]); + va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); + if (hdev->pmmu_huge_range) va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]); - va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]); mutex_destroy(&ctx->mem_hash_lock); hl_mmu_ctx_fini(ctx); -- cgit v1.2.3 From 3b82c34f0662cc311a81b4b0e94b982db3d24a7c Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Fri, 27 Nov 2020 18:10:20 +0200 Subject: habanalabs: change messages to debug level Some messages should be changed to debug mode as we want to keep minimal prints during normal operation of the device. 
Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/firmware_if.c | 4 ++-- drivers/misc/habanalabs/gaudi/gaudi.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index c4a8d6ca34bb..0e1c629e9800 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -526,7 +526,7 @@ static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg, security_val = RREG32(cpu_security_boot_status_reg); if (security_val & CPU_BOOT_DEV_STS0_ENABLED) - dev_info(hdev->dev, "Device security status %#x\n", + dev_dbg(hdev->dev, "Device security status %#x\n", security_val); } @@ -806,7 +806,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg, hdev->asic_prop.hard_reset_done_by_fw = true; } - dev_info(hdev->dev, "Firmware hard-reset is %s\n", + dev_dbg(hdev->dev, "Firmware hard-reset is %s\n", hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled"); dev_info(hdev->dev, "Successfully loaded firmware to device\n"); diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 568afe1b6849..aac3c9c5a2e0 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -1732,7 +1732,7 @@ static int gaudi_enable_msi_single(struct hl_device *hdev) { int rc, irq; - dev_info(hdev->dev, "Working in single MSI IRQ mode\n"); + dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n"); irq = gaudi_pci_irq_vector(hdev, 0, false); rc = request_irq(irq, gaudi_irq_handler_single, 0, -- cgit v1.2.3 From 3e438b42a5fbf19e44c6c3e0481ed4b5672d8f42 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Sun, 22 Nov 2020 11:02:50 +0200 Subject: habanalabs: Add mask for CS type bits in CS flags hl_cs_sanity_checks() extracts the CS type bits of the CS flags, by masking out the non-type bits. To save the need for updating the function whenever new bits for non-type flags are added, add an explicit mask for the CS type bits. Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_submission.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index bd2f54399020..dc85ed6ab525 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -11,6 +11,9 @@ #include #include +#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \ + HL_CS_FLAGS_COLLECTIVE_WAIT) + /** * enum hl_cs_wait_status - cs wait status * @CS_WAIT_STATUS_BUSY: cs was not completed yet @@ -737,8 +740,7 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args) return -EBUSY; } - cs_type_flags = args->in.cs_flags & - ~(HL_CS_FLAGS_FORCE_RESTORE | HL_CS_FLAGS_TIMESTAMP); + cs_type_flags = args->in.cs_flags & HL_CS_FLAGS_TYPE_MASK; if (unlikely(cs_type_flags && !is_power_of_2(cs_type_flags))) { dev_err(hdev->dev, -- cgit v1.2.3 From f07486745442f6118ede18e2b52ee7be69cde5a4 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Sun, 2 Aug 2020 22:51:31 +0300 Subject: habanalabs: Modify the cs_cnt of a CB to be atomic Modify the CS counter of a CB to be atomic, so no locking is required when it is being modified or read. 
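Condensed, the pattern change in the diff below amounts to the following (a sketch, not a literal excerpt):

	/* before: counter guarded by the CB spinlock */
	spin_lock(&cb->lock);
	cb->cs_cnt++;
	spin_unlock(&cb->lock);

	/* after: lock-free atomic counter */
	atomic_inc(&cb->cs_cnt);

Readers such as debugfs switch from the plain u32 field to atomic_read(&cb->cs_cnt), and hl_cb.lock is left protecting only the mmap flow.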
Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../misc/habanalabs/common/command_submission.c | 29 ++++++---------------- drivers/misc/habanalabs/common/debugfs.c | 2 +- drivers/misc/habanalabs/common/habanalabs.h | 4 +-- drivers/misc/habanalabs/gaudi/gaudi.c | 10 ++++---- drivers/misc/habanalabs/goya/goya.c | 4 +-- 5 files changed, 17 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c index dc85ed6ab525..beb482310a58 100644 --- a/drivers/misc/habanalabs/common/command_submission.c +++ b/drivers/misc/habanalabs/common/command_submission.c @@ -233,10 +233,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) job->patched_cb = parser.patched_cb; job->job_cb_size = parser.patched_cb_size; job->contains_dma_pkt = parser.contains_dma_pkt; - - spin_lock(&job->patched_cb->lock); - job->patched_cb->cs_cnt++; - spin_unlock(&job->patched_cb->lock); + atomic_inc(&job->patched_cb->cs_cnt); } /* @@ -244,9 +241,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) * original CB anymore because it was already parsed and * won't be accessed again for this CS */ - spin_lock(&job->user_cb->lock); - job->user_cb->cs_cnt--; - spin_unlock(&job->user_cb->lock); + atomic_dec(&job->user_cb->cs_cnt); hl_cb_put(job->user_cb); job->user_cb = NULL; } else if (!rc) { @@ -268,10 +263,7 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) * created, so we need to check it's not NULL */ if (job->patched_cb) { - spin_lock(&job->patched_cb->lock); - job->patched_cb->cs_cnt--; - spin_unlock(&job->patched_cb->lock); - + atomic_dec(&job->patched_cb->cs_cnt); hl_cb_put(job->patched_cb); } } @@ -284,10 +276,7 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job) if (job->is_kernel_allocated_cb && ((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) || job->queue_type == QUEUE_TYPE_INT)) { - spin_lock(&job->user_cb->lock); - job->user_cb->cs_cnt--; - spin_unlock(&job->user_cb->lock); - + atomic_dec(&job->user_cb->cs_cnt); hl_cb_put(job->user_cb); } @@ -680,9 +669,7 @@ static struct hl_cb *get_cb_from_cs_chunk(struct hl_device *hdev, goto release_cb; } - spin_lock(&cb->lock); - cb->cs_cnt++; - spin_unlock(&cb->lock); + atomic_inc(&cb->cs_cnt); return cb; @@ -936,9 +923,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks, goto put_cs; release_cb: - spin_lock(&cb->lock); - cb->cs_cnt--; - spin_unlock(&cb->lock); + atomic_dec(&cb->cs_cnt); hl_cb_put(cb); free_cs_object: cs_rollback(hdev, cs); @@ -1133,7 +1118,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev, job->id = 0; job->cs = cs; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = q_idx; diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c index b47a62da0b41..cef716643979 100644 --- a/drivers/misc/habanalabs/common/debugfs.c +++ b/drivers/misc/habanalabs/common/debugfs.c @@ -116,7 +116,7 @@ static int command_buffers_show(struct seq_file *s, void *data) " %03llu %d 0x%08x %d %d %d\n", cb->id, cb->ctx->asid, cb->size, kref_read(&cb->refcount), - cb->mmap, cb->cs_cnt); + cb->mmap, atomic_read(&cb->cs_cnt)); } spin_unlock(&dev_entry->cb_spinlock); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index 
8e2d164d97e8..571eda6ef5ab 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -532,7 +532,7 @@ struct hl_cb_mgr { * @refcount: reference counter for usage of the CB. * @hdev: pointer to device this CB belongs to. * @ctx: pointer to the CB owner's context. - * @lock: spinlock to protect mmap/cs flows. + * @lock: spinlock to protect mmap flows. * @debugfs_list: node in debugfs list of command buffers. * @pool_list: node in pool list of command buffers. * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to @@ -561,7 +561,7 @@ struct hl_cb { dma_addr_t bus_address; u32 mmap_size; u32 size; - u32 cs_cnt; + atomic_t cs_cnt; u8 mmap; u8 is_pool; u8 is_internal; diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index aac3c9c5a2e0..1f1926607c5e 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -838,7 +838,7 @@ static int _gaudi_init_tpc_mem(struct hl_device *hdev, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; job->patched_cb = job->user_cb; @@ -861,7 +861,7 @@ free_job: hl_userptr_delete_list(hdev, &job->userptr_list); hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); release_cb: hl_cb_put(cb); @@ -1248,7 +1248,7 @@ static int gaudi_collective_wait_create_job(struct hl_device *hdev, job->id = 0; job->cs = cs; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = queue_id; @@ -5570,7 +5570,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; job->patched_cb = job->user_cb; @@ -5581,7 +5581,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr, rc = gaudi_send_job_on_qman0(hdev, job); hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); /* Verify DMA is OK */ err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 5e151610c1c8..3e5eb9e3d7bd 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4811,7 +4811,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, job->id = 0; job->user_cb = cb; - job->user_cb->cs_cnt++; + atomic_inc(&job->user_cb->cs_cnt); job->user_cb_size = cb_size; job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; job->patched_cb = job->user_cb; @@ -4823,7 +4823,7 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, hl_debugfs_remove_job(hdev, job); kfree(job); - cb->cs_cnt--; + atomic_dec(&cb->cs_cnt); release_cb: hl_cb_put(cb); -- cgit v1.2.3 From f44afb5b5a5d04448da843b2fe872e01669bc317 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Wed, 2 Sep 2020 13:43:32 +0300 Subject: habanalabs: Add CB IOCTL opcode to retrieve CB information Add a new CB IOCTL opcode that enables a user to query about a CB and get its usage count. 
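From user space, the new opcode is reached through the existing CB ioctl. A minimal illustrative sketch follows; the HL_IOCTL_CB name and the <misc/habanalabs.h> install path come from the existing uAPI rather than this patch, and the helper itself is hypothetical:

#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* installed uAPI header, assumed path */

/* Hypothetical helper: query how many command submissions currently
 * reference a previously created command buffer.
 */
static int cb_usage_count(int fd, __u64 cb_handle, __u32 *usage_cnt)
{
	union hl_cb_args args = {0};

	args.in.op = HL_CB_OP_INFO;
	args.in.cb_handle = cb_handle;

	if (ioctl(fd, HL_IOCTL_CB, &args))
		return -1;

	*usage_cnt = args.out.usage_cnt;
	return 0;
}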
Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/common/command_buffer.c | 38 +++++++++++++++++++++++++ include/uapi/misc/habanalabs.h | 15 ++++++++-- 2 files changed, 51 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c index 2856bb3423ee..6f6a904ab6ca 100644 --- a/drivers/misc/habanalabs/common/command_buffer.c +++ b/drivers/misc/habanalabs/common/command_buffer.c @@ -375,12 +375,43 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) return rc; } +static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u64 cb_handle, u32 *usage_cnt) +{ + struct hl_cb *cb; + u32 handle; + int rc = 0; + + /* The CB handle was given to user to do mmap, so need to shift it back + * to the value which was allocated by the IDR module. + */ + cb_handle >>= PAGE_SHIFT; + handle = (u32) cb_handle; + + spin_lock(&mgr->cb_lock); + + cb = idr_find(&mgr->cb_handles, handle); + if (!cb) { + dev_err(hdev->dev, + "CB info failed, no match to handle 0x%x\n", handle); + rc = -EINVAL; + goto out; + } + + *usage_cnt = atomic_read(&cb->cs_cnt); + +out: + spin_unlock(&mgr->cb_lock); + return rc; +} + int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) { union hl_cb_args *args = data; struct hl_device *hdev = hpriv->hdev; enum hl_device_status status; u64 handle = 0; + u32 usage_cnt = 0; int rc; if (!hl_device_operational(hdev, &status)) { @@ -413,6 +444,13 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) args->in.cb_handle); break; + case HL_CB_OP_INFO: + rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle, + &usage_cnt); + memset(args, 0, sizeof(*args)); + args->out.usage_cnt = usage_cnt; + break; + default: rc = -ENOTTY; break; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 6eff4e05eccb..8c15a7d336a0 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -483,6 +483,8 @@ struct hl_info_args { #define HL_CB_OP_CREATE 0 /* Opcode to destroy previously created command buffer */ #define HL_CB_OP_DESTROY 1 +/* Opcode to retrieve information about a command buffer */ +#define HL_CB_OP_INFO 2 /* 2MB minus 32 bytes for 2xMSG_PROT */ #define HL_MAX_CB_SIZE (0x200000 - 32) @@ -506,8 +508,17 @@ struct hl_cb_in { }; struct hl_cb_out { - /* Handle of CB */ - __u64 cb_handle; + union { + /* Handle of CB */ + __u64 cb_handle; + + /* Information about CB */ + struct { + /* Usage count of CB */ + __u32 usage_cnt; + __u32 pad; + }; + }; }; union hl_cb_args { -- cgit v1.2.3 From bf4d01e107e3fdc03104fa0db96d8d2721529eb6 Mon Sep 17 00:00:00 2001 From: Wan Ahmad Zainie Date: Mon, 16 Nov 2020 20:08:31 +0800 Subject: phy: intel: Add Keem Bay USB PHY support Add PHY driver for the USB3.1 and USB 2.0 PHYs found on Intel Keem Bay SoC. This driver takes care of enabling the required USB susbsystem (USS) clocks, initializing the PHYs and turning on/off the USB dwc3 core. 
Signed-off-by: Wan Ahmad Zainie Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20201116120831.32641-3-wan.ahmad.zainie.wan.mohamad@intel.com Signed-off-by: Vinod Koul --- drivers/phy/intel/Kconfig | 12 ++ drivers/phy/intel/Makefile | 1 + drivers/phy/intel/phy-intel-keembay-usb.c | 301 ++++++++++++++++++++++++++++++ 3 files changed, 314 insertions(+) create mode 100644 drivers/phy/intel/phy-intel-keembay-usb.c (limited to 'drivers') diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig index 58ec695c92ec..f72c699a3e02 100644 --- a/drivers/phy/intel/Kconfig +++ b/drivers/phy/intel/Kconfig @@ -14,6 +14,18 @@ config PHY_INTEL_KEEMBAY_EMMC To compile this driver as a module, choose M here: the module will be called phy-keembay-emmc.ko. +config PHY_INTEL_KEEMBAY_USB + tristate "Intel Keem Bay USB PHY driver" + depends on ARCH_KEEMBAY || COMPILE_TEST + depends on HAS_IOMEM + select GENERIC_PHY + select REGMAP_MMIO + help + Choose this option if you have an Intel Keem Bay SoC. + + To compile this driver as a module, choose M here: the module + will be called phy-keembay-usb.ko. + config PHY_INTEL_LGM_COMBO bool "Intel Lightning Mountain ComboPHY driver" depends on X86 || COMPILE_TEST diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile index a5e0af5ccd75..14550981a707 100644 --- a/drivers/phy/intel/Makefile +++ b/drivers/phy/intel/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o +obj-$(CONFIG_PHY_INTEL_KEEMBAY_USB) += phy-intel-keembay-usb.o obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o diff --git a/drivers/phy/intel/phy-intel-keembay-usb.c b/drivers/phy/intel/phy-intel-keembay-usb.c new file mode 100644 index 000000000000..c8b05f7b2445 --- /dev/null +++ b/drivers/phy/intel/phy-intel-keembay-usb.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Keem Bay USB PHY driver + * Copyright (C) 2020 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* USS (USB Subsystem) clock control registers */ +#define USS_CPR_CLK_EN 0x00 +#define USS_CPR_CLK_SET 0x04 +#define USS_CPR_CLK_CLR 0x08 +#define USS_CPR_RST_EN 0x10 +#define USS_CPR_RST_SET 0x14 +#define USS_CPR_RST_CLR 0x18 + +/* USS clock/reset bit fields */ +#define USS_CPR_PHY_TST BIT(6) +#define USS_CPR_LOW_JIT BIT(5) +#define USS_CPR_CORE BIT(4) +#define USS_CPR_SUSPEND BIT(3) +#define USS_CPR_ALT_REF BIT(2) +#define USS_CPR_REF BIT(1) +#define USS_CPR_SYS BIT(0) +#define USS_CPR_MASK GENMASK(6, 0) + +/* USS APB slave registers */ +#define USS_USB_CTRL_CFG0 0x10 +#define VCC_RESET_N_MASK BIT(31) + +#define USS_USB_PHY_CFG0 0x30 +#define POR_MASK BIT(15) +#define PHY_RESET_MASK BIT(14) +#define PHY_REF_USE_PAD_MASK BIT(5) + +#define USS_USB_PHY_CFG6 0x64 +#define PHY0_SRAM_EXT_LD_DONE_MASK BIT(23) + +#define USS_USB_PARALLEL_IF_CTRL 0xa0 +#define USB_PHY_CR_PARA_SEL_MASK BIT(2) + +#define USS_USB_TSET_SIGNALS_AND_GLOB 0xac +#define USB_PHY_CR_PARA_CLK_EN_MASK BIT(7) + +#define USS_USB_STATUS_REG 0xb8 +#define PHY0_SRAM_INIT_DONE_MASK BIT(3) + +#define USS_USB_TIEOFFS_CONSTANTS_REG1 0xc0 +#define IDDQ_ENABLE_MASK BIT(10) + +struct keembay_usb_phy { + struct device *dev; + struct regmap *regmap_cpr; + struct regmap *regmap_slv; +}; + +static const struct regmap_config keembay_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = 
USS_USB_TIEOFFS_CONSTANTS_REG1, +}; + +static int keembay_usb_clocks_on(struct keembay_usb_phy *priv) +{ + int ret; + + ret = regmap_update_bits(priv->regmap_cpr, USS_CPR_CLK_SET, + USS_CPR_MASK, USS_CPR_MASK); + if (ret) { + dev_err(priv->dev, "error clock set: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(priv->regmap_cpr, USS_CPR_RST_SET, + USS_CPR_MASK, USS_CPR_MASK); + if (ret) { + dev_err(priv->dev, "error reset set: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(priv->regmap_slv, + USS_USB_TIEOFFS_CONSTANTS_REG1, + IDDQ_ENABLE_MASK, + FIELD_PREP(IDDQ_ENABLE_MASK, 0)); + if (ret) { + dev_err(priv->dev, "error iddq disable: %d\n", ret); + return ret; + } + + /* Wait 30us to ensure all analog blocks are powered up. */ + usleep_range(30, 60); + + ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG0, + PHY_REF_USE_PAD_MASK, + FIELD_PREP(PHY_REF_USE_PAD_MASK, 1)); + if (ret) + dev_err(priv->dev, "error ref clock select: %d\n", ret); + + return ret; +} + +static int keembay_usb_core_off(struct keembay_usb_phy *priv) +{ + int ret; + + ret = regmap_update_bits(priv->regmap_slv, USS_USB_CTRL_CFG0, + VCC_RESET_N_MASK, + FIELD_PREP(VCC_RESET_N_MASK, 0)); + if (ret) + dev_err(priv->dev, "error core reset: %d\n", ret); + + return ret; +} + +static int keembay_usb_core_on(struct keembay_usb_phy *priv) +{ + int ret; + + ret = regmap_update_bits(priv->regmap_slv, USS_USB_CTRL_CFG0, + VCC_RESET_N_MASK, + FIELD_PREP(VCC_RESET_N_MASK, 1)); + if (ret) + dev_err(priv->dev, "error core on: %d\n", ret); + + return ret; +} + +static int keembay_usb_phys_on(struct keembay_usb_phy *priv) +{ + int ret; + + ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG0, + POR_MASK | PHY_RESET_MASK, + FIELD_PREP(POR_MASK | PHY_RESET_MASK, 0)); + if (ret) + dev_err(priv->dev, "error phys on: %d\n", ret); + + return ret; +} + +static int keembay_usb_phy_init(struct phy *phy) +{ + struct keembay_usb_phy *priv = phy_get_drvdata(phy); + u32 val; + int ret; + + ret = keembay_usb_core_off(priv); + if (ret) + return ret; + + /* + * According to Keem Bay datasheet, wait minimum 20us after clock + * enable before bringing PHYs out of reset. + */ + usleep_range(20, 40); + + ret = keembay_usb_phys_on(priv); + if (ret) + return ret; + + ret = regmap_update_bits(priv->regmap_slv, + USS_USB_TSET_SIGNALS_AND_GLOB, + USB_PHY_CR_PARA_CLK_EN_MASK, + FIELD_PREP(USB_PHY_CR_PARA_CLK_EN_MASK, 0)); + if (ret) { + dev_err(priv->dev, "error cr clock disable: %d\n", ret); + return ret; + } + + /* + * According to Keem Bay datasheet, wait 2us after disabling the + * clock into the USB 3.x parallel interface. 
+ */ + udelay(2); + + ret = regmap_update_bits(priv->regmap_slv, + USS_USB_PARALLEL_IF_CTRL, + USB_PHY_CR_PARA_SEL_MASK, + FIELD_PREP(USB_PHY_CR_PARA_SEL_MASK, 1)); + if (ret) { + dev_err(priv->dev, "error cr select: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(priv->regmap_slv, + USS_USB_TSET_SIGNALS_AND_GLOB, + USB_PHY_CR_PARA_CLK_EN_MASK, + FIELD_PREP(USB_PHY_CR_PARA_CLK_EN_MASK, 1)); + if (ret) { + dev_err(priv->dev, "error cr clock enable: %d\n", ret); + return ret; + } + + ret = regmap_read_poll_timeout(priv->regmap_slv, USS_USB_STATUS_REG, + val, val & PHY0_SRAM_INIT_DONE_MASK, + USEC_PER_MSEC, 10 * USEC_PER_MSEC); + if (ret) { + dev_err(priv->dev, "SRAM init not done: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(priv->regmap_slv, USS_USB_PHY_CFG6, + PHY0_SRAM_EXT_LD_DONE_MASK, + FIELD_PREP(PHY0_SRAM_EXT_LD_DONE_MASK, 1)); + if (ret) { + dev_err(priv->dev, "error SRAM init done set: %d\n", ret); + return ret; + } + + /* + * According to Keem Bay datasheet, wait 20us after setting the + * SRAM load done bit, before releasing the controller reset. + */ + usleep_range(20, 40); + + return keembay_usb_core_on(priv); +} + +static const struct phy_ops ops = { + .init = keembay_usb_phy_init, + .owner = THIS_MODULE, +}; + +static int keembay_usb_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct keembay_usb_phy *priv; + struct phy *generic_phy; + struct phy_provider *phy_provider; + void __iomem *base; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + base = devm_platform_ioremap_resource_byname(pdev, "cpr-apb-base"); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->regmap_cpr = devm_regmap_init_mmio(dev, base, + &keembay_regmap_config); + if (IS_ERR(priv->regmap_cpr)) + return PTR_ERR(priv->regmap_cpr); + + base = devm_platform_ioremap_resource_byname(pdev, "slv-apb-base"); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->regmap_slv = devm_regmap_init_mmio(dev, base, + &keembay_regmap_config); + if (IS_ERR(priv->regmap_slv)) + return PTR_ERR(priv->regmap_slv); + + generic_phy = devm_phy_create(dev, dev->of_node, &ops); + if (IS_ERR(generic_phy)) + return dev_err_probe(dev, PTR_ERR(generic_phy), + "failed to create PHY\n"); + + phy_set_drvdata(generic_phy, priv); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return dev_err_probe(dev, PTR_ERR(phy_provider), + "failed to register phy provider\n"); + + /* Setup USB subsystem clocks */ + ret = keembay_usb_clocks_on(priv); + if (ret) + return ret; + + /* and turn on the DWC3 core, prior to DWC3 driver init. */ + return keembay_usb_core_on(priv); +} + +static const struct of_device_id keembay_usb_phy_dt_ids[] = { + { .compatible = "intel,keembay-usb-phy" }, + {} +}; +MODULE_DEVICE_TABLE(of, keembay_usb_phy_dt_ids); + +static struct platform_driver keembay_usb_phy_driver = { + .probe = keembay_usb_phy_probe, + .driver = { + .name = "keembay-usb-phy", + .of_match_table = keembay_usb_phy_dt_ids, + }, +}; +module_platform_driver(keembay_usb_phy_driver); + +MODULE_AUTHOR("Wan Ahmad Zainie "); +MODULE_DESCRIPTION("Intel Keem Bay USB PHY driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From d87da32372a03ce121fc65ccd2c9a43edf56b364 Mon Sep 17 00:00:00 2001 From: Sergio Paracuellos Date: Sat, 21 Nov 2020 16:50:35 +0100 Subject: phy: ralink: Add PHY driver for MT7621 PCIe PHY This patch adds a driver for the PCIe PHY of MT7621 SoC. 
Signed-off-by: Sergio Paracuellos Link: https://lore.kernel.org/r/20201121155037.21354-3-sergio.paracuellos@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/ralink/Kconfig | 8 + drivers/phy/ralink/Makefile | 1 + drivers/phy/ralink/phy-mt7621-pci.c | 352 ++++++++++++++++++++++++++++++++++++ 3 files changed, 361 insertions(+) create mode 100644 drivers/phy/ralink/phy-mt7621-pci.c (limited to 'drivers') diff --git a/drivers/phy/ralink/Kconfig b/drivers/phy/ralink/Kconfig index da982c9cffb3..2fabb14d2998 100644 --- a/drivers/phy/ralink/Kconfig +++ b/drivers/phy/ralink/Kconfig @@ -2,6 +2,14 @@ # # PHY drivers for Ralink platforms. # +config PHY_MT7621_PCI + tristate "MediaTek MT7621 PCI PHY Driver" + depends on (RALINK || COMPILE_TEST) && OF + select GENERIC_PHY + select REGMAP_MMIO + help + Say 'Y' here to add support for MediaTek MT7621 PCI PHY driver, + config PHY_RALINK_USB tristate "Ralink USB PHY driver" depends on RALINK || COMPILE_TEST diff --git a/drivers/phy/ralink/Makefile b/drivers/phy/ralink/Makefile index d8d3ffcf0a15..cda2a4a7ca5e 100644 --- a/drivers/phy/ralink/Makefile +++ b/drivers/phy/ralink/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PHY_MT7621_PCI) += phy-mt7621-pci.o obj-$(CONFIG_PHY_RALINK_USB) += phy-ralink-usb.o diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c new file mode 100644 index 000000000000..db79088d5362 --- /dev/null +++ b/drivers/phy/ralink/phy-mt7621-pci.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Mediatek MT7621 PCI PHY Driver + * Author: Sergio Paracuellos + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RG_PE1_PIPE_REG 0x02c +#define RG_PE1_PIPE_RST BIT(12) +#define RG_PE1_PIPE_CMD_FRC BIT(4) + +#define RG_P0_TO_P1_WIDTH 0x100 +#define RG_PE1_H_LCDDS_REG 0x49c +#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0) + +#define RG_PE1_FRC_H_XTAL_REG 0x400 +#define RG_PE1_FRC_H_XTAL_TYPE BIT(8) +#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9) + +#define RG_PE1_FRC_PHY_REG 0x000 +#define RG_PE1_FRC_PHY_EN BIT(4) +#define RG_PE1_PHY_EN BIT(5) + +#define RG_PE1_H_PLL_REG 0x490 +#define RG_PE1_H_PLL_BC GENMASK(23, 22) +#define RG_PE1_H_PLL_BP GENMASK(21, 18) +#define RG_PE1_H_PLL_IR GENMASK(15, 12) +#define RG_PE1_H_PLL_IC GENMASK(11, 8) +#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6) +#define RG_PE1_PLL_DIVEN GENMASK(3, 1) + +#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc +#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4) + +#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4 +#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0) + +#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8 +#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0) +#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16) + +#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0 +#define RG_PE1_LCDDS_CLK_PH_INV BIT(5) + +#define RG_PE1_H_PLL_BR_REG 0x4ac +#define RG_PE1_H_PLL_BR GENMASK(18, 16) + +#define RG_PE1_MSTCKDIV_REG 0x414 +#define RG_PE1_MSTCKDIV GENMASK(7, 6) + +#define RG_PE1_FRC_MSTCKDIV BIT(5) + +#define XTAL_MASK GENMASK(7, 6) + +#define MAX_PHYS 2 + +/** + * struct mt7621_pci_phy - Mt7621 Pcie PHY core + * @dev: pointer to device + * @regmap: kernel regmap pointer + * @phy: pointer to the kernel PHY device + * @port_base: base register + * @has_dual_port: if the phy has dual ports. + * @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst' + * needs to be executed. Depends on chip revision. 
+ */ +struct mt7621_pci_phy { + struct device *dev; + struct regmap *regmap; + struct phy *phy; + void __iomem *port_base; + bool has_dual_port; + bool bypass_pipe_rst; +}; + +static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy, + u32 reg, u32 clr, u32 set) +{ + u32 val; + + /* + * We cannot use 'regmap_write_bits' here because internally + * 'set' is masked before is set to the value that will be + * written to the register. That way results in no reliable + * pci setup. Avoid to mask 'set' before set value to 'val' + * completely avoid the problem. + */ + regmap_read(phy->regmap, reg, &val); + val &= ~clr; + val |= set; + regmap_write(phy->regmap, reg, val); +} + +static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy) +{ + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_RST); + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_CMD_FRC); + + if (phy->has_dual_port) { + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, + 0, RG_PE1_PIPE_RST); + mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, + 0, RG_PE1_PIPE_CMD_FRC); + } +} + +static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy) +{ + struct device *dev = phy->dev; + u32 xtal_mode; + + xtal_mode = FIELD_GET(XTAL_MASK, rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0)); + + /* Set PCIe Port PHY to disable SSC */ + /* Debug Xtal Type */ + mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG, + RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE, + RG_PE1_FRC_H_XTAL_TYPE | + FIELD_PREP(RG_PE1_H_XTAL_TYPE, 0x00)); + + /* disable port */ + mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG, RG_PE1_PHY_EN, + RG_PE1_FRC_PHY_EN); + + if (phy->has_dual_port) { + mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + } + + if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ + /* Set Pre-divider ratio (for host mode) */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV, + FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x01)); + + dev_dbg(dev, "Xtal is 40MHz\n"); + } else if (xtal_mode >= 6) { /* 25MHz Xal */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV, + FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00)); + + /* Select feedback clock */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG, + RG_PE1_H_PLL_FBKSEL, + FIELD_PREP(RG_PE1_H_PLL_FBKSEL, 0x01)); + + /* DDS NCPO PCW (for host mode) */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, + RG_PE1_H_LCDDS_SSC_PRD, + FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x00)); + + /* DDS SSC dither period control */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, + RG_PE1_H_LCDDS_SSC_PRD, + FIELD_PREP(RG_PE1_H_LCDDS_SSC_PRD, 0x18d)); + + /* DDS SSC dither amplitude control */ + mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG, + RG_PE1_H_LCDDS_SSC_DELTA | + RG_PE1_H_LCDDS_SSC_DELTA1, + FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA, 0x4a) | + FIELD_PREP(RG_PE1_H_LCDDS_SSC_DELTA1, 0x4a)); + + dev_dbg(dev, "Xtal is 25MHz\n"); + } else { /* 20MHz Xtal */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, RG_PE1_H_PLL_PREDIV, + FIELD_PREP(RG_PE1_H_PLL_PREDIV, 0x00)); + + dev_dbg(dev, "Xtal is 20MHz\n"); + } + + /* DDS clock inversion */ + mt7621_phy_rmw(phy, RG_PE1_LCDDS_CLK_PH_INV_REG, + RG_PE1_LCDDS_CLK_PH_INV, RG_PE1_LCDDS_CLK_PH_INV); + + /* Set PLL bits */ + mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, + RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR | + RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN, + FIELD_PREP(RG_PE1_H_PLL_BC, 0x02) | + FIELD_PREP(RG_PE1_H_PLL_BP, 0x06) | + FIELD_PREP(RG_PE1_H_PLL_IR, 0x02) | + FIELD_PREP(RG_PE1_H_PLL_IC, 0x01) | + FIELD_PREP(RG_PE1_PLL_DIVEN, 0x02)); + + 
mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, RG_PE1_H_PLL_BR, + FIELD_PREP(RG_PE1_H_PLL_BR, 0x00)); + + if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ + /* set force mode enable of da_pe1_mstckdiv */ + mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG, + RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV, + FIELD_PREP(RG_PE1_MSTCKDIV, 0x01) | + RG_PE1_FRC_MSTCKDIV); + } +} + +static int mt7621_pci_phy_init(struct phy *phy) +{ + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); + + if (mphy->bypass_pipe_rst) + mt7621_bypass_pipe_rst(mphy); + + mt7621_set_phy_for_ssc(mphy); + + return 0; +} + +static int mt7621_pci_phy_power_on(struct phy *phy) +{ + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); + + /* Enable PHY and disable force mode */ + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, + RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); + + if (mphy->has_dual_port) { + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); + } + + return 0; +} + +static int mt7621_pci_phy_power_off(struct phy *phy) +{ + struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); + + /* Disable PHY */ + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + + if (mphy->has_dual_port) { + mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, + RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); + } + + return 0; +} + +static int mt7621_pci_phy_exit(struct phy *phy) +{ + return 0; +} + +static const struct phy_ops mt7621_pci_phy_ops = { + .init = mt7621_pci_phy_init, + .exit = mt7621_pci_phy_exit, + .power_on = mt7621_pci_phy_power_on, + .power_off = mt7621_pci_phy_power_off, + .owner = THIS_MODULE, +}; + +static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev); + + if (WARN_ON(args->args[0] >= MAX_PHYS)) + return ERR_PTR(-ENODEV); + + mt7621_phy->has_dual_port = args->args[0]; + + dev_info(dev, "PHY for 0x%08x (dual port = %d)\n", + (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port); + + return mt7621_phy->phy; +} + +static const struct soc_device_attribute mt7621_pci_quirks_match[] = { + { .soc_id = "mt7621", .revision = "E2" } +}; + +static const struct regmap_config mt7621_pci_phy_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = 0x700, +}; + +static int mt7621_pci_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct soc_device_attribute *attr; + struct phy_provider *provider; + struct mt7621_pci_phy *phy; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + attr = soc_device_match(mt7621_pci_quirks_match); + if (attr) + phy->bypass_pipe_rst = true; + + phy->dev = dev; + platform_set_drvdata(pdev, phy); + + phy->port_base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(phy->port_base)) { + dev_err(dev, "failed to remap phy regs\n"); + return PTR_ERR(phy->port_base); + } + + phy->regmap = devm_regmap_init_mmio(phy->dev, phy->port_base, + &mt7621_pci_phy_regmap_config); + if (IS_ERR(phy->regmap)) + return PTR_ERR(phy->regmap); + + phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "failed to create phy\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy->phy, phy); + + provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id mt7621_pci_phy_ids[] = { + { .compatible = "mediatek,mt7621-pci-phy" }, + {}, +}; 
+MODULE_DEVICE_TABLE(of, mt7621_pci_ids); + +static struct platform_driver mt7621_pci_phy_driver = { + .probe = mt7621_pci_phy_probe, + .driver = { + .name = "mt7621-pci-phy", + .of_match_table = of_match_ptr(mt7621_pci_phy_ids), + }, +}; + +builtin_platform_driver(mt7621_pci_phy_driver); + +MODULE_AUTHOR("Sergio Paracuellos "); +MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 53e7c92c7fa0077f507951206bb329aadab785fb Mon Sep 17 00:00:00 2001 From: Sergio Paracuellos Date: Sat, 21 Nov 2020 16:50:37 +0100 Subject: staging: mt7621-pci-phy: remove driver from staging Remove this driver from staging because it has been moved into its properly place in the kernel. Signed-off-by: Sergio Paracuellos Acked-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20201121155037.21354-5-sergio.paracuellos@gmail.com Signed-off-by: Vinod Koul --- drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - drivers/staging/mt7621-pci-phy/Kconfig | 8 - drivers/staging/mt7621-pci-phy/Makefile | 2 - drivers/staging/mt7621-pci-phy/TODO | 4 - .../mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml | 36 -- drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c | 373 --------------------- 7 files changed, 426 deletions(-) delete mode 100644 drivers/staging/mt7621-pci-phy/Kconfig delete mode 100644 drivers/staging/mt7621-pci-phy/Makefile delete mode 100644 drivers/staging/mt7621-pci-phy/TODO delete mode 100644 drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml delete mode 100644 drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c (limited to 'drivers') diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 2d0310448eba..8a03ba56def5 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -94,8 +94,6 @@ source "drivers/staging/pi433/Kconfig" source "drivers/staging/mt7621-pci/Kconfig" -source "drivers/staging/mt7621-pci-phy/Kconfig" - source "drivers/staging/mt7621-pinctrl/Kconfig" source "drivers/staging/mt7621-dma/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 757a892ab5b9..39319161301c 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -37,7 +37,6 @@ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ obj-$(CONFIG_PI433) += pi433/ obj-$(CONFIG_PCI_MT7621) += mt7621-pci/ -obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/ obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/ obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ diff --git a/drivers/staging/mt7621-pci-phy/Kconfig b/drivers/staging/mt7621-pci-phy/Kconfig deleted file mode 100644 index 263e0a91c424..000000000000 --- a/drivers/staging/mt7621-pci-phy/Kconfig +++ /dev/null @@ -1,8 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -config PCI_MT7621_PHY - tristate "MediaTek MT7621 PCI PHY Driver" - depends on RALINK && OF - select GENERIC_PHY - help - Say 'Y' here to add support for MediaTek MT7621 PCI PHY driver, - diff --git a/drivers/staging/mt7621-pci-phy/Makefile b/drivers/staging/mt7621-pci-phy/Makefile deleted file mode 100644 index b4d99b9119e0..000000000000 --- a/drivers/staging/mt7621-pci-phy/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PCI_MT7621_PHY) += pci-mt7621-phy.o diff --git a/drivers/staging/mt7621-pci-phy/TODO b/drivers/staging/mt7621-pci-phy/TODO deleted file mode 100644 index a255e8f753eb..000000000000 --- a/drivers/staging/mt7621-pci-phy/TODO +++ /dev/null @@ -1,4 +0,0 @@ - -- general code review and cleanup - -Cc: 
NeilBrown and Sergio Paracuellos diff --git a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml b/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml deleted file mode 100644 index cf32bbc45b5d..000000000000 --- a/drivers/staging/mt7621-pci-phy/mediatek,mt7621-pci-phy.yaml +++ /dev/null @@ -1,36 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -%YAML 1.2 ---- -$id: "http://devicetree.org/schemas/phy/mediatek,mt7621-pci-phy.yaml#" -$schema: "http://devicetree.org/meta-schemas/core.yaml#" - -title: Mediatek Mt7621 PCIe PHY Device Tree Bindings - -maintainers: - - Sergio Paracuellos - -properties: - compatible: - const: mediatek,mt7621-pci-phy - - reg: - maxItems: 1 - - "#phy-cells": - const: 1 - description: selects if the phy is dual-ported - -required: - - compatible - - reg - - "#phy-cells" - -additionalProperties: false - -examples: - - | - pcie0_phy: pcie-phy@1e149000 { - compatible = "mediatek,mt7621-pci-phy"; - reg = <0x1e149000 0x0700>; - #phy-cells = <1>; - }; diff --git a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c b/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c deleted file mode 100644 index 57743fd22be4..000000000000 --- a/drivers/staging/mt7621-pci-phy/pci-mt7621-phy.c +++ /dev/null @@ -1,373 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Mediatek MT7621 PCI PHY Driver - * Author: Sergio Paracuellos - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define RG_PE1_PIPE_REG 0x02c -#define RG_PE1_PIPE_RST BIT(12) -#define RG_PE1_PIPE_CMD_FRC BIT(4) - -#define RG_P0_TO_P1_WIDTH 0x100 -#define RG_PE1_H_LCDDS_REG 0x49c -#define RG_PE1_H_LCDDS_PCW GENMASK(30, 0) -#define RG_PE1_H_LCDDS_PCW_VAL(x) ((0x7fffffff & (x)) << 0) - -#define RG_PE1_FRC_H_XTAL_REG 0x400 -#define RG_PE1_FRC_H_XTAL_TYPE BIT(8) -#define RG_PE1_H_XTAL_TYPE GENMASK(10, 9) -#define RG_PE1_H_XTAL_TYPE_VAL(x) ((0x3 & (x)) << 9) - -#define RG_PE1_FRC_PHY_REG 0x000 -#define RG_PE1_FRC_PHY_EN BIT(4) -#define RG_PE1_PHY_EN BIT(5) - -#define RG_PE1_H_PLL_REG 0x490 -#define RG_PE1_H_PLL_BC GENMASK(23, 22) -#define RG_PE1_H_PLL_BC_VAL(x) ((0x3 & (x)) << 22) -#define RG_PE1_H_PLL_BP GENMASK(21, 18) -#define RG_PE1_H_PLL_BP_VAL(x) ((0xf & (x)) << 18) -#define RG_PE1_H_PLL_IR GENMASK(15, 12) -#define RG_PE1_H_PLL_IR_VAL(x) ((0xf & (x)) << 12) -#define RG_PE1_H_PLL_IC GENMASK(11, 8) -#define RG_PE1_H_PLL_IC_VAL(x) ((0xf & (x)) << 8) -#define RG_PE1_H_PLL_PREDIV GENMASK(7, 6) -#define RG_PE1_H_PLL_PREDIV_VAL(x) ((0x3 & (x)) << 6) -#define RG_PE1_PLL_DIVEN GENMASK(3, 1) -#define RG_PE1_PLL_DIVEN_VAL(x) ((0x7 & (x)) << 1) - -#define RG_PE1_H_PLL_FBKSEL_REG 0x4bc -#define RG_PE1_H_PLL_FBKSEL GENMASK(5, 4) -#define RG_PE1_H_PLL_FBKSEL_VAL(x) ((0x3 & (x)) << 4) - -#define RG_PE1_H_LCDDS_SSC_PRD_REG 0x4a4 -#define RG_PE1_H_LCDDS_SSC_PRD GENMASK(15, 0) -#define RG_PE1_H_LCDDS_SSC_PRD_VAL(x) ((0xffff & (x)) << 0) - -#define RG_PE1_H_LCDDS_SSC_DELTA_REG 0x4a8 -#define RG_PE1_H_LCDDS_SSC_DELTA GENMASK(11, 0) -#define RG_PE1_H_LCDDS_SSC_DELTA_VAL(x) ((0xfff & (x)) << 0) -#define RG_PE1_H_LCDDS_SSC_DELTA1 GENMASK(27, 16) -#define RG_PE1_H_LCDDS_SSC_DELTA1_VAL(x) ((0xff & (x)) << 16) - -#define RG_PE1_LCDDS_CLK_PH_INV_REG 0x4a0 -#define RG_PE1_LCDDS_CLK_PH_INV BIT(5) - -#define RG_PE1_H_PLL_BR_REG 0x4ac -#define RG_PE1_H_PLL_BR GENMASK(18, 16) -#define RG_PE1_H_PLL_BR_VAL(x) ((0x7 & (x)) << 16) - -#define RG_PE1_MSTCKDIV_REG 0x414 -#define RG_PE1_MSTCKDIV GENMASK(7, 6) -#define RG_PE1_MSTCKDIV_VAL(x) ((0x3 & (x)) << 6) - -#define RG_PE1_FRC_MSTCKDIV 
BIT(5) - -#define XTAL_MODE_SEL_SHIFT 6 -#define XTAL_MODE_SEL_MASK 0x7 - -#define MAX_PHYS 2 - -/** - * struct mt7621_pci_phy - Mt7621 Pcie PHY core - * @dev: pointer to device - * @regmap: kernel regmap pointer - * @phy: pointer to the kernel PHY device - * @port_base: base register - * @has_dual_port: if the phy has dual ports. - * @bypass_pipe_rst: mark if 'mt7621_bypass_pipe_rst' - * needs to be executed. Depends on chip revision. - */ -struct mt7621_pci_phy { - struct device *dev; - struct regmap *regmap; - struct phy *phy; - void __iomem *port_base; - bool has_dual_port; - bool bypass_pipe_rst; -}; - -static inline u32 phy_read(struct mt7621_pci_phy *phy, u32 reg) -{ - u32 val; - - regmap_read(phy->regmap, reg, &val); - - return val; -} - -static inline void phy_write(struct mt7621_pci_phy *phy, u32 val, u32 reg) -{ - regmap_write(phy->regmap, reg, val); -} - -static inline void mt7621_phy_rmw(struct mt7621_pci_phy *phy, - u32 reg, u32 clr, u32 set) -{ - u32 val = phy_read(phy, reg); - - val &= ~clr; - val |= set; - phy_write(phy, val, reg); -} - -static void mt7621_bypass_pipe_rst(struct mt7621_pci_phy *phy) -{ - mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_RST); - mt7621_phy_rmw(phy, RG_PE1_PIPE_REG, 0, RG_PE1_PIPE_CMD_FRC); - - if (phy->has_dual_port) { - mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, - 0, RG_PE1_PIPE_RST); - mt7621_phy_rmw(phy, RG_PE1_PIPE_REG + RG_P0_TO_P1_WIDTH, - 0, RG_PE1_PIPE_CMD_FRC); - } -} - -static void mt7621_set_phy_for_ssc(struct mt7621_pci_phy *phy) -{ - struct device *dev = phy->dev; - u32 xtal_mode; - - xtal_mode = (rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG0) - >> XTAL_MODE_SEL_SHIFT) & XTAL_MODE_SEL_MASK; - - /* Set PCIe Port PHY to disable SSC */ - /* Debug Xtal Type */ - mt7621_phy_rmw(phy, RG_PE1_FRC_H_XTAL_REG, - RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE, - RG_PE1_FRC_H_XTAL_TYPE | RG_PE1_H_XTAL_TYPE_VAL(0x00)); - - /* disable port */ - mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG, - RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); - - if (phy->has_dual_port) { - mt7621_phy_rmw(phy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, - RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); - } - - if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ - /* Set Pre-divider ratio (for host mode) */ - mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, - RG_PE1_H_PLL_PREDIV, - RG_PE1_H_PLL_PREDIV_VAL(0x01)); - dev_info(dev, "Xtal is 40MHz\n"); - } else if (xtal_mode >= 6) { /* 25MHz Xal */ - mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, - RG_PE1_H_PLL_PREDIV, - RG_PE1_H_PLL_PREDIV_VAL(0x00)); - /* Select feedback clock */ - mt7621_phy_rmw(phy, RG_PE1_H_PLL_FBKSEL_REG, - RG_PE1_H_PLL_FBKSEL, - RG_PE1_H_PLL_FBKSEL_VAL(0x01)); - /* DDS NCPO PCW (for host mode) */ - mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, - RG_PE1_H_LCDDS_SSC_PRD, - RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18000000)); - /* DDS SSC dither period control */ - mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_PRD_REG, - RG_PE1_H_LCDDS_SSC_PRD, - RG_PE1_H_LCDDS_SSC_PRD_VAL(0x18d)); - /* DDS SSC dither amplitude control */ - mt7621_phy_rmw(phy, RG_PE1_H_LCDDS_SSC_DELTA_REG, - RG_PE1_H_LCDDS_SSC_DELTA | - RG_PE1_H_LCDDS_SSC_DELTA1, - RG_PE1_H_LCDDS_SSC_DELTA_VAL(0x4a) | - RG_PE1_H_LCDDS_SSC_DELTA1_VAL(0x4a)); - dev_info(dev, "Xtal is 25MHz\n"); - } else { /* 20MHz Xtal */ - mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, - RG_PE1_H_PLL_PREDIV, - RG_PE1_H_PLL_PREDIV_VAL(0x00)); - - dev_info(dev, "Xtal is 20MHz\n"); - } - - /* DDS clock inversion */ - mt7621_phy_rmw(phy, RG_PE1_LCDDS_CLK_PH_INV_REG, - RG_PE1_LCDDS_CLK_PH_INV, RG_PE1_LCDDS_CLK_PH_INV); - - /* Set PLL bits */ 
- mt7621_phy_rmw(phy, RG_PE1_H_PLL_REG, - RG_PE1_H_PLL_BC | RG_PE1_H_PLL_BP | RG_PE1_H_PLL_IR | - RG_PE1_H_PLL_IC | RG_PE1_PLL_DIVEN, - RG_PE1_H_PLL_BC_VAL(0x02) | RG_PE1_H_PLL_BP_VAL(0x06) | - RG_PE1_H_PLL_IR_VAL(0x02) | RG_PE1_H_PLL_IC_VAL(0x01) | - RG_PE1_PLL_DIVEN_VAL(0x02)); - - mt7621_phy_rmw(phy, RG_PE1_H_PLL_BR_REG, - RG_PE1_H_PLL_BR, RG_PE1_H_PLL_BR_VAL(0x00)); - - if (xtal_mode <= 5 && xtal_mode >= 3) { /* 40MHz Xtal */ - /* set force mode enable of da_pe1_mstckdiv */ - mt7621_phy_rmw(phy, RG_PE1_MSTCKDIV_REG, - RG_PE1_MSTCKDIV | RG_PE1_FRC_MSTCKDIV, - RG_PE1_MSTCKDIV_VAL(0x01) | RG_PE1_FRC_MSTCKDIV); - } -} - -static int mt7621_pci_phy_init(struct phy *phy) -{ - struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); - - if (mphy->bypass_pipe_rst) - mt7621_bypass_pipe_rst(mphy); - - mt7621_set_phy_for_ssc(mphy); - - return 0; -} - -static int mt7621_pci_phy_power_on(struct phy *phy) -{ - struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); - - /* Enable PHY and disable force mode */ - mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, - RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); - - if (mphy->has_dual_port) { - mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, - RG_PE1_FRC_PHY_EN, RG_PE1_PHY_EN); - } - - return 0; -} - -static int mt7621_pci_phy_power_off(struct phy *phy) -{ - struct mt7621_pci_phy *mphy = phy_get_drvdata(phy); - - /* Disable PHY */ - mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG, - RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); - - if (mphy->has_dual_port) { - mt7621_phy_rmw(mphy, RG_PE1_FRC_PHY_REG + RG_P0_TO_P1_WIDTH, - RG_PE1_PHY_EN, RG_PE1_FRC_PHY_EN); - } - - return 0; -} - -static int mt7621_pci_phy_exit(struct phy *phy) -{ - return 0; -} - -static const struct phy_ops mt7621_pci_phy_ops = { - .init = mt7621_pci_phy_init, - .exit = mt7621_pci_phy_exit, - .power_on = mt7621_pci_phy_power_on, - .power_off = mt7621_pci_phy_power_off, - .owner = THIS_MODULE, -}; - -static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev, - struct of_phandle_args *args) -{ - struct mt7621_pci_phy *mt7621_phy = dev_get_drvdata(dev); - - if (WARN_ON(args->args[0] >= MAX_PHYS)) - return ERR_PTR(-ENODEV); - - mt7621_phy->has_dual_port = args->args[0]; - - dev_info(dev, "PHY for 0x%08x (dual port = %d)\n", - (unsigned int)mt7621_phy->port_base, mt7621_phy->has_dual_port); - - return mt7621_phy->phy; -} - -static const struct soc_device_attribute mt7621_pci_quirks_match[] = { - { .soc_id = "mt7621", .revision = "E2" } -}; - -static const struct regmap_config mt7621_pci_phy_regmap_config = { - .reg_bits = 32, - .val_bits = 32, - .reg_stride = 4, - .max_register = 0x700, -}; - -static int mt7621_pci_phy_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - const struct soc_device_attribute *attr; - struct phy_provider *provider; - struct mt7621_pci_phy *phy; - struct resource *res; - - phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); - if (!phy) - return -ENOMEM; - - attr = soc_device_match(mt7621_pci_quirks_match); - if (attr) - phy->bypass_pipe_rst = true; - - phy->dev = dev; - platform_set_drvdata(pdev, phy); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get address resource\n"); - return -ENXIO; - } - - phy->port_base = devm_ioremap_resource(dev, res); - if (IS_ERR(phy->port_base)) { - dev_err(dev, "failed to remap phy regs\n"); - return PTR_ERR(phy->port_base); - } - - phy->regmap = devm_regmap_init_mmio(phy->dev, phy->port_base, - &mt7621_pci_phy_regmap_config); - if (IS_ERR(phy->regmap)) - return PTR_ERR(phy->regmap); - - 
phy->phy = devm_phy_create(dev, dev->of_node, &mt7621_pci_phy_ops); - if (IS_ERR(phy)) { - dev_err(dev, "failed to create phy\n"); - return PTR_ERR(phy); - } - - phy_set_drvdata(phy->phy, phy); - - provider = devm_of_phy_provider_register(dev, mt7621_pcie_phy_of_xlate); - - return PTR_ERR_OR_ZERO(provider); -} - -static const struct of_device_id mt7621_pci_phy_ids[] = { - { .compatible = "mediatek,mt7621-pci-phy" }, - {}, -}; -MODULE_DEVICE_TABLE(of, mt7621_pci_ids); - -static struct platform_driver mt7621_pci_phy_driver = { - .probe = mt7621_pci_phy_probe, - .driver = { - .name = "mt7621-pci-phy", - .of_match_table = of_match_ptr(mt7621_pci_phy_ids), - }, -}; - -builtin_platform_driver(mt7621_pci_phy_driver); - -MODULE_AUTHOR("Sergio Paracuellos "); -MODULE_DESCRIPTION("MediaTek MT7621 PCIe PHY driver"); -MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 81b534f7e9b27309b99ac2b870518c17381d2912 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Fri, 20 Nov 2020 09:56:36 +0100 Subject: phy: samsung: Add support for the Exynos5420 variant of the USB2 PHY Exynos5420 differs a bit from Exynos5250 in USB2 PHY related registers in the PMU region. Add a variant for the Exynos5420 case. Till now, USB2 PHY worked only because the bootloader enabled the PHY, but then driver messed USB 3.0 DRD related registers during the suspend/resume cycle. Signed-off-by: Marek Szyprowski Acked-by: Krzysztof Kozlowski Link: https://lore.kernel.org/r/20201120085637.7299-2-m.szyprowski@samsung.com Signed-off-by: Vinod Koul --- .../devicetree/bindings/phy/samsung-phy.txt | 1 + drivers/phy/samsung/Kconfig | 7 +++- drivers/phy/samsung/phy-exynos5250-usb2.c | 48 +++++++++++++++------- drivers/phy/samsung/phy-samsung-usb2.c | 6 +++ drivers/phy/samsung/phy-samsung-usb2.h | 1 + 5 files changed, 48 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/phy/samsung-phy.txt b/Documentation/devicetree/bindings/phy/samsung-phy.txt index 7510830a79bd..8f51aee91101 100644 --- a/Documentation/devicetree/bindings/phy/samsung-phy.txt +++ b/Documentation/devicetree/bindings/phy/samsung-phy.txt @@ -47,6 +47,7 @@ Required properties: - "samsung,exynos4210-usb2-phy" - "samsung,exynos4x12-usb2-phy" - "samsung,exynos5250-usb2-phy" + - "samsung,exynos5420-usb2-phy" - "samsung,s5pv210-usb2-phy" - reg : a list of registers used by phy driver - first and obligatory is the location of phy modules registers diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig index e20d2fcc9fe7..0f51d3bf38cc 100644 --- a/drivers/phy/samsung/Kconfig +++ b/drivers/phy/samsung/Kconfig @@ -64,7 +64,12 @@ config PHY_EXYNOS4X12_USB2 config PHY_EXYNOS5250_USB2 bool depends on PHY_SAMSUNG_USB2 - default SOC_EXYNOS5250 || SOC_EXYNOS5420 + default SOC_EXYNOS5250 + +config PHY_EXYNOS5420_USB2 + bool + depends on PHY_SAMSUNG_USB2 + default SOC_EXYNOS5420 config PHY_S5PV210_USB2 bool "Support for S5PV210" diff --git a/drivers/phy/samsung/phy-exynos5250-usb2.c b/drivers/phy/samsung/phy-exynos5250-usb2.c index 4f53b711fd6f..e198010e1bfd 100644 --- a/drivers/phy/samsung/phy-exynos5250-usb2.c +++ b/drivers/phy/samsung/phy-exynos5250-usb2.c @@ -117,9 +117,9 @@ /* Isolation, configured in the power management unit */ #define EXYNOS_5250_USB_ISOL_OTG_OFFSET 0x704 -#define EXYNOS_5250_USB_ISOL_OTG BIT(0) #define EXYNOS_5250_USB_ISOL_HOST_OFFSET 0x708 -#define EXYNOS_5250_USB_ISOL_HOST BIT(0) +#define EXYNOS_5420_USB_ISOL_HOST_OFFSET 0x70C +#define EXYNOS_5250_USB_ISOL_ENABLE BIT(0) /* Mode swtich register */ #define 
EXYNOS_5250_MODE_SWITCH_OFFSET 0x230 @@ -132,7 +132,6 @@ enum exynos4x12_phy_id { EXYNOS5250_HOST, EXYNOS5250_HSIC0, EXYNOS5250_HSIC1, - EXYNOS5250_NUM_PHYS, }; /* @@ -176,20 +175,19 @@ static void exynos5250_isol(struct samsung_usb2_phy_instance *inst, bool on) { struct samsung_usb2_phy_driver *drv = inst->drv; u32 offset; - u32 mask; + u32 mask = EXYNOS_5250_USB_ISOL_ENABLE; - switch (inst->cfg->id) { - case EXYNOS5250_DEVICE: + if (drv->cfg == &exynos5250_usb2_phy_config && + inst->cfg->id == EXYNOS5250_DEVICE) offset = EXYNOS_5250_USB_ISOL_OTG_OFFSET; - mask = EXYNOS_5250_USB_ISOL_OTG; - break; - case EXYNOS5250_HOST: + else if (drv->cfg == &exynos5250_usb2_phy_config && + inst->cfg->id == EXYNOS5250_HOST) offset = EXYNOS_5250_USB_ISOL_HOST_OFFSET; - mask = EXYNOS_5250_USB_ISOL_HOST; - break; - default: + else if (drv->cfg == &exynos5420_usb2_phy_config && + inst->cfg->id == EXYNOS5250_HOST) + offset = EXYNOS_5420_USB_ISOL_HOST_OFFSET; + else return; - } regmap_update_bits(drv->reg_pmu, offset, mask, on ? 0 : mask); } @@ -390,9 +388,31 @@ static const struct samsung_usb2_common_phy exynos5250_phys[] = { }, }; +static const struct samsung_usb2_common_phy exynos5420_phys[] = { + { + .label = "host", + .id = EXYNOS5250_HOST, + .power_on = exynos5250_power_on, + .power_off = exynos5250_power_off, + }, + { + .label = "hsic", + .id = EXYNOS5250_HSIC0, + .power_on = exynos5250_power_on, + .power_off = exynos5250_power_off, + }, +}; + const struct samsung_usb2_phy_config exynos5250_usb2_phy_config = { .has_mode_switch = 1, - .num_phys = EXYNOS5250_NUM_PHYS, + .num_phys = ARRAY_SIZE(exynos5250_phys), .phys = exynos5250_phys, .rate_to_clk = exynos5250_rate_to_clk, }; + +const struct samsung_usb2_phy_config exynos5420_usb2_phy_config = { + .has_mode_switch = 1, + .num_phys = ARRAY_SIZE(exynos5420_phys), + .phys = exynos5420_phys, + .rate_to_clk = exynos5250_rate_to_clk, +}; diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c index f79f605cff79..3908153f2ce5 100644 --- a/drivers/phy/samsung/phy-samsung-usb2.c +++ b/drivers/phy/samsung/phy-samsung-usb2.c @@ -128,6 +128,12 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = { .data = &exynos5250_usb2_phy_config, }, #endif +#ifdef CONFIG_PHY_EXYNOS5420_USB2 + { + .compatible = "samsung,exynos5420-usb2-phy", + .data = &exynos5420_usb2_phy_config, + }, +#endif #ifdef CONFIG_PHY_S5PV210_USB2 { .compatible = "samsung,s5pv210-usb2-phy", diff --git a/drivers/phy/samsung/phy-samsung-usb2.h b/drivers/phy/samsung/phy-samsung-usb2.h index 77fb23bc218f..ebaf43bfc5a2 100644 --- a/drivers/phy/samsung/phy-samsung-usb2.h +++ b/drivers/phy/samsung/phy-samsung-usb2.h @@ -66,5 +66,6 @@ extern const struct samsung_usb2_phy_config exynos3250_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config; extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config; +extern const struct samsung_usb2_phy_config exynos5420_usb2_phy_config; extern const struct samsung_usb2_phy_config s5pv210_usb2_phy_config; #endif -- cgit v1.2.3 From 4eed2812de6addc934497688503568435a540b51 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Fri, 20 Nov 2020 16:03:47 +0100 Subject: phy: amlogic: meson-axg-mipi-pcie-analog: replace DSI_LANE definitions with BIT() macro For consistency, replace DSI_LANE definitions with BIT() macro and remove the unused DSI_LANE_MASK definition. 
Suggested-by: Vinod Koul Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20201120150347.3914901-1-narmstrong@baylibre.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c index 6eb21551bdd9..1027ece6ca12 100644 --- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c @@ -38,12 +38,11 @@ #define HHI_MIPI_CNTL2_CH_EN GENMASK(15, 11) #define HHI_MIPI_CNTL2_CH0_LP_CTL GENMASK(10, 1) -#define DSI_LANE_0 (1 << 4) -#define DSI_LANE_1 (1 << 3) -#define DSI_LANE_CLK (1 << 2) -#define DSI_LANE_2 (1 << 1) -#define DSI_LANE_3 (1 << 0) -#define DSI_LANE_MASK (0x1F) +#define DSI_LANE_0 BIT(4) +#define DSI_LANE_1 BIT(3) +#define DSI_LANE_CLK BIT(2) +#define DSI_LANE_2 BIT(1) +#define DSI_LANE_3 BIT(0) struct phy_axg_mipi_pcie_analog_priv { struct phy *phy; -- cgit v1.2.3 From 122586d62206c68e061f0329443ff65985e1aae5 Mon Sep 17 00:00:00 2001 From: Neil Armstrong Date: Fri, 20 Nov 2020 16:38:55 +0100 Subject: phy: amlogic: phy-meson-gxl-usb2: keep ID pull-up even in Host mode In order to keep OTG ID detection even when in Host mode, the ID line of the PHY (if the current phy is an OTG one) pull-up should be kept enable in both modes. This fixes OTG switch on GXL, GXM & AXG platforms, otherwise once switched to Host, the ID detection doesn't work anymore to switch back to Device. Signed-off-by: Neil Armstrong Reviewed-by: Kevin Hilman Reviewed-by: Martin Blumenstingl Link: https://lore.kernel.org/r/20201120153855.3920757-1-narmstrong@baylibre.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-gxl-usb2.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c index 875afb2672c7..2b3c0d730f20 100644 --- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c +++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c @@ -158,7 +158,8 @@ static int phy_meson_gxl_usb2_set_mode(struct phy *phy, U2P_R0_DM_PULLDOWN); regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_DP_PULLDOWN, U2P_R0_DP_PULLDOWN); - regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP, 0); + regmap_update_bits(priv->regmap, U2P_R0, U2P_R0_ID_PULLUP, + U2P_R0_ID_PULLUP); break; case PHY_MODE_USB_DEVICE: -- cgit v1.2.3 From 15ee6277817f9eda605937c254dc8f15f1305c1f Mon Sep 17 00:00:00 2001 From: Yejune Deng Date: Wed, 18 Nov 2020 10:36:35 +0800 Subject: phy: amlogic: replace devm_reset_control_array_get() devm_reset_control_array_get_exclusive() looks more readable Signed-off-by: Yejune Deng Reviewed-by: Martin Blumenstingl Reviewed-by: Philipp Zabel Link: https://lore.kernel.org/r/1605666995-16462-1-git-send-email-yejune.deng@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/amlogic/phy-meson-axg-pcie.c | 2 +- drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c index 58a7507a8422..2299bab38e05 100644 --- a/drivers/phy/amlogic/phy-meson-axg-pcie.c +++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c @@ -153,7 +153,7 @@ static int phy_axg_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->regmap)) return PTR_ERR(priv->regmap); - priv->reset = 
devm_reset_control_array_get(dev, false, false); + priv->reset = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c index ebe3d0ddd304..5b471ab80fe2 100644 --- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c +++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c @@ -416,7 +416,7 @@ static int phy_g12a_usb3_pcie_probe(struct platform_device *pdev) if (ret) goto err_disable_clk_ref; - priv->reset = devm_reset_control_array_get(dev, false, false); + priv->reset = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->reset)) return PTR_ERR(priv->reset); -- cgit v1.2.3 From be49d5b2985bca407d130305ad78021d3c17db19 Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Tue, 13 Oct 2020 20:19:23 +0300 Subject: interconnect: qcom: Simplify the vcd compare function Let's simplify the cmp_vcd() function and replace the conditionals with just a single statement, which also improves readability. Reviewed-by: Mike Tipton Link: https://lore.kernel.org/r/20201013171923.7351-1-georgi.djakov@linaro.org Signed-off-by: Georgi Djakov --- drivers/interconnect/qcom/bcm-voter.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index 887d13721e52..1cc565bce2f4 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -41,17 +41,10 @@ struct bcm_voter { static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b) { - const struct qcom_icc_bcm *bcm_a = - list_entry(a, struct qcom_icc_bcm, list); - const struct qcom_icc_bcm *bcm_b = - list_entry(b, struct qcom_icc_bcm, list); - - if (bcm_a->aux_data.vcd < bcm_b->aux_data.vcd) - return -1; - else if (bcm_a->aux_data.vcd == bcm_b->aux_data.vcd) - return 0; - else - return 1; + const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list); + const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list); + + return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd; } static u64 bcm_div(u64 num, u32 base) -- cgit v1.2.3 From 2f95b9d5cf0b3d15154225e369558a3c6b40e948 Mon Sep 17 00:00:00 2001 From: Sylwester Nawrocki Date: Thu, 12 Nov 2020 15:09:28 +0100 Subject: interconnect: Add generic interconnect driver for Exynos SoCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a generic interconnect driver for Exynos SoCs in order to provide interconnect functionality for each "samsung,exynos-bus" compatible device. The SoC topology is a graph (or more specifically, a tree) and its edges are described by specifying in the 'interconnects' property the interconnect consumer path for each interconnect provider DT node. Each bus is now an interconnect provider and an interconnect node as well (cf. Documentation/interconnect/interconnect.rst), i.e. every bus registers itself as a node. Node IDs are not hard coded but rather assigned dynamically at runtime. This approach allows for using this driver with various Exynos SoCs. Frequencies requested via the interconnect API for a given node are propagated to devfreq using dev_pm_qos_update_request(). Please note that it is not an error when CONFIG_INTERCONNECT is 'n', in which case all interconnect API functions are no-op. 
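As an aside on how consumers exercise a provider like this one, a minimal, hypothetical consumer-side sketch follows (it is not part of this patch; the "dma-mem" path name and the bandwidth numbers are made up for illustration):

	struct icc_path *path;
	int ret;

	/* Resolve the path named in the consumer's 'interconnects' /
	 * 'interconnect-names' DT properties. */
	path = of_icc_get(dev, "dma-mem");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Request average/peak bandwidth in kBps; the provider's ->set()
	 * callback (exynos_generic_icc_set() below) turns this into a
	 * DEV_PM_QOS_MIN_FREQUENCY request on the devfreq device. */
	ret = icc_set_bw(path, 100000, 200000);
	if (ret)
		return ret;

	/* ... on teardown ... */
	icc_put(path);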
The samsung,data-clk-ratio DT property is used to specify the ratio of the interconect bandwidth to the minimum data clock frequency for each bus. Due to unspecified relative probing order, -EPROBE_DEFER may be propagated to ensure that the parent is probed before its children. Reviewed-by: Chanwoo Choi Tested-by: Chanwoo Choi Acked-by: Krzysztof Kozlowski Signed-off-by: Artur Świgoń Signed-off-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20201112140931.31139-3-s.nawrocki@samsung.com Signed-off-by: Georgi Djakov --- drivers/interconnect/Kconfig | 1 + drivers/interconnect/Makefile | 1 + drivers/interconnect/samsung/Kconfig | 13 +++ drivers/interconnect/samsung/Makefile | 4 + drivers/interconnect/samsung/exynos.c | 199 ++++++++++++++++++++++++++++++++++ 5 files changed, 218 insertions(+) create mode 100644 drivers/interconnect/samsung/Kconfig create mode 100644 drivers/interconnect/samsung/Makefile create mode 100644 drivers/interconnect/samsung/exynos.c (limited to 'drivers') diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig index 5b7204ee2eb2..d637a89d4695 100644 --- a/drivers/interconnect/Kconfig +++ b/drivers/interconnect/Kconfig @@ -13,5 +13,6 @@ if INTERCONNECT source "drivers/interconnect/imx/Kconfig" source "drivers/interconnect/qcom/Kconfig" +source "drivers/interconnect/samsung/Kconfig" endif diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile index d203520b0a56..97d393fd638d 100644 --- a/drivers/interconnect/Makefile +++ b/drivers/interconnect/Makefile @@ -6,3 +6,4 @@ icc-core-objs := core.o bulk.o obj-$(CONFIG_INTERCONNECT) += icc-core.o obj-$(CONFIG_INTERCONNECT_IMX) += imx/ obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/ +obj-$(CONFIG_INTERCONNECT_SAMSUNG) += samsung/ diff --git a/drivers/interconnect/samsung/Kconfig b/drivers/interconnect/samsung/Kconfig new file mode 100644 index 000000000000..6820e4f772cc --- /dev/null +++ b/drivers/interconnect/samsung/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config INTERCONNECT_SAMSUNG + bool "Samsung SoC interconnect drivers" + depends on ARCH_EXYNOS || COMPILE_TEST + help + Interconnect drivers for Samsung SoCs. + +config INTERCONNECT_EXYNOS + tristate "Exynos generic interconnect driver" + depends on INTERCONNECT_SAMSUNG + default y if ARCH_EXYNOS + help + Generic interconnect driver for Exynos SoCs. diff --git a/drivers/interconnect/samsung/Makefile b/drivers/interconnect/samsung/Makefile new file mode 100644 index 000000000000..e19d1df7da48 --- /dev/null +++ b/drivers/interconnect/samsung/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +exynos-interconnect-objs := exynos.o + +obj-$(CONFIG_INTERCONNECT_EXYNOS) += exynos-interconnect.o diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c new file mode 100644 index 000000000000..6559d8cf8068 --- /dev/null +++ b/drivers/interconnect/samsung/exynos.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Exynos generic interconnect provider driver + * + * Copyright (c) 2020 Samsung Electronics Co., Ltd. 
+ * + * Authors: Artur Świgoń + * Sylwester Nawrocki + */ +#include +#include +#include +#include +#include +#include +#include + +#define EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO 8 + +struct exynos_icc_priv { + struct device *dev; + + /* One interconnect node per provider */ + struct icc_provider provider; + struct icc_node *node; + + struct dev_pm_qos_request qos_req; + u32 bus_clk_ratio; +}; + +static struct icc_node *exynos_icc_get_parent(struct device_node *np) +{ + struct of_phandle_args args; + struct icc_node_data *icc_node_data; + struct icc_node *icc_node; + int num, ret; + + num = of_count_phandle_with_args(np, "interconnects", + "#interconnect-cells"); + if (num < 1) + return NULL; /* parent nodes are optional */ + + /* Get the interconnect target node */ + ret = of_parse_phandle_with_args(np, "interconnects", + "#interconnect-cells", 0, &args); + if (ret < 0) + return ERR_PTR(ret); + + icc_node_data = of_icc_get_from_provider(&args); + of_node_put(args.np); + + if (IS_ERR(icc_node_data)) + return ERR_CAST(icc_node_data); + + icc_node = icc_node_data->node; + kfree(icc_node_data); + + return icc_node; +} + +static int exynos_generic_icc_set(struct icc_node *src, struct icc_node *dst) +{ + struct exynos_icc_priv *src_priv = src->data, *dst_priv = dst->data; + s32 src_freq = max(src->avg_bw, src->peak_bw) / src_priv->bus_clk_ratio; + s32 dst_freq = max(dst->avg_bw, dst->peak_bw) / dst_priv->bus_clk_ratio; + int ret; + + ret = dev_pm_qos_update_request(&src_priv->qos_req, src_freq); + if (ret < 0) { + dev_err(src_priv->dev, "failed to update PM QoS of %s (src)\n", + src->name); + return ret; + } + + ret = dev_pm_qos_update_request(&dst_priv->qos_req, dst_freq); + if (ret < 0) { + dev_err(dst_priv->dev, "failed to update PM QoS of %s (dst)\n", + dst->name); + return ret; + } + + return 0; +} + +static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec, + void *data) +{ + struct exynos_icc_priv *priv = data; + + if (spec->np != priv->dev->parent->of_node) + return ERR_PTR(-EINVAL); + + return priv->node; +} + +static int exynos_generic_icc_remove(struct platform_device *pdev) +{ + struct exynos_icc_priv *priv = platform_get_drvdata(pdev); + struct icc_node *parent_node, *node = priv->node; + + parent_node = exynos_icc_get_parent(priv->dev->parent->of_node); + if (parent_node && !IS_ERR(parent_node)) + icc_link_destroy(node, parent_node); + + icc_nodes_remove(&priv->provider); + icc_provider_del(&priv->provider); + + return 0; +} + +static int exynos_generic_icc_probe(struct platform_device *pdev) +{ + struct device *bus_dev = pdev->dev.parent; + struct exynos_icc_priv *priv; + struct icc_provider *provider; + struct icc_node *icc_node, *icc_parent_node; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + platform_set_drvdata(pdev, priv); + + provider = &priv->provider; + + provider->set = exynos_generic_icc_set; + provider->aggregate = icc_std_aggregate; + provider->xlate = exynos_generic_icc_xlate; + provider->dev = bus_dev; + provider->inter_set = true; + provider->data = priv; + + ret = icc_provider_add(provider); + if (ret < 0) + return ret; + + icc_node = icc_node_create(pdev->id); + if (IS_ERR(icc_node)) { + ret = PTR_ERR(icc_node); + goto err_prov_del; + } + + priv->node = icc_node; + icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", + bus_dev->of_node); + if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio", + &priv->bus_clk_ratio)) + 
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO; + + /* + * Register a PM QoS request for the parent (devfreq) device. + */ + ret = dev_pm_qos_add_request(bus_dev, &priv->qos_req, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (ret < 0) + goto err_node_del; + + icc_node->data = priv; + icc_node_add(icc_node, provider); + + icc_parent_node = exynos_icc_get_parent(bus_dev->of_node); + if (IS_ERR(icc_parent_node)) { + ret = PTR_ERR(icc_parent_node); + goto err_pmqos_del; + } + if (icc_parent_node) { + ret = icc_link_create(icc_node, icc_parent_node->id); + if (ret < 0) + goto err_pmqos_del; + } + + return 0; + +err_pmqos_del: + dev_pm_qos_remove_request(&priv->qos_req); +err_node_del: + icc_nodes_remove(provider); +err_prov_del: + icc_provider_del(provider); + return ret; +} + +static struct platform_driver exynos_generic_icc_driver = { + .driver = { + .name = "exynos-generic-icc", + .sync_state = icc_sync_state, + }, + .probe = exynos_generic_icc_probe, + .remove = exynos_generic_icc_remove, +}; +module_platform_driver(exynos_generic_icc_driver); + +MODULE_DESCRIPTION("Exynos generic interconnect driver"); +MODULE_AUTHOR("Artur Świgoń "); +MODULE_AUTHOR("Sylwester Nawrocki "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:exynos-generic-icc"); -- cgit v1.2.3 From 4ea0bf2a52f1eea76578eac5a9148d95f5e181c0 Mon Sep 17 00:00:00 2001 From: JC Kuo Date: Tue, 17 Nov 2020 16:38:03 +0800 Subject: phy: tegra: xusb: Fix usb_phy device driver field In commit "phy: tegra: xusb: Add usb-phy support", an OTG capable PHY device, such as phy-usb2.0 device of Jetson-TX1 platform, will be bound to the tegra-xusb-padctl driver by the following line in tegra_xusb_setup_usb_role_switch(). port->usb_phy.dev->driver = port->padctl->dev->driver; With this, dev_pm_ops set of tegra-xusb-padctl driver will be invoked for the OTG capable PHY incorrectly as below logs show. This commit fixes the issue by assigning an empty driver to it. 
[ 153.451108] tegra-xusb-padctl phy-usb2.0: > tegra_xusb_padctl_suspend_noirq(dev=ffff000080917000) [ 153.460353] tegra-xusb-padctl phy-usb2.0: driver: ffff8000114453e0 (tegra_xusb_padctl_driver) [ 153.469245] tegra-xusb-padctl phy-usb2.0: padctl: ffff0000829f6480 [ 153.475772] tegra-xusb-padctl phy-usb2.0: soc: ef7bdd7fffffffff (0xef7bdd7fffffffff) [ 153.484061] Unable to handle kernel paging request at virtual address 007bdd800000004f [ 153.492132] Mem abort info: [ 153.495083] ESR = 0x96000004 [ 153.498308] EC = 0x25: DABT (current EL), IL = 32 bits [ 153.503771] SET = 0, FnV = 0 [ 153.506979] EA = 0, S1PTW = 0 [ 153.510260] Data abort info: [ 153.513200] ISV = 0, ISS = 0x00000004 [ 153.517181] CM = 0, WnR = 0 [ 153.520302] [007bdd800000004f] address between user and kernel address ranges [ 153.527600] Internal error: Oops: 96000004 [#1] PREEMPT SMP [ 153.533231] Modules linked in: nouveau panel_simple tegra_video(C) tegra_drm drm_ttm_helper videobuf2_dma_contig ttm videobuf2_memops cec videobuf2_v4l2 videobuf2_common drm_kms_helper v4l2_fwnode videodev drm mc snd_hda_codec_hdmi cdc_ether usbnet snd_hda_tegra r8152 crct10dif_ce snd_hda_codec snd_hda_core tegra_xudc host1x lp855x_bl at24 ip_tables x_tables ipv6 [ 153.566417] CPU: 0 PID: 300 Comm: systemd-sleep Tainted: G C 5.10.0-rc3-next-20201113-00019-g5c064d5372b0-dirty #624 [ 153.578283] Hardware name: NVIDIA Jetson TX1 Developer Kit (DT) [ 153.584281] pstate: 40000005 (nZcv daif -PAN -UAO -TCO BTYPE=--) [ 153.590381] pc : tegra_xusb_padctl_suspend_noirq+0x88/0x100 [ 153.596016] lr : tegra_xusb_padctl_suspend_noirq+0x80/0x100 [ 153.601632] sp : ffff8000120dbb60 [ 153.604999] x29: ffff8000120dbb60 x28: ffff000080a1df00 [ 153.610430] x27: 0000000000000002 x26: ffff8000106f8540 [ 153.615858] x25: ffff8000113ac4a4 x24: ffff80001148c198 [ 153.621277] x23: ffff800010c4538c x22: 0000000000000002 [ 153.626692] x21: ffff800010ccde80 x20: ffff0000829f6480 [ 153.632107] x19: ffff000080917000 x18: 0000000000000030 [ 153.637521] x17: 0000000000000000 x16: 0000000000000000 [ 153.642933] x15: ffff000080a1e380 x14: 74636461702d6273 [ 153.648346] x13: ffff8000113ad058 x12: 0000000000000f39 [ 153.653759] x11: 0000000000000513 x10: ffff800011405058 [ 153.659176] x9 : 00000000fffff000 x8 : ffff8000113ad058 [ 153.664590] x7 : ffff800011405058 x6 : 0000000000000000 [ 153.670002] x5 : 0000000000000000 x4 : ffff0000fe908bc0 [ 153.675414] x3 : ffff0000fe910228 x2 : 162ef67e0581e700 [ 153.680826] x1 : 162ef67e0581e700 x0 : ef7bdd7fffffffff [ 153.686241] Call trace: [ 153.688769] tegra_xusb_padctl_suspend_noirq+0x88/0x100 [ 153.694077] __device_suspend_noirq+0x68/0x1cc [ 153.698594] dpm_noirq_suspend_devices+0x10c/0x1d0 [ 153.703456] dpm_suspend_noirq+0x28/0xa0 [ 153.707461] suspend_devices_and_enter+0x234/0x4bc [ 153.712314] pm_suspend+0x1e4/0x270 [ 153.715868] state_store+0x8c/0x110 [ 153.719440] kobj_attr_store+0x1c/0x30 [ 153.723259] sysfs_kf_write+0x4c/0x7c [ 153.726981] kernfs_fop_write+0x124/0x240 [ 153.731065] vfs_write+0xe4/0x204 [ 153.734449] ksys_write+0x6c/0x100 [ 153.737925] __arm64_sys_write+0x20/0x30 [ 153.741931] el0_svc_common.constprop.0+0x78/0x1a0 [ 153.746789] do_el0_svc+0x24/0x90 [ 153.750181] el0_sync_handler+0x254/0x260 [ 153.754251] el0_sync+0x174/0x180 [ 153.757663] Code: aa0303e2 94000f64 f9405680 b40000e0 (f9402803) [ 153.763826] ---[ end trace 81543a3394cb409d ]--- Fixes: e8f7d2f409a1 ("phy: tegra: xusb: Add usb-phy support") Signed-off-by: JC Kuo Acked-by: Thierry Reding Link: 
https://lore.kernel.org/r/20201117083803.185209-1-jckuo@nvidia.com Signed-off-by: Vinod Koul --- drivers/phy/tegra/xusb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 40e03c931340..a6e8afe7c5ee 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -688,7 +688,7 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port) * reference to retrieve usb-phy details. */ port->usb_phy.dev = &lane->pad->lanes[port->index]->dev; - port->usb_phy.dev->driver = port->padctl->dev->driver; + port->usb_phy.dev->driver = port->dev.driver; port->usb_phy.otg->usb_phy = &port->usb_phy; port->usb_phy.otg->set_peripheral = tegra_xusb_set_peripheral; port->usb_phy.otg->set_host = tegra_xusb_set_host; -- cgit v1.2.3 From d7bb92e3c908a9df47bd3a306f6fc3e009babc5b Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Wed, 25 Nov 2020 15:37:45 +0800 Subject: phy: mediatek: Make PHY_MTK_{XSPHY, TPHY} depend on HAS_IOMEM and OF_ADDRESS to fix build errors devm_ioremap_resource() will be not built in lib/devres.c if CONFIG_HAS_IOMEM is not set, of_address_to_resource() will be not built in drivers/of/address.c if CONFIG_OF_ADDRESS is not set, and then there exists two build errors about undefined reference to "devm_ioremap_resource" and "of_address_to_resource" in phy-mtk-xsphy.c under COMPILE_TEST and CONFIG_PHY_MTK_XSPHY, make PHY_MTK_XSPHY depend on HAS_IOMEM and OF_ADDRESS to fix it. The above issue is reported by kernel test robot , through the discussion in the v1 patch, as Chunfeng said we need also do this for config PHY_MTK_TPHY: drivers/phy/mediatek/phy-mtk-tphy.c:1157: retval = of_address_to_resource(child_np, 0, &res); drivers/phy/mediatek/phy-mtk-tphy.c:1123: tphy->sif_base = devm_ioremap_resource(dev, sif_res); drivers/phy/mediatek/phy-mtk-tphy.c:1164: instance->port_base = devm_ioremap_resource(&phy->dev, &res); Reported-by: kernel test robot Signed-off-by: Tiezhu Yang Acked-by: Randy Dunlap Acked-by: Chunfeng Yun Link: https://lore.kernel.org/r/1606289865-692-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Vinod Koul --- drivers/phy/mediatek/Kconfig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig index 50c5e9306e19..f44800b6a948 100644 --- a/drivers/phy/mediatek/Kconfig +++ b/drivers/phy/mediatek/Kconfig @@ -5,7 +5,8 @@ config PHY_MTK_TPHY tristate "MediaTek T-PHY Driver" depends on ARCH_MEDIATEK || COMPILE_TEST - depends on OF + depends on OF && OF_ADDRESS + depends on HAS_IOMEM select GENERIC_PHY help Say 'Y' here to add support for MediaTek T-PHY driver, @@ -29,7 +30,8 @@ config PHY_MTK_UFS config PHY_MTK_XSPHY tristate "MediaTek XS-PHY Driver" depends on ARCH_MEDIATEK || COMPILE_TEST - depends on OF + depends on OF && OF_ADDRESS + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support the SuperSpeedPlus XS-PHY transceiver for -- cgit v1.2.3 From 0e055d179ea9368f882d2060b0d82341ee90bae3 Mon Sep 17 00:00:00 2001 From: Sergio Paracuellos Date: Tue, 1 Dec 2020 12:20:50 +0100 Subject: phy: ralink: phy-mt7621-pci: drop 'COMPILE_TEST' from Kconfig This driver includes the following two files directly: - mt7621.h - ralink_regs.h Compilation for its related platform properly works because its real path is included in 'arch/mips/ralink/Platform' as cflags. 
This driver depends on RALINK but also is enabled for COMPILE_TEST where nothing about its platform is known and this directly included files are not found at all breaking compilation. If we want 'COMPILE_TEST' we have to change cflags also inside 'phy/ralink' subdirectory Makefile which seems that does not like to linux-phy maintainers. Hence remove COMPILE_TEST from Kconfig to avoid the problem. Fixes: d87da32372a0 ("phy: ralink: Add PHY driver for MT7621 PCIe PHY") Reported-by: Stephen Rothwell Signed-off-by: Sergio Paracuellos Link: https://lore.kernel.org/r/20201201112051.17463-2-sergio.paracuellos@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/ralink/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/ralink/Kconfig b/drivers/phy/ralink/Kconfig index 2fabb14d2998..ecc309ba9fee 100644 --- a/drivers/phy/ralink/Kconfig +++ b/drivers/phy/ralink/Kconfig @@ -4,7 +4,7 @@ # config PHY_MT7621_PCI tristate "MediaTek MT7621 PCI PHY Driver" - depends on (RALINK || COMPILE_TEST) && OF + depends on RALINK && OF select GENERIC_PHY select REGMAP_MMIO help -- cgit v1.2.3 From 8145dcb07d0c8bdb1e8e76a5df18779431f7af8e Mon Sep 17 00:00:00 2001 From: Sergio Paracuellos Date: Tue, 1 Dec 2020 12:20:51 +0100 Subject: phy: ralink: phy-mt7621-pci: set correct name in MODULE_DEVICE_TABLE macro Correct name passed into 'MODULE_DEVICE_TABLE' which was wrong and was showing a warning when the driver is enabled for 'COMPILE_TEST'. Signed-off-by: Sergio Paracuellos Link: https://lore.kernel.org/r/20201201112051.17463-3-sergio.paracuellos@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/ralink/phy-mt7621-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c index db79088d5362..9a610b414b1f 100644 --- a/drivers/phy/ralink/phy-mt7621-pci.c +++ b/drivers/phy/ralink/phy-mt7621-pci.c @@ -335,7 +335,7 @@ static const struct of_device_id mt7621_pci_phy_ids[] = { { .compatible = "mediatek,mt7621-pci-phy" }, {}, }; -MODULE_DEVICE_TABLE(of, mt7621_pci_ids); +MODULE_DEVICE_TABLE(of, mt7621_pci_phy_ids); static struct platform_driver mt7621_pci_phy_driver = { .probe = mt7621_pci_phy_probe, -- cgit v1.2.3 From 89828f632dec8ab1e19f23771b313c1942e102c0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 1 Dec 2020 10:02:54 +0300 Subject: bus: mhi: core: Fix error handling in mhi_register_controller() There are a few problems with the error handling in this function. They mostly center around the alloc_ordered_workqueue() allocation. 1) If that allocation fails or if the kcalloc() prior to it fails then it leads to a NULL dereference when we call destroy_workqueue(mhi_cntrl->hiprio_wq). 2) The error code is not set. 3) The "mhi_cntrl->mhi_cmd" allocation is not freed. The error handling was slightly confusing and I re-ordered it to be in the exact mirror/reverse order of how things were allocated. I changed the label names to say what the goto does instead of describing where the goto comes from. 
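In schematic form (not MHI-specific; 'struct foo', register_thing() and the field names are placeholders for illustration), the unwind pattern being applied is:

	int example_setup(struct foo *foo)
	{
		int ret;

		foo->a = kzalloc(sizeof(*foo->a), GFP_KERNEL);
		if (!foo->a)
			return -ENOMEM;

		foo->b = kzalloc(sizeof(*foo->b), GFP_KERNEL);
		if (!foo->b) {
			ret = -ENOMEM;
			goto err_free_a;
		}

		ret = register_thing(foo);
		if (ret)
			goto err_free_b;

		return 0;

	/* Labels name the cleanup they perform, in reverse allocation order. */
	err_free_b:
		kfree(foo->b);
	err_free_a:
		kfree(foo->a);
		return ret;
	}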
Fixes: 8f7039787687 ("bus: mhi: core: Move to using high priority workqueue") Signed-off-by: Dan Carpenter Reviewed-by: Loic Poulain Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/core/init.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c index 96cde9c0034c..f0697f433c2f 100644 --- a/drivers/bus/mhi/core/init.c +++ b/drivers/bus/mhi/core/init.c @@ -871,7 +871,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); if (!mhi_cntrl->mhi_cmd) { ret = -ENOMEM; - goto error_alloc_cmd; + goto err_free_event; } INIT_LIST_HEAD(&mhi_cntrl->transition_list); @@ -886,7 +886,8 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, ("mhi_hiprio_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI); if (!mhi_cntrl->hiprio_wq) { dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); - goto error_alloc_cmd; + ret = -ENOMEM; + goto err_free_cmd; } mhi_cmd = mhi_cntrl->mhi_cmd; @@ -932,7 +933,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, SOC_HW_VERSION_OFFS, &soc_info); if (ret) - goto error_alloc_dev; + goto err_destroy_wq; mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >> SOC_HW_VERSION_FAM_NUM_SHFT; @@ -946,7 +947,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); if (mhi_cntrl->index < 0) { ret = mhi_cntrl->index; - goto error_ida_alloc; + goto err_destroy_wq; } /* Register controller with MHI bus */ @@ -954,7 +955,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (IS_ERR(mhi_dev)) { dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); ret = PTR_ERR(mhi_dev); - goto error_alloc_dev; + goto err_ida_free; } mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; @@ -967,7 +968,7 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, ret = device_add(&mhi_dev->dev); if (ret) - goto error_add_dev; + goto err_release_dev; mhi_cntrl->mhi_dev = mhi_dev; @@ -975,19 +976,17 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, return 0; -error_add_dev: +err_release_dev: put_device(&mhi_dev->dev); - -error_alloc_dev: +err_ida_free: ida_free(&mhi_controller_ida, mhi_cntrl->index); - -error_ida_alloc: +err_destroy_wq: + destroy_workqueue(mhi_cntrl->hiprio_wq); +err_free_cmd: kfree(mhi_cntrl->mhi_cmd); - -error_alloc_cmd: - vfree(mhi_cntrl->mhi_chan); +err_free_event: kfree(mhi_cntrl->mhi_event); - destroy_workqueue(mhi_cntrl->hiprio_wq); + vfree(mhi_cntrl->mhi_chan); return ret; } -- cgit v1.2.3 From 57d9352b6c651b090179f8b223b6681275c64a4f Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:18 -0800 Subject: fpga: fpga-mgr: Add devm_fpga_mgr_register() API Add a devm_fpga_mgr_register() API that can be used to register a FPGA Manager that was created using devm_fpga_mgr_create(). Introduce a struct fpga_mgr_devres that makes the devres allocation a little bit more readable and gets reused for devm_fpga_mgr_create() devm_fpga_mgr_register(). 
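In the driver conversions that follow, this lets a probe routine drop its remove() counterpart entirely; schematically (example_fpga_ops and priv are placeholders, not taken from this patch):

	static int example_fpga_probe(struct device *dev)
	{
		struct fpga_manager *mgr;

		mgr = devm_fpga_mgr_create(dev, "example FPGA manager",
					   &example_fpga_ops, priv);
		if (!mgr)
			return -ENOMEM;

		/* Unregistration and freeing happen automatically when 'dev'
		 * is detached, since both objects are device-managed. */
		return devm_fpga_mgr_register(dev, mgr);
	}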
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-2-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/fpga-mgr.c | 81 ++++++++++++++++++++++++++++++++++++------- include/linux/fpga/fpga-mgr.h | 2 ++ 2 files changed, 71 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index f38bab01432e..b85bc47c91a9 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -21,6 +21,10 @@ static DEFINE_IDA(fpga_mgr_ida); static struct class *fpga_mgr_class; +struct fpga_mgr_devres { + struct fpga_manager *mgr; +}; + /** * fpga_image_info_alloc - Allocate a FPGA image info struct * @dev: owning device @@ -625,9 +629,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_free); static void devm_fpga_mgr_release(struct device *dev, void *res) { - struct fpga_manager *mgr = *(struct fpga_manager **)res; + struct fpga_mgr_devres *dr = res; - fpga_mgr_free(mgr); + fpga_mgr_free(dr->mgr); } /** @@ -651,21 +655,21 @@ struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, const struct fpga_manager_ops *mops, void *priv) { - struct fpga_manager **ptr, *mgr; + struct fpga_mgr_devres *dr; - ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL); + if (!dr) return NULL; - mgr = fpga_mgr_create(dev, name, mops, priv); - if (!mgr) { - devres_free(ptr); - } else { - *ptr = mgr; - devres_add(dev, ptr); + dr->mgr = fpga_mgr_create(dev, name, mops, priv); + if (!dr->mgr) { + devres_free(dr); + return NULL; } - return mgr; + devres_add(dev, dr); + + return dr->mgr; } EXPORT_SYMBOL_GPL(devm_fpga_mgr_create); @@ -722,6 +726,59 @@ void fpga_mgr_unregister(struct fpga_manager *mgr) } EXPORT_SYMBOL_GPL(fpga_mgr_unregister); +static int fpga_mgr_devres_match(struct device *dev, void *res, + void *match_data) +{ + struct fpga_mgr_devres *dr = res; + + return match_data == dr->mgr; +} + +static void devm_fpga_mgr_unregister(struct device *dev, void *res) +{ + struct fpga_mgr_devres *dr = res; + + fpga_mgr_unregister(dr->mgr); +} + +/** + * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * @dev: managing device for this FPGA manager + * @mgr: fpga manager struct + * + * This is the devres variant of fpga_mgr_register() for which the unregister + * function will be called automatically when the managing device is detached. + */ +int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr) +{ + struct fpga_mgr_devres *dr; + int ret; + + /* + * Make sure that the struct fpga_manager * that is passed in is + * managed itself. 
+ */ + if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release, + fpga_mgr_devres_match, mgr))) + return -EINVAL; + + dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + ret = fpga_mgr_register(mgr); + if (ret) { + devres_free(dr); + return ret; + } + + dr->mgr = mgr; + devres_add(dev, dr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); + static void fpga_mgr_dev_release(struct device *dev) { } diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index e8ca62b2cb5b..2bc3030a69e5 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -198,6 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr); int fpga_mgr_register(struct fpga_manager *mgr); void fpga_mgr_unregister(struct fpga_manager *mgr); +int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr); + struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name, const struct fpga_manager_ops *mops, void *priv); -- cgit v1.2.3 From c9d754d6f71c780caa2d496b39e54f86a6622a0d Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:19 -0800 Subject: fpga: fpga-mgr: altera-ps-spi: Simplify registration Simplify registration by using new devm_fpga_mgr_register() API. Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-3-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/altera-ps-spi.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 0221dee8dd4c..23bfd4d1ad0f 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -307,18 +307,7 @@ static int altera_ps_probe(struct spi_device *spi) if (!mgr) return -ENOMEM; - spi_set_drvdata(spi, mgr); - - return fpga_mgr_register(mgr); -} - -static int altera_ps_remove(struct spi_device *spi) -{ - struct fpga_manager *mgr = spi_get_drvdata(spi); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(&spi->dev, mgr); } static const struct spi_device_id altera_ps_spi_ids[] = { @@ -337,7 +326,6 @@ static struct spi_driver altera_ps_driver = { }, .id_table = altera_ps_spi_ids, .probe = altera_ps_probe, - .remove = altera_ps_remove, }; module_spi_driver(altera_ps_driver) -- cgit v1.2.3 From 83eb4fbdcfda8b8e9a5c74c1d27e280183f1a77b Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:20 -0800 Subject: fpga: fpga-mgr: dfl-fme-mgr: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. 
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-4-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/dfl-fme-mgr.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index b3f7eee3c93f..d5861d13b306 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -314,18 +314,8 @@ static int fme_mgr_probe(struct platform_device *pdev) return -ENOMEM; mgr->compat_id = compat_id; - platform_set_drvdata(pdev, mgr); - return fpga_mgr_register(mgr); -} - -static int fme_mgr_remove(struct platform_device *pdev) -{ - struct fpga_manager *mgr = platform_get_drvdata(pdev); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(dev, mgr); } static struct platform_driver fme_mgr_driver = { @@ -333,7 +323,6 @@ static struct platform_driver fme_mgr_driver = { .name = DFL_FPGA_FME_MGR, }, .probe = fme_mgr_probe, - .remove = fme_mgr_remove, }; module_platform_driver(fme_mgr_driver); -- cgit v1.2.3 From 7027b7305d16f7b4db36139cc0c063e5d2fd216d Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:21 -0800 Subject: fpga: fpga-mgr: ice40-spi: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-5-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/ice40-spi.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c index 8d689fea0dab..69dec5af23c3 100644 --- a/drivers/fpga/ice40-spi.c +++ b/drivers/fpga/ice40-spi.c @@ -183,18 +183,7 @@ static int ice40_fpga_probe(struct spi_device *spi) if (!mgr) return -ENOMEM; - spi_set_drvdata(spi, mgr); - - return fpga_mgr_register(mgr); -} - -static int ice40_fpga_remove(struct spi_device *spi) -{ - struct fpga_manager *mgr = spi_get_drvdata(spi); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(dev, mgr); } static const struct of_device_id ice40_fpga_of_match[] = { @@ -205,7 +194,6 @@ MODULE_DEVICE_TABLE(of, ice40_fpga_of_match); static struct spi_driver ice40_fpga_driver = { .probe = ice40_fpga_probe, - .remove = ice40_fpga_remove, .driver = { .name = "ice40spi", .of_match_table = of_match_ptr(ice40_fpga_of_match), -- cgit v1.2.3 From a3b79b2a58f0862378817998eecc023e93ab5a46 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:22 -0800 Subject: fpga: fpga-mgr: machxo2-spi: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. 
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-6-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/machxo2-spi.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c index b316369156fe..114a64d2b7a4 100644 --- a/drivers/fpga/machxo2-spi.c +++ b/drivers/fpga/machxo2-spi.c @@ -371,18 +371,7 @@ static int machxo2_spi_probe(struct spi_device *spi) if (!mgr) return -ENOMEM; - spi_set_drvdata(spi, mgr); - - return fpga_mgr_register(mgr); -} - -static int machxo2_spi_remove(struct spi_device *spi) -{ - struct fpga_manager *mgr = spi_get_drvdata(spi); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(dev, mgr); } static const struct of_device_id of_match[] = { @@ -403,7 +392,6 @@ static struct spi_driver machxo2_spi_driver = { .of_match_table = of_match_ptr(of_match), }, .probe = machxo2_spi_probe, - .remove = machxo2_spi_remove, .id_table = lattice_ids, }; -- cgit v1.2.3 From 20e8963f4f38a9bfaffa74198efae3eb9a8bb7ed Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:23 -0800 Subject: fpga: fpga-mgr: socfpga: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-7-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/socfpga.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c index 4a8a2fcd4e6c..1f467173fc1f 100644 --- a/drivers/fpga/socfpga.c +++ b/drivers/fpga/socfpga.c @@ -576,18 +576,7 @@ static int socfpga_fpga_probe(struct platform_device *pdev) if (!mgr) return -ENOMEM; - platform_set_drvdata(pdev, mgr); - - return fpga_mgr_register(mgr); -} - -static int socfpga_fpga_remove(struct platform_device *pdev) -{ - struct fpga_manager *mgr = platform_get_drvdata(pdev); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(dev, mgr); } #ifdef CONFIG_OF @@ -601,7 +590,6 @@ MODULE_DEVICE_TABLE(of, socfpga_fpga_of_match); static struct platform_driver socfpga_fpga_driver = { .probe = socfpga_fpga_probe, - .remove = socfpga_fpga_remove, .driver = { .name = "socfpga_fpga_manager", .of_match_table = of_match_ptr(socfpga_fpga_of_match), -- cgit v1.2.3 From d6530c0a48b77c1460eb4125009ebb37e4e0d8de Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:24 -0800 Subject: fpga: fpga-mgr: ts73xx: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. 
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-8-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/ts73xx-fpga.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c index 2888ff000e4d..101f016c6ed8 100644 --- a/drivers/fpga/ts73xx-fpga.c +++ b/drivers/fpga/ts73xx-fpga.c @@ -127,18 +127,7 @@ static int ts73xx_fpga_probe(struct platform_device *pdev) if (!mgr) return -ENOMEM; - platform_set_drvdata(pdev, mgr); - - return fpga_mgr_register(mgr); -} - -static int ts73xx_fpga_remove(struct platform_device *pdev) -{ - struct fpga_manager *mgr = platform_get_drvdata(pdev); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(kdev, mgr); } static struct platform_driver ts73xx_fpga_driver = { @@ -146,7 +135,6 @@ static struct platform_driver ts73xx_fpga_driver = { .name = "ts73xx-fpga-mgr", }, .probe = ts73xx_fpga_probe, - .remove = ts73xx_fpga_remove, }; module_platform_driver(ts73xx_fpga_driver); -- cgit v1.2.3 From f4ce435b732adfcca1dc70fd3050f9ffc80d9add Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:25 -0800 Subject: fpga: fpga-mgr: xilinx-spi: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-9-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/xilinx-spi.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c index 824abbbd631e..27defa98092d 100644 --- a/drivers/fpga/xilinx-spi.c +++ b/drivers/fpga/xilinx-spi.c @@ -259,18 +259,7 @@ static int xilinx_spi_probe(struct spi_device *spi) if (!mgr) return -ENOMEM; - spi_set_drvdata(spi, mgr); - - return fpga_mgr_register(mgr); -} - -static int xilinx_spi_remove(struct spi_device *spi) -{ - struct fpga_manager *mgr = spi_get_drvdata(spi); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(&spi->dev, mgr); } static const struct of_device_id xlnx_spi_of_match[] = { @@ -285,7 +274,6 @@ static struct spi_driver xilinx_slave_spi_driver = { .of_match_table = of_match_ptr(xlnx_spi_of_match), }, .probe = xilinx_spi_probe, - .remove = xilinx_spi_remove, }; module_spi_driver(xilinx_slave_spi_driver) -- cgit v1.2.3 From 2630fa8d00318346b73f692d90863468d8cfe27d Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:26 -0800 Subject: fpga: fpga-mgr: zynqmp: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. 
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-10-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/zynqmp-fpga.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c index 4a1139e05280..125743c9797f 100644 --- a/drivers/fpga/zynqmp-fpga.c +++ b/drivers/fpga/zynqmp-fpga.c @@ -95,7 +95,6 @@ static int zynqmp_fpga_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct zynqmp_fpga_priv *priv; struct fpga_manager *mgr; - int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -108,24 +107,7 @@ static int zynqmp_fpga_probe(struct platform_device *pdev) if (!mgr) return -ENOMEM; - platform_set_drvdata(pdev, mgr); - - ret = fpga_mgr_register(mgr); - if (ret) { - dev_err(dev, "unable to register FPGA manager"); - return ret; - } - - return 0; -} - -static int zynqmp_fpga_remove(struct platform_device *pdev) -{ - struct fpga_manager *mgr = platform_get_drvdata(pdev); - - fpga_mgr_unregister(mgr); - - return 0; + return devm_fpga_mgr_register(dev, mgr); } static const struct of_device_id zynqmp_fpga_of_match[] = { @@ -137,7 +119,6 @@ MODULE_DEVICE_TABLE(of, zynqmp_fpga_of_match); static struct platform_driver zynqmp_fpga_driver = { .probe = zynqmp_fpga_probe, - .remove = zynqmp_fpga_remove, .driver = { .name = "zynqmp_fpga_manager", .of_match_table = of_match_ptr(zynqmp_fpga_of_match), -- cgit v1.2.3 From 907d4ad59904a1b327c92c9fbaa990e961f9a8f2 Mon Sep 17 00:00:00 2001 From: Moritz Fischer Date: Sun, 15 Nov 2020 11:51:27 -0800 Subject: fpga: fpga-mgr: altera-pr-ip: Simplify registration Simplify registration using new devm_fpga_mgr_register() API. 
Reviewed-by: Tom Rix Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/20201115195127.284487-11-mdf@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/altera-pr-ip-core-plat.c | 10 ---------- drivers/fpga/altera-pr-ip-core.c | 4 +--- 2 files changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c index 99b9cc0e70f0..b008a6b8d2d3 100644 --- a/drivers/fpga/altera-pr-ip-core-plat.c +++ b/drivers/fpga/altera-pr-ip-core-plat.c @@ -28,15 +28,6 @@ static int alt_pr_platform_probe(struct platform_device *pdev) return alt_pr_register(dev, reg_base); } -static int alt_pr_platform_remove(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - - alt_pr_unregister(dev); - - return 0; -} - static const struct of_device_id alt_pr_of_match[] = { { .compatible = "altr,a10-pr-ip", }, {}, @@ -46,7 +37,6 @@ MODULE_DEVICE_TABLE(of, alt_pr_of_match); static struct platform_driver alt_pr_platform_driver = { .probe = alt_pr_platform_probe, - .remove = alt_pr_platform_remove, .driver = { .name = "alt_a10_pr_ip", .of_match_table = alt_pr_of_match, diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c index 2cf25fd5e897..5b130c4d9882 100644 --- a/drivers/fpga/altera-pr-ip-core.c +++ b/drivers/fpga/altera-pr-ip-core.c @@ -195,9 +195,7 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base) if (!mgr) return -ENOMEM; - dev_set_drvdata(dev, mgr); - - return fpga_mgr_register(mgr); + return devm_fpga_mgr_register(dev, mgr); } EXPORT_SYMBOL_GPL(alt_pr_register); -- cgit v1.2.3 From 3eaf2da98993ecdf6e46a2b27cd31ef84a12e45f Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 2 Dec 2020 07:47:59 +0100 Subject: phy: samsung: Merge Kconfig for Exynos5420 and Exynos5250 Exynos5420 variant of USB2 PHY is handled by the same code as the Exynos5250 one. Introducing a separate Kconfig symbol for it was an over-engineering, which turned out to cause build break for certain configurations: ERROR: modpost: "exynos5420_usb2_phy_config" [drivers/phy/samsung/phy-exynos-usb2.ko] undefined! Fix this by removing PHY_EXYNOS5420_USB2 symbol and using PHY_EXYNOS5250_USB2 also for Exynos5420 SoCs. 
Reported-by: Markus Reichl Fixes: 81b534f7e9b2 ("phy: samsung: Add support for the Exynos5420 variant of the USB2 PHY") Signed-off-by: Marek Szyprowski Acked-by: Krzysztof Kozlowski Reviewed-by: Jaehoon Chung Link: https://lore.kernel.org/r/20201202064759.24300-1-m.szyprowski@samsung.com Signed-off-by: Vinod Koul --- drivers/phy/samsung/Kconfig | 7 +------ drivers/phy/samsung/phy-samsung-usb2.c | 2 -- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/samsung/Kconfig b/drivers/phy/samsung/Kconfig index 0f51d3bf38cc..e20d2fcc9fe7 100644 --- a/drivers/phy/samsung/Kconfig +++ b/drivers/phy/samsung/Kconfig @@ -64,12 +64,7 @@ config PHY_EXYNOS4X12_USB2 config PHY_EXYNOS5250_USB2 bool depends on PHY_SAMSUNG_USB2 - default SOC_EXYNOS5250 - -config PHY_EXYNOS5420_USB2 - bool - depends on PHY_SAMSUNG_USB2 - default SOC_EXYNOS5420 + default SOC_EXYNOS5250 || SOC_EXYNOS5420 config PHY_S5PV210_USB2 bool "Support for S5PV210" diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c index 3908153f2ce5..ec2befabeea6 100644 --- a/drivers/phy/samsung/phy-samsung-usb2.c +++ b/drivers/phy/samsung/phy-samsung-usb2.c @@ -127,8 +127,6 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = { .compatible = "samsung,exynos5250-usb2-phy", .data = &exynos5250_usb2_phy_config, }, -#endif -#ifdef CONFIG_PHY_EXYNOS5420_USB2 { .compatible = "samsung,exynos5420-usb2-phy", .data = &exynos5420_usb2_phy_config, -- cgit v1.2.3 From e04e60fce47e61743a8726d76b0149c1f4ad8957 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 24 Nov 2020 21:07:42 +0800 Subject: soundwire: master: use pm_runtime_set_active() on add The 'master' device acts as a glue layer used during bus initialization only, and it needs to be 'transparent' for pm_runtime management. Its behavior should be that it becomes active when one of its children becomes active, and suspends when all of its children are suspended. In our tests on Intel platforms, we routinely see these sort of warnings on the initial boot: [ 21.447345] rt715 sdw:3:25d:715:0: runtime PM trying to activate child device sdw:3:25d:715:0 but parent (sdw-master-3) is not active This is root-caused to a missing setup to make the device 'active' on probe. Since we don't want the device to remain active forever after the probe, the autosuspend configuration is also enabled at the end of the probe - the device will actually autosuspend only in the case where there are no devices physically attached. In practice, the master device will suspend when all its children are no longer active. Fixes: bd84256e86ecf ('soundwire: master: enable pm runtime') Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20201124130742.10986-1-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/master.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers') diff --git a/drivers/soundwire/master.c b/drivers/soundwire/master.c index 3488bb824e84..9b05c9e25ebe 100644 --- a/drivers/soundwire/master.c +++ b/drivers/soundwire/master.c @@ -8,6 +8,15 @@ #include #include "bus.h" +/* + * The 3s value for autosuspend will only be used if there are no + * devices physically attached on a bus segment. In practice enabling + * the bus operation will result in children devices become active and + * the master device will only suspend when all its children are no + * longer active. 
+ */ +#define SDW_MASTER_SUSPEND_DELAY_MS 3000 + /* * The sysfs for properties reflects the MIPI description as given * in the MIPI DisCo spec @@ -154,7 +163,12 @@ int sdw_master_device_add(struct sdw_bus *bus, struct device *parent, bus->dev = &md->dev; bus->md = md; + pm_runtime_set_autosuspend_delay(&bus->md->dev, SDW_MASTER_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(&bus->md->dev); + pm_runtime_mark_last_busy(&bus->md->dev); + pm_runtime_set_active(&bus->md->dev); pm_runtime_enable(&bus->md->dev); + pm_runtime_idle(&bus->md->dev); device_register_err: return ret; } -- cgit v1.2.3 From ab7dd2008b29e48cff216aa984902d59b583d36d Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 24 Nov 2020 19:11:27 +0800 Subject: phy/rockchip: Make PHY_ROCKCHIP_INNO_HDMI depend on HAS_IOMEM to fix build error devm_ioremap_resource() will be not built in lib/devres.c if CONFIG_HAS_IOMEM is not set, and then there exists a build error about undefined reference to "devm_ioremap_resource" in the file phy-rockchip-inno-hdmi.c under COMPILE_TEST and CONFIG_PHY_ROCKCHIP_INNO_HDMI, make PHY_ROCKCHIP_INNO_HDMI depend on HAS_IOMEM to fix it. Reported-by: kernel test robot Signed-off-by: Tiezhu Yang Reviewed-by: Heiko Stuebner Link: https://lore.kernel.org/r/1606216287-14648-1-git-send-email-yangtiezhu@loongson.cn Signed-off-by: Vinod Koul --- drivers/phy/rockchip/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index c2f22f90736c..159285f42e5c 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -32,6 +32,7 @@ config PHY_ROCKCHIP_INNO_HDMI tristate "Rockchip INNO HDMI PHY Driver" depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF depends on COMMON_CLK + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support the Rockchip Innosilicon HDMI PHY. 
-- cgit v1.2.3 From 4ea6fa2cb921cb17812501a27c3761037d64a217 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Wed, 2 Dec 2020 09:12:30 +0100 Subject: mhi: pci_generic: Fix implicit conversion warning Fix the following warning with explicit cast: warning: implicit conversion from 'unsigned long long' to 'dma_addr_t' (aka 'unsigned int') mhi_cntrl->iova_stop = DMA_BIT_MASK(info->dma_data_width); Fixes: 855a70c12021 ("bus: mhi: Add MHI PCI support for WWAN modems") Signed-off-by: Loic Poulain Reported-by: kernel test robot Reviewed-by: Nathan Chancellor Reviewed-by: Manivannan Sadhasivam Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/pci_generic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c index e3df838c3c80..f5bee76ea061 100644 --- a/drivers/bus/mhi/pci_generic.c +++ b/drivers/bus/mhi/pci_generic.c @@ -273,7 +273,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mhi_cntrl_config = info->config; mhi_cntrl->cntrl_dev = &pdev->dev; mhi_cntrl->iova_start = 0; - mhi_cntrl->iova_stop = DMA_BIT_MASK(info->dma_data_width); + mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width); mhi_cntrl->fw_image = info->fw; mhi_cntrl->edl_image = info->edl; -- cgit v1.2.3 From 17e0da0b8979a53977f684813f5c9db817d170e2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 4 Dec 2020 00:04:56 +0100 Subject: soundwire: intel: fix another unused-function warning Without CONFIG_PM, there is another warning about an unused function: drivers/soundwire/intel.c:530:12: error: 'intel_link_power_down' defined but not used [-Werror=unused-function] After a previous fix, the driver already uses both an #ifdef and a __maybe_unused annotation but still gets it wrong. Remove the ifdef and instead use __maybe_unused consistently to avoid the problem for good. 
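A minimal sketch of the resulting convention (placeholder "foo" names, not the intel.c code itself): the dev_pm_ops entries reference the callbacks unconditionally, so the callbacks stay outside any #ifdef CONFIG_PM and are annotated __maybe_unused, letting the compiler silently drop them when they end up unreferenced:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* quiesce the hardware, save state, ... */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	/* restore state */
	return 0;
}

static const struct dev_pm_ops foo_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};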
Fixes: f046b2334083 ("soundwire: intel: fix intel_suspend/resume defined but not used warning") Fixes: ebf878eddbb4 ("soundwire: intel: add pm_runtime support") Signed-off-by: Arnd Bergmann Reviewed-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20201203230502.1480063-1-arnd@kernel.org Signed-off-by: Vinod Koul --- drivers/soundwire/intel.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 6a1e862b16c3..66adb258a425 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1585,8 +1585,6 @@ int intel_master_process_wakeen_event(struct platform_device *pdev) * PM calls */ -#ifdef CONFIG_PM - static int __maybe_unused intel_suspend(struct device *dev) { struct sdw_cdns *cdns = dev_get_drvdata(dev); @@ -1641,7 +1639,7 @@ static int __maybe_unused intel_suspend(struct device *dev) return 0; } -static int intel_suspend_runtime(struct device *dev) +static int __maybe_unused intel_suspend_runtime(struct device *dev) { struct sdw_cdns *cdns = dev_get_drvdata(dev); struct sdw_intel *sdw = cdns_to_intel(cdns); @@ -1796,7 +1794,7 @@ static int __maybe_unused intel_resume(struct device *dev) return ret; } -static int intel_resume_runtime(struct device *dev) +static int __maybe_unused intel_resume_runtime(struct device *dev) { struct sdw_cdns *cdns = dev_get_drvdata(dev); struct sdw_intel *sdw = cdns_to_intel(cdns); @@ -1969,8 +1967,6 @@ static int intel_resume_runtime(struct device *dev) return ret; } -#endif - static const struct dev_pm_ops intel_pm = { SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) SET_RUNTIME_PM_OPS(intel_suspend_runtime, intel_resume_runtime, NULL) -- cgit v1.2.3 From f5f6e01f9164040ba120f30522efdb4deceb529a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 4 Dec 2020 14:56:43 +0100 Subject: phy: mediatek: allow compile-testing the hdmi phy Compile-testing the DRM_MEDIATEK_HDMI driver shows two missing dependencies, one results in a link failure: arm-linux-gnueabi-ld: drivers/phy/mediatek/phy-mtk-hdmi.o: in function `mtk_hdmi_phy_probe': phy-mtk-hdmi.c:(.text+0xd8): undefined reference to `__clk_get_name' arm-linux-gnueabi-ld: phy-mtk-hdmi.c:(.text+0x12c): undefined reference to `devm_clk_register' arm-linux-gnueabi-ld: phy-mtk-hdmi.c:(.text+0x250): undefined reference to `of_clk_add_provider' arm-linux-gnueabi-ld: phy-mtk-hdmi.c:(.text+0x298): undefined reference to `of_clk_src_simple_get' The other one is a harmless warning: WARNING: unmet direct dependencies detected for PHY_MTK_HDMI Depends on [n]: ARCH_MEDIATEK [=n] && OF [=y] Selected by [y]: - DRM_MEDIATEK_HDMI [=y] && HAS_IOMEM [=y] && DRM_MEDIATEK [=y] Fix these by adding dependencies on CONFIG_OF and CONFIG_COMMON_CLK. With that done, there is also no reason against adding CONFIG_COMPILE_TEST. 
Fixes: b28be59a2e26 ("phy: mediatek: Move mtk_hdmi_phy driver into drivers/phy/mediatek folder") Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20201204135650.2744481-1-arnd@kernel.org Signed-off-by: Vinod Koul --- drivers/phy/mediatek/Kconfig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig index f44800b6a948..789614116f1d 100644 --- a/drivers/phy/mediatek/Kconfig +++ b/drivers/phy/mediatek/Kconfig @@ -40,7 +40,9 @@ config PHY_MTK_XSPHY config PHY_MTK_HDMI tristate "MediaTek HDMI-PHY Driver" - depends on ARCH_MEDIATEK && OF + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on COMMON_CLK + depends on OF select GENERIC_PHY help Support HDMI PHY for Mediatek SoCs. -- cgit v1.2.3 From 51e339deab1e51443f6ac3b1bd5cd6cc8e8fe1d9 Mon Sep 17 00:00:00 2001 From: Wang Li Date: Thu, 26 Nov 2020 10:44:12 +0800 Subject: phy: renesas: rcar-gen3-usb2: disable runtime pm in case of failure pm_runtime_enable() will decrease power disable depth. Thus a pairing increment is needed on the error handling path to keep it balanced. Fixes: 5d8042e95fd4 ("phy: rcar-gen3-usb2: Add support for r8a77470") Reported-by: Hulk Robot Signed-off-by: Wang Li Link: https://lore.kernel.org/r/20201126024412.4046845-1-wangli74@huawei.com Signed-off-by: Vinod Koul --- drivers/phy/renesas/phy-rcar-gen3-usb2.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 1898bbe7c8e5..fbc55232120e 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -654,8 +654,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) */ pm_runtime_enable(dev); phy_usb2_ops = of_device_get_match_data(dev); - if (!phy_usb2_ops) - return -EINVAL; + if (!phy_usb2_ops) { + ret = -EINVAL; + goto error; + } mutex_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { -- cgit v1.2.3 From 8b5c2b45b8f0a11c9072da0f7baf9ee986d3151e Mon Sep 17 00:00:00 2001 From: Chris Ruehl Date: Sun, 29 Nov 2020 13:44:14 +0800 Subject: phy: rockchip: set pulldown for strobe line in dts This patch add support to set the internal pulldown via dt property and allow simplify the board design for the trace from emmc-phy to the eMMC chipset. Default to not set the pull-down. This patch was inspired from the 4.4 tree of the Rockchip SDK, where it is enabled unconditional. The patch had been tested with our rk3399 customized board. 
Signed-off-by: Chris Ruehl Link: https://lore.kernel.org/r/20201129054416.3980-2-chris.ruehl@gtsys.com.hk Signed-off-by: Vinod Koul --- drivers/phy/rockchip/phy-rockchip-emmc.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c index 2dc19ddd120f..48e2d75b1004 100644 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c @@ -67,6 +67,10 @@ #define PHYCTRL_OTAPDLYENA_SHIFT 0xb #define PHYCTRL_OTAPDLYSEL_MASK 0xf #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 +#define PHYCTRL_REN_STRB_DISABLE 0x0 +#define PHYCTRL_REN_STRB_ENABLE 0x1 +#define PHYCTRL_REN_STRB_MASK 0x1 +#define PHYCTRL_REN_STRB_SHIFT 0x9 #define PHYCTRL_IS_CALDONE(x) \ ((((x) >> PHYCTRL_CALDONE_SHIFT) & \ @@ -80,6 +84,7 @@ struct rockchip_emmc_phy { struct regmap *reg_base; struct clk *emmcclk; unsigned int drive_impedance; + unsigned int enable_strobe_pulldown; }; static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) @@ -295,6 +300,13 @@ static int rockchip_emmc_phy_power_on(struct phy *phy) PHYCTRL_OTAPDLYSEL_MASK, PHYCTRL_OTAPDLYSEL_SHIFT)); + /* Internal pull-down for strobe line */ + regmap_write(rk_phy->reg_base, + rk_phy->reg_offset + GRF_EMMCPHY_CON2, + HIWORD_UPDATE(rk_phy->enable_strobe_pulldown, + PHYCTRL_REN_STRB_MASK, + PHYCTRL_REN_STRB_SHIFT)); + /* Power up emmc phy analog blocks */ return rockchip_emmc_phy_power(phy, PHYCTRL_PDB_PWR_ON); } @@ -359,10 +371,14 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev) rk_phy->reg_offset = reg_offset; rk_phy->reg_base = grf; rk_phy->drive_impedance = PHYCTRL_DR_50OHM; + rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_DISABLE; if (!of_property_read_u32(dev->of_node, "drive-impedance-ohm", &val)) rk_phy->drive_impedance = convert_drive_impedance_ohm(pdev, val); + if (of_property_read_bool(dev->of_node, "enable-strobe-pulldown")) + rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_ENABLE; + generic_phy = devm_phy_create(dev, dev->of_node, &ops); if (IS_ERR(generic_phy)) { dev_err(dev, "failed to create PHY\n"); -- cgit v1.2.3 From 85e6225f401f95478ad52d77a6834a6ee1db8c32 Mon Sep 17 00:00:00 2001 From: "周琰杰 (Zhou Yanjie)" Date: Mon, 16 Nov 2020 22:19:04 +0800 Subject: USB: PHY: JZ4770: Remove unnecessary function calls. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unnecessary "of_match_ptr()", because Ingenic SoCs all depend on Device Tree. Suggested-by: Paul Cercueil Signed-off-by: 周琰杰 (Zhou Yanjie) Reviewed-by: Paul Cercueil Link: https://lore.kernel.org/r/20201116141906.11758-2-zhouyanjie@wanyeetech.com Signed-off-by: Vinod Koul --- drivers/usb/phy/phy-jz4770.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/usb/phy/phy-jz4770.c b/drivers/usb/phy/phy-jz4770.c index f6d3731581eb..4025da20b3fd 100644 --- a/drivers/usb/phy/phy-jz4770.c +++ b/drivers/usb/phy/phy-jz4770.c @@ -350,7 +350,7 @@ static struct platform_driver ingenic_phy_driver = { .probe = jz4770_phy_probe, .driver = { .name = "jz4770-phy", - .of_match_table = of_match_ptr(ingenic_usb_phy_of_matches), + .of_match_table = ingenic_usb_phy_of_matches, }, }; module_platform_driver(ingenic_phy_driver); -- cgit v1.2.3 From 31de313dfdcf6971b0a1c30f86eabaa1eede74b3 Mon Sep 17 00:00:00 2001 From: "周琰杰 (Zhou Yanjie)" Date: Mon, 16 Nov 2020 22:19:06 +0800 Subject: PHY: Ingenic: Add USB PHY driver using generic PHY framework. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used the generic PHY framework API to create the PHY, this driver supoorts USB OTG PHY used in JZ4770 SoC, JZ4775 SoC, JZ4780 SoC, X1000 SoC, X1830 SoC and X2000 SoC. Co-developed-by: 漆鹏振 (Qi Pengzhen) Signed-off-by: 漆鹏振 (Qi Pengzhen) Signed-off-by: 周琰杰 (Zhou Yanjie) Tested-by: 周正 (Zhou Zheng) Tested-by: H. Nikolaus Schaller Reviewed-by: Paul Cercueil Link: https://lore.kernel.org/r/20201116141906.11758-4-zhouyanjie@wanyeetech.com Signed-off-by: Vinod Koul --- drivers/phy/Kconfig | 1 + drivers/phy/Makefile | 1 + drivers/phy/ingenic/Kconfig | 12 + drivers/phy/ingenic/Makefile | 2 + drivers/phy/ingenic/phy-ingenic-usb.c | 412 ++++++++++++++++++++++++++++++++++ 5 files changed, 428 insertions(+) create mode 100644 drivers/phy/ingenic/Kconfig create mode 100644 drivers/phy/ingenic/Makefile create mode 100644 drivers/phy/ingenic/phy-ingenic-usb.c (limited to 'drivers') diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 01b53f86004c..00dabe5fab8a 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -66,6 +66,7 @@ source "drivers/phy/broadcom/Kconfig" source "drivers/phy/cadence/Kconfig" source "drivers/phy/freescale/Kconfig" source "drivers/phy/hisilicon/Kconfig" +source "drivers/phy/ingenic/Kconfig" source "drivers/phy/lantiq/Kconfig" source "drivers/phy/marvell/Kconfig" source "drivers/phy/mediatek/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index 6eb2916773c5..32261e164abd 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -15,6 +15,7 @@ obj-y += allwinner/ \ cadence/ \ freescale/ \ hisilicon/ \ + ingenic/ \ intel/ \ lantiq/ \ marvell/ \ diff --git a/drivers/phy/ingenic/Kconfig b/drivers/phy/ingenic/Kconfig new file mode 100644 index 000000000000..912b14e512cb --- /dev/null +++ b/drivers/phy/ingenic/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Phy drivers for Ingenic platforms +# +config PHY_INGENIC_USB + tristate "Ingenic SoCs USB PHY Driver" + depends on MIPS || COMPILE_TEST + depends on USB_SUPPORT + select GENERIC_PHY + help + This driver provides USB PHY support for the USB controller found + on the JZ-series and X-series SoCs from Ingenic. 
diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile new file mode 100644 index 000000000000..65d5ea00fc9d --- /dev/null +++ b/drivers/phy/ingenic/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += phy-ingenic-usb.o diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c new file mode 100644 index 000000000000..4d1587d82286 --- /dev/null +++ b/drivers/phy/ingenic/phy-ingenic-usb.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Ingenic SoCs USB PHY driver + * Copyright (c) Paul Cercueil + * Copyright (c) 漆鹏振 (Qi Pengzhen) + * Copyright (c) 周琰杰 (Zhou Yanjie) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* OTGPHY register offsets */ +#define REG_USBPCR_OFFSET 0x00 +#define REG_USBRDT_OFFSET 0x04 +#define REG_USBVBFIL_OFFSET 0x08 +#define REG_USBPCR1_OFFSET 0x0c + +/* bits within the USBPCR register */ +#define USBPCR_USB_MODE BIT(31) +#define USBPCR_AVLD_REG BIT(30) +#define USBPCR_COMMONONN BIT(25) +#define USBPCR_VBUSVLDEXT BIT(24) +#define USBPCR_VBUSVLDEXTSEL BIT(23) +#define USBPCR_POR BIT(22) +#define USBPCR_SIDDQ BIT(21) +#define USBPCR_OTG_DISABLE BIT(20) +#define USBPCR_TXPREEMPHTUNE BIT(6) + +#define USBPCR_IDPULLUP_MASK GENMASK(29, 28) +#define USBPCR_IDPULLUP_ALWAYS 0x2 +#define USBPCR_IDPULLUP_SUSPEND 0x1 +#define USBPCR_IDPULLUP_OTG 0x0 + +#define USBPCR_COMPDISTUNE_MASK GENMASK(19, 17) +#define USBPCR_COMPDISTUNE_DFT 0x4 + +#define USBPCR_OTGTUNE_MASK GENMASK(16, 14) +#define USBPCR_OTGTUNE_DFT 0x4 + +#define USBPCR_SQRXTUNE_MASK GENMASK(13, 11) +#define USBPCR_SQRXTUNE_DCR_20PCT 0x7 +#define USBPCR_SQRXTUNE_DFT 0x3 + +#define USBPCR_TXFSLSTUNE_MASK GENMASK(10, 7) +#define USBPCR_TXFSLSTUNE_DCR_50PPT 0xf +#define USBPCR_TXFSLSTUNE_DCR_25PPT 0x7 +#define USBPCR_TXFSLSTUNE_DFT 0x3 +#define USBPCR_TXFSLSTUNE_INC_25PPT 0x1 +#define USBPCR_TXFSLSTUNE_INC_50PPT 0x0 + +#define USBPCR_TXHSXVTUNE_MASK GENMASK(5, 4) +#define USBPCR_TXHSXVTUNE_DFT 0x3 +#define USBPCR_TXHSXVTUNE_DCR_15MV 0x1 + +#define USBPCR_TXRISETUNE_MASK GENMASK(5, 4) +#define USBPCR_TXRISETUNE_DFT 0x3 + +#define USBPCR_TXVREFTUNE_MASK GENMASK(3, 0) +#define USBPCR_TXVREFTUNE_INC_75PPT 0xb +#define USBPCR_TXVREFTUNE_INC_25PPT 0x7 +#define USBPCR_TXVREFTUNE_DFT 0x5 + +/* bits within the USBRDTR register */ +#define USBRDT_UTMI_RST BIT(27) +#define USBRDT_HB_MASK BIT(26) +#define USBRDT_VBFIL_LD_EN BIT(25) +#define USBRDT_IDDIG_EN BIT(24) +#define USBRDT_IDDIG_REG BIT(23) +#define USBRDT_VBFIL_EN BIT(2) + +/* bits within the USBPCR1 register */ +#define USBPCR1_BVLD_REG BIT(31) +#define USBPCR1_DPPD BIT(29) +#define USBPCR1_DMPD BIT(28) +#define USBPCR1_USB_SEL BIT(28) +#define USBPCR1_PORT_RST BIT(21) +#define USBPCR1_WORD_IF_16BIT BIT(19) + +enum ingenic_usb_phy_version { + ID_JZ4770, + ID_JZ4775, + ID_JZ4780, + ID_X1000, + ID_X1830, + ID_X2000, +}; + +struct ingenic_soc_info { + enum ingenic_usb_phy_version version; + + void (*usb_phy_init)(struct phy *phy); +}; + +struct ingenic_usb_phy { + const struct ingenic_soc_info *soc_info; + + struct phy *phy; + void __iomem *base; + struct clk *clk; + struct regulator *vcc_supply; +}; + +static int ingenic_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + int err; + u32 reg; + + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(&phy->dev, "Unable to start clock: %d\n", err); + return err; + } + + priv->soc_info->usb_phy_init(phy); + + /* Wait for PHY to reset */ + usleep_range(30, 300); + reg 
= readl(priv->base + REG_USBPCR_OFFSET); + writel(reg & ~USBPCR_POR, priv->base + REG_USBPCR_OFFSET); + usleep_range(300, 1000); + + return 0; +} + +static int ingenic_usb_phy_exit(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + + clk_disable_unprepare(priv->clk); + regulator_disable(priv->vcc_supply); + + return 0; +} + +static int ingenic_usb_phy_power_on(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + int err; + + err = regulator_enable(priv->vcc_supply); + if (err) { + dev_err(&phy->dev, "Unable to enable VCC: %d\n", err); + return err; + } + + return 0; +} + +static int ingenic_usb_phy_power_off(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + + regulator_disable(priv->vcc_supply); + + return 0; +} + +static int ingenic_usb_phy_set_mode(struct phy *phy, + enum phy_mode mode, int submode) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + switch (mode) { + case PHY_MODE_USB_HOST: + reg = readl(priv->base + REG_USBPCR_OFFSET); + u32p_replace_bits(®, 1, USBPCR_USB_MODE); + u32p_replace_bits(®, 0, USBPCR_VBUSVLDEXT); + u32p_replace_bits(®, 0, USBPCR_VBUSVLDEXTSEL); + u32p_replace_bits(®, 0, USBPCR_OTG_DISABLE); + writel(reg, priv->base + REG_USBPCR_OFFSET); + + break; + case PHY_MODE_USB_DEVICE: + reg = readl(priv->base + REG_USBPCR_OFFSET); + u32p_replace_bits(®, 0, USBPCR_USB_MODE); + u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXT); + u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXTSEL); + u32p_replace_bits(®, 1, USBPCR_OTG_DISABLE); + writel(reg, priv->base + REG_USBPCR_OFFSET); + + break; + case PHY_MODE_USB_OTG: + reg = readl(priv->base + REG_USBPCR_OFFSET); + u32p_replace_bits(®, 1, USBPCR_USB_MODE); + u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXT); + u32p_replace_bits(®, 1, USBPCR_VBUSVLDEXTSEL); + u32p_replace_bits(®, 0, USBPCR_OTG_DISABLE); + writel(reg, priv->base + REG_USBPCR_OFFSET); + + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct phy_ops ingenic_usb_phy_ops = { + .init = ingenic_usb_phy_init, + .exit = ingenic_usb_phy_exit, + .power_on = ingenic_usb_phy_power_on, + .power_off = ingenic_usb_phy_power_off, + .set_mode = ingenic_usb_phy_set_mode, + .owner = THIS_MODULE, +}; + +static void jz4770_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + reg = USBPCR_AVLD_REG | USBPCR_COMMONONN | USBPCR_POR | + FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_ALWAYS) | + FIELD_PREP(USBPCR_COMPDISTUNE_MASK, USBPCR_COMPDISTUNE_DFT) | + FIELD_PREP(USBPCR_OTGTUNE_MASK, USBPCR_OTGTUNE_DFT) | + FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DFT) | + FIELD_PREP(USBPCR_TXFSLSTUNE_MASK, USBPCR_TXFSLSTUNE_DFT) | + FIELD_PREP(USBPCR_TXRISETUNE_MASK, USBPCR_TXRISETUNE_DFT) | + FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_DFT); + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static void jz4775_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL | + USBPCR1_WORD_IF_16BIT; + writel(reg, priv->base + REG_USBPCR1_OFFSET); + + reg = USBPCR_COMMONONN | USBPCR_POR | + FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_75PPT); + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static void jz4780_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_USB_SEL | + USBPCR1_WORD_IF_16BIT; + 
writel(reg, priv->base + REG_USBPCR1_OFFSET); + + reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR; + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static void x1000_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT; + writel(reg, priv->base + REG_USBPCR1_OFFSET); + + reg = USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR | + FIELD_PREP(USBPCR_SQRXTUNE_MASK, USBPCR_SQRXTUNE_DCR_20PCT) | + FIELD_PREP(USBPCR_TXHSXVTUNE_MASK, USBPCR_TXHSXVTUNE_DCR_15MV) | + FIELD_PREP(USBPCR_TXVREFTUNE_MASK, USBPCR_TXVREFTUNE_INC_25PPT); + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static void x1830_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + /* rdt */ + writel(USBRDT_VBFIL_EN | USBRDT_UTMI_RST, priv->base + REG_USBRDT_OFFSET); + + reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_WORD_IF_16BIT | + USBPCR1_DMPD | USBPCR1_DPPD; + writel(reg, priv->base + REG_USBPCR1_OFFSET); + + reg = USBPCR_VBUSVLDEXT | USBPCR_TXPREEMPHTUNE | USBPCR_COMMONONN | USBPCR_POR | + FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG); + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static void x2000_usb_phy_init(struct phy *phy) +{ + struct ingenic_usb_phy *priv = phy_get_drvdata(phy); + u32 reg; + + reg = readl(priv->base + REG_USBPCR1_OFFSET) | USBPCR1_DPPD | USBPCR1_DMPD; + writel(reg & ~USBPCR1_PORT_RST, priv->base + REG_USBPCR1_OFFSET); + + reg = USBPCR_POR | FIELD_PREP(USBPCR_IDPULLUP_MASK, USBPCR_IDPULLUP_OTG); + writel(reg, priv->base + REG_USBPCR_OFFSET); +} + +static const struct ingenic_soc_info jz4770_soc_info = { + .version = ID_JZ4770, + + .usb_phy_init = jz4770_usb_phy_init, +}; + +static const struct ingenic_soc_info jz4775_soc_info = { + .version = ID_JZ4775, + + .usb_phy_init = jz4775_usb_phy_init, +}; + +static const struct ingenic_soc_info jz4780_soc_info = { + .version = ID_JZ4780, + + .usb_phy_init = jz4780_usb_phy_init, +}; + +static const struct ingenic_soc_info x1000_soc_info = { + .version = ID_X1000, + + .usb_phy_init = x1000_usb_phy_init, +}; + +static const struct ingenic_soc_info x1830_soc_info = { + .version = ID_X1830, + + .usb_phy_init = x1830_usb_phy_init, +}; + +static const struct ingenic_soc_info x2000_soc_info = { + .version = ID_X2000, + + .usb_phy_init = x2000_usb_phy_init, +}; + +static int ingenic_usb_phy_probe(struct platform_device *pdev) +{ + struct ingenic_usb_phy *priv; + struct phy_provider *provider; + struct device *dev = &pdev->dev; + int err; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->soc_info = device_get_match_data(dev); + if (!priv->soc_info) { + dev_err(dev, "Error: No device match found\n"); + return -ENODEV; + } + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) { + dev_err(dev, "Failed to map registers\n"); + return PTR_ERR(priv->base); + } + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + err = PTR_ERR(priv->clk); + if (err != -EPROBE_DEFER) + dev_err(dev, "Failed to get clock\n"); + return err; + } + + priv->vcc_supply = devm_regulator_get(dev, "vcc"); + if (IS_ERR(priv->vcc_supply)) { + err = PTR_ERR(priv->vcc_supply); + if (err != -EPROBE_DEFER) + dev_err(dev, "Failed to get regulator\n"); + return err; + } + + priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops); + if (IS_ERR(priv)) + return PTR_ERR(priv); + + phy_set_drvdata(priv->phy, 
priv); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id ingenic_usb_phy_of_matches[] = { + { .compatible = "ingenic,jz4770-phy", .data = &jz4770_soc_info }, + { .compatible = "ingenic,jz4775-phy", .data = &jz4775_soc_info }, + { .compatible = "ingenic,jz4780-phy", .data = &jz4780_soc_info }, + { .compatible = "ingenic,x1000-phy", .data = &x1000_soc_info }, + { .compatible = "ingenic,x1830-phy", .data = &x1830_soc_info }, + { .compatible = "ingenic,x2000-phy", .data = &x2000_soc_info }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ingenic_usb_phy_of_matches); + +static struct platform_driver ingenic_usb_phy_driver = { + .probe = ingenic_usb_phy_probe, + .driver = { + .name = "ingenic-usb-phy", + .of_match_table = ingenic_usb_phy_of_matches, + }, +}; +module_platform_driver(ingenic_usb_phy_driver); + +MODULE_AUTHOR("周琰杰 (Zhou Yanjie) "); +MODULE_AUTHOR("漆鹏振 (Qi Pengzhen) "); +MODULE_AUTHOR("Paul Cercueil "); +MODULE_DESCRIPTION("Ingenic SoCs USB PHY driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From a8cef928276bbf8254d0067cda21dbe6210ea1aa Mon Sep 17 00:00:00 2001 From: Chris Ruehl Date: Wed, 2 Dec 2020 16:25:06 +0800 Subject: phy: rockchip-emmc: output tap delay dt property Update the rockchip-emmc phy to set the otapdlysec register with a dt property. This was mentioned from Brian Norris when he sent the path to set the default value in the driver. This patch add a dt property 'output-tapdelay-select' u32 and allow to set the 0x0-0xf. If not set in dts, the old default 0x4 applies. Tested with our customized rk3399 to tune the eMMC. Signed-off-by: Chris Ruehl Link: https://lore.kernel.org/r/20201202082507.3536-2-chris.ruehl@gtsys.com.hk Signed-off-by: Vinod Koul --- drivers/phy/rockchip/phy-rockchip-emmc.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c index 48e2d75b1004..1e424f263e7a 100644 --- a/drivers/phy/rockchip/phy-rockchip-emmc.c +++ b/drivers/phy/rockchip/phy-rockchip-emmc.c @@ -65,6 +65,8 @@ #define PHYCTRL_OTAPDLYENA 0x1 #define PHYCTRL_OTAPDLYENA_MASK 0x1 #define PHYCTRL_OTAPDLYENA_SHIFT 0xb +#define PHYCTRL_OTAPDLYSEL_DEFAULT 0x4 +#define PHYCTRL_OTAPDLYSEL_MAXVALUE 0xf #define PHYCTRL_OTAPDLYSEL_MASK 0xf #define PHYCTRL_OTAPDLYSEL_SHIFT 0x7 #define PHYCTRL_REN_STRB_DISABLE 0x0 @@ -85,6 +87,7 @@ struct rockchip_emmc_phy { struct clk *emmcclk; unsigned int drive_impedance; unsigned int enable_strobe_pulldown; + unsigned int output_tapdelay_select; }; static int rockchip_emmc_phy_power(struct phy *phy, bool on_off) @@ -296,7 +299,7 @@ static int rockchip_emmc_phy_power_on(struct phy *phy) /* Output tap delay */ regmap_write(rk_phy->reg_base, rk_phy->reg_offset + GRF_EMMCPHY_CON0, - HIWORD_UPDATE(4, + HIWORD_UPDATE(rk_phy->output_tapdelay_select, PHYCTRL_OTAPDLYSEL_MASK, PHYCTRL_OTAPDLYSEL_SHIFT)); @@ -372,6 +375,7 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev) rk_phy->reg_base = grf; rk_phy->drive_impedance = PHYCTRL_DR_50OHM; rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_DISABLE; + rk_phy->output_tapdelay_select = PHYCTRL_OTAPDLYSEL_DEFAULT; if (!of_property_read_u32(dev->of_node, "drive-impedance-ohm", &val)) rk_phy->drive_impedance = convert_drive_impedance_ohm(pdev, val); @@ -379,6 +383,13 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev) if (of_property_read_bool(dev->of_node, 
"enable-strobe-pulldown")) rk_phy->enable_strobe_pulldown = PHYCTRL_REN_STRB_ENABLE; + if (!of_property_read_u32(dev->of_node, "output-tapdelay-select", &val)) { + if (val <= PHYCTRL_OTAPDLYSEL_MAXVALUE) + rk_phy->output_tapdelay_select = val; + else + dev_err(dev, "output-tapdelay-select exceeds limit, apply default\n"); + } + generic_phy = devm_phy_create(dev, dev->of_node, &ops); if (IS_ERR(generic_phy)) { dev_err(dev, "failed to create PHY\n"); -- cgit v1.2.3 From 18b648322d44e8d8950de8aa31dba9bad0a33b89 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sat, 5 Dec 2020 14:41:46 +0530 Subject: phy: mediatek: statify mtk_hdmi_phy_driver mtk_hdmi_phy_driver is not declared as static, so statify it. drivers/phy/mediatek/phy-mtk-hdmi.c:204:24: warning: symbol 'mtk_hdmi_phy_driver' was not declared. Should it be static? Acked-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/20201205091146.3184305-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/phy/mediatek/phy-mtk-hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c index 47c029d4b270..c5c61f5a9ea0 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi.c @@ -201,7 +201,7 @@ static const struct of_device_id mtk_hdmi_phy_match[] = { {}, }; -struct platform_driver mtk_hdmi_phy_driver = { +static struct platform_driver mtk_hdmi_phy_driver = { .probe = mtk_hdmi_phy_probe, .driver = { .name = "mediatek-hdmi-phy", -- cgit v1.2.3 From 85261c1ff156eb60fc26c378748387f2e85c6878 Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Mon, 16 Nov 2020 14:56:11 +0200 Subject: mei: bus: add vtag support Add API to support vtag in communication on mei bus. Add mei_cldev_send_vtag, mei_cldev_recv_vtag and mei_cldev_recv_nonblock_vtag functions to allow sending a message with vtag set and to receive vtag of an incoming message. 
Cc: Sean Z Huang Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20201116125612.1660971-1-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus-fixup.c | 13 +++--- drivers/misc/mei/bus.c | 101 +++++++++++++++++++++++++++++++++++-------- drivers/misc/mei/client.c | 6 ++- drivers/misc/mei/mei_dev.h | 4 +- include/linux/mei_cl_bus.h | 6 +++ 5 files changed, 104 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 4e30fa98fe7d..6cc3145bb716 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -148,7 +148,7 @@ static int mei_osver(struct mei_cl_device *cldev) os_ver = (struct mei_os_ver *)fwcaps->data; os_ver->os_type = OSTYPE_LINUX; - return __mei_cl_send(cldev->cl, buf, size, mode); + return __mei_cl_send(cldev->cl, buf, size, 0, mode); } #define MKHI_FWVER_BUF_LEN (sizeof(struct mkhi_msg_hdr) + \ @@ -169,7 +169,7 @@ static int mei_fwver(struct mei_cl_device *cldev) req.hdr.group_id = MKHI_GEN_GROUP_ID; req.hdr.command = MKHI_GEN_GET_FW_VERSION_CMD; - ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), + ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, MEI_CL_IO_TX_BLOCKING); if (ret < 0) { dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); @@ -177,7 +177,7 @@ static int mei_fwver(struct mei_cl_device *cldev) } ret = 0; - bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), 0, + bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0, MKHI_RCV_TIMEOUT); if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) { /* @@ -324,13 +324,15 @@ static int mei_nfc_if_version(struct mei_cl *cl, }; struct mei_nfc_reply *reply = NULL; size_t if_version_length; + u8 vtag; int bytes_recv, ret; bus = cl->dev; WARN_ON(mutex_is_locked(&bus->device_lock)); - ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), MEI_CL_IO_TX_BLOCKING); + ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0, + MEI_CL_IO_TX_BLOCKING); if (ret < 0) { dev_err(bus->dev, "Could not send IF version cmd\n"); return ret; @@ -344,7 +346,8 @@ static int mei_nfc_if_version(struct mei_cl *cl, return -ENOMEM; ret = 0; - bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, 0, 0); + bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag, + 0, 0); if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { dev_err(bus->dev, "Could not read IF version\n"); ret = -EIO; diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index 7fe48baa103a..2907db260fba 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -26,11 +26,12 @@ * @cl: host client * @buf: buffer to send * @length: buffer length + * @vtag: virtual tag * @mode: sending mode * * Return: written size bytes or < 0 on error */ -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, unsigned int mode) { struct mei_device *bus; @@ -86,6 +87,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, rets = -ENOMEM; goto out; } + cb->vtag = vtag; cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL); cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); @@ -106,11 +108,12 @@ out: * @buf: buffer to receive * @length: buffer length * @mode: io mode + * @vtag: virtual tag * @timeout: recv timeout, 0 for infinite timeout * * Return: read size in bytes of < 0 on error */ -ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t 
__mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout) { struct mei_device *bus; @@ -196,6 +199,8 @@ copy: r_length = min_t(size_t, length, cb->buf_idx); memcpy(buf, cb->buf.data, r_length); rets = r_length; + if (vtag) + *vtag = cb->vtag; free: mei_cl_del_rd_completed(cl, cb); @@ -206,40 +211,87 @@ out: } /** - * mei_cldev_send - me device send (write) + * mei_cldev_send_vtag - me device send with vtag (write) * * @cldev: me client device * @buf: buffer to send * @length: buffer length + * @vtag: virtual tag * - * Return: written size in bytes or < 0 on error + * Return: + * * written size in bytes + * * < 0 on error */ -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) + +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 vtag) { struct mei_cl *cl = cldev->cl; - return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING); + return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING); } -EXPORT_SYMBOL_GPL(mei_cldev_send); +EXPORT_SYMBOL_GPL(mei_cldev_send_vtag); /** - * mei_cldev_recv_nonblock - non block client receive (read) + * mei_cldev_recv_vtag - client receive with vtag (read) * * @cldev: me client device * @buf: buffer to receive * @length: buffer length + * @vtag: virtual tag * - * Return: read size in bytes of < 0 on error - * -EAGAIN if function will block. + * Return: + * * read size in bytes + * * < 0 on error */ -ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, - size_t length) + +ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag) { struct mei_cl *cl = cldev->cl; - return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK, 0); + return __mei_cl_recv(cl, buf, length, vtag, 0, 0); } -EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); +EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag); + +/** + * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * @vtag: virtual tag + * + * Return: + * * read size in bytes + * * -EAGAIN if function will block. + * * < 0 on other error + */ +ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, + size_t length, u8 *vtag) +{ + struct mei_cl *cl = cldev->cl; + + return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag); + +/** + * mei_cldev_send - me device send (write) + * + * @cldev: me client device + * @buf: buffer to send + * @length: buffer length + * + * Return: + * * written size in bytes + * * < 0 on error + */ +ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length) +{ + return mei_cldev_send_vtag(cldev, buf, length, 0); +} +EXPORT_SYMBOL_GPL(mei_cldev_send); /** * mei_cldev_recv - client receive (read) @@ -252,12 +304,27 @@ EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); */ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length) { - struct mei_cl *cl = cldev->cl; - - return __mei_cl_recv(cl, buf, length, 0, 0); + return mei_cldev_recv_vtag(cldev, buf, length, NULL); } EXPORT_SYMBOL_GPL(mei_cldev_recv); +/** + * mei_cldev_recv_nonblock - non block client receive (read) + * + * @cldev: me client device + * @buf: buffer to receive + * @length: buffer length + * + * Return: read size in bytes of < 0 on error + * -EAGAIN if function will block. 
+ */ +ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, + size_t length) +{ + return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL); +} +EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock); + /** * mei_cl_bus_rx_work - dispatch rx event for a bus device * diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index d5c3f7d54634..a56d41321f32 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1306,7 +1306,7 @@ struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag) * mei_cl_fp_by_vtag - obtain the file pointer by vtag * * @cl: host client - * @vtag: vm tag + * @vtag: virtual tag * * Return: * * A file pointer - on success @@ -1317,7 +1317,9 @@ const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag) struct mei_cl_vtag *vtag_l; list_for_each_entry(vtag_l, &cl->vtag_map, list) - if (vtag_l->vtag == vtag) + /* The client on bus has one fixed fp */ + if ((cl->cldev && mei_cldev_enabled(cl->cldev)) || + vtag_l->vtag == vtag) return vtag_l->fp; return ERR_PTR(-ENOENT); diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index 2f4cc1a8aae8..8c395bfdf6f3 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h @@ -340,9 +340,9 @@ struct mei_hw_ops { /* MEI bus API*/ void mei_cl_bus_rescan_work(struct work_struct *work); void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); -ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length, u8 vtag, unsigned int mode); -ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, +ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, unsigned int mode, unsigned long timeout); bool mei_cl_bus_rx_event(struct mei_cl *cl); bool mei_cl_bus_notify_event(struct mei_cl *cl); diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 52aa4821093a..959ad7d850b4 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -95,6 +95,12 @@ ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 vtag); +ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, + u8 *vtag); +ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, + size_t length, u8 *vtag); int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, -- cgit v1.2.3 From ee64ed8153abf6668d662ba451ecf539cad63017 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Mon, 16 Nov 2020 14:56:12 +0200 Subject: mei: bus: enable pavp device. Enable protected audio video path client on mei client bus. 
Cc: Sean Z Huang Signed-off-by: Tomas Winkler Link: https://lore.kernel.org/r/20201116125612.1660971-2-tomas.winkler@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus-fixup.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 6cc3145bb716..d8e760b11ae3 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -33,6 +33,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; #define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \ 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04) +#define MEI_UUID_PAVP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \ + 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1) + #define MEI_UUID_ANY NULL_UUID_LE /** @@ -491,6 +494,7 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), MEI_FIXUP(MEI_UUID_HDCP, whitelist), MEI_FIXUP(MEI_UUID_ANY, vt_support), + MEI_FIXUP(MEI_UUID_PAVP, whitelist), }; /** -- cgit v1.2.3 From 1dfd7b7849ea8bdbbe26d280d7b43c4ae66e730f Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Tue, 8 Dec 2020 10:23:00 +0530 Subject: phy: ingenic: depend on HAS_IOMEM The driver uses devm_ioremap_resource() which will not be built if CONFIG_HAS_IOMEM is not selected, so add depends on it to fix the build failure on few archs s390-linux-ld: drivers/phy/ingenic/phy-ingenic-usb.o: in function `ingenic_usb_phy_probe': >> phy-ingenic-usb.c:(.text+0xb66): undefined reference to `devm_platform_ioremap_resource' Reported-by: kernel test robot Link: https://lore.kernel.org/r/20201208045300.3637026-1-vkoul@kernel.org Signed-off-by: Vinod Koul --- drivers/phy/ingenic/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/phy/ingenic/Kconfig b/drivers/phy/ingenic/Kconfig index 912b14e512cb..f23cc109324b 100644 --- a/drivers/phy/ingenic/Kconfig +++ b/drivers/phy/ingenic/Kconfig @@ -6,6 +6,7 @@ config PHY_INGENIC_USB tristate "Ingenic SoCs USB PHY Driver" depends on MIPS || COMPILE_TEST depends on USB_SUPPORT + depends on HAS_IOMEM select GENERIC_PHY help This driver provides USB PHY support for the USB controller found -- cgit v1.2.3 From b097efba9580d1f7cbc80cda84e768983e3de541 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 7 Dec 2020 15:09:37 +0000 Subject: drm/mediatek: avoid dereferencing a null hdmi_phy on an error message Currently there is a null pointer check for hdmi_phy that implies it may be null, however a dev_err messages dereferences this potential null pointer. Avoid a null pointer dereference by only emitting the dev_err message if hdmi_phy is non-null. It is a moot point if the error message needs to be printed at all, but since this is a relatively new piece of code it may be useful to keep the message in for the moment in case there are unforseen errors that need to be reported. 
Fixes: be28b6507c46 ("drm/mediatek: separate hdmi phy to different file") Signed-off-by: Colin Ian King Addresses-Coverity: ("Dereference after null check") Link: https://lore.kernel.org/r/20201207150937.170435-1-colin.king@canonical.com [vkoul: fix indent of return call] Signed-off-by: Vinod Koul --- drivers/phy/mediatek/phy-mtk-hdmi.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c index c5c61f5a9ea0..45be8aa724f3 100644 --- a/drivers/phy/mediatek/phy-mtk-hdmi.c +++ b/drivers/phy/mediatek/phy-mtk-hdmi.c @@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy) hdmi_phy->conf->hdmi_phy_disable_tmds) return &mtk_hdmi_phy_dev_ops; - dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); - return NULL; + if (hdmi_phy) + dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n"); + return NULL; } static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy, -- cgit v1.2.3 From 83be0b84fe846edf0c722fefe225482d5f0d7395 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Tue, 8 Dec 2020 11:26:49 -0700 Subject: coresight: tmc-etr: Fix barrier packet insertion for perf buffer When the ETR is used in perf mode with a larger buffer (configured via sysfs or the default size of 1M) than the perf aux buffer size, we end up inserting the barrier packet at the wrong offset, while moving the offset forward. i.e, instead of the "new moved offset", we insert it at the current hardware buffer offset. These packets will not be visible as they are never copied and could lead to corruption in the trace decoding side, as the decoder is not aware that it needs to reset the decoding. Fixes: ec13c78d7b45 ("coresight: tmc-etr: Add barrier packets when moving offset forward") Cc: Mathieu Poirier Cc: stable@vger.kernel.org Reported-by: Al Grant Tested-by: Mike Leach Signed-off-by: Suzuki K Poulose Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201208182651.1597945-2-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-tmc-etr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index a31a4d7ae25e..bf5230e39c5b 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -1552,7 +1552,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev, /* Insert barrier packets at the beginning, if there was an overflow */ if (lost) - tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset); + tmc_etr_buf_insert_barrier_packet(etr_buf, offset); tmc_etr_sync_perf_buffer(etr_perf, offset, size); /* -- cgit v1.2.3 From 45fe7befe0db5e61cd3c846315f0ac48541e8445 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 8 Dec 2020 11:26:50 -0700 Subject: coresight: remove broken __exit annotations Functions that are annotated __exit are discarded for built-in drivers, but the .remove callback in a device driver must still be kept around to allow bind/unbind operations. 
There is now a linker warning for the discarded symbol references: `tmc_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-tmc-core.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-tmc-core.o `tpiu_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-tpiu.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-tpiu.o `etb_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-etb10.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-etb10.o `static_funnel_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-funnel.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-funnel.o `dynamic_funnel_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-funnel.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-funnel.o `static_replicator_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-replicator.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-replicator.o `dynamic_replicator_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-replicator.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-replicator.o `catu_remove' referenced in section `.data' of drivers/hwtracing/coresight/coresight-catu.o: defined in discarded section `.exit.text' of drivers/hwtracing/coresight/coresight-catu.o Remove all those annotations. Fixes: 8b0cf82677d1 ("coresight: stm: Allow to build coresight-stm as a module") Reviewed-by: Stephen Boyd Signed-off-by: Arnd Bergmann Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201208182651.1597945-3-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-catu.c | 2 +- drivers/hwtracing/coresight/coresight-cti-core.c | 2 +- drivers/hwtracing/coresight/coresight-etb10.c | 2 +- drivers/hwtracing/coresight/coresight-etm3x-core.c | 4 ++-- drivers/hwtracing/coresight/coresight-etm4x-core.c | 4 ++-- drivers/hwtracing/coresight/coresight-funnel.c | 6 +++--- drivers/hwtracing/coresight/coresight-replicator.c | 6 +++--- drivers/hwtracing/coresight/coresight-stm.c | 2 +- drivers/hwtracing/coresight/coresight-tmc-core.c | 2 +- drivers/hwtracing/coresight/coresight-tpiu.c | 2 +- 10 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index 99430f6cf5a5..a61313f320bd 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -567,7 +567,7 @@ out: return ret; } -static int __exit catu_remove(struct amba_device *adev) +static int catu_remove(struct amba_device *adev) { struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index d28eae93e55c..61dbc1afd8da 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -836,7 +836,7 @@ static void cti_device_release(struct device *dev) if (drvdata->csdev_release) drvdata->csdev_release(dev); } -static int __exit cti_remove(struct amba_device *adev) +static int cti_remove(struct amba_device *adev) { struct cti_drvdata *drvdata 
= dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c index 1b320ab581ca..0cf6f0b947b6 100644 --- a/drivers/hwtracing/coresight/coresight-etb10.c +++ b/drivers/hwtracing/coresight/coresight-etb10.c @@ -803,7 +803,7 @@ err_misc_register: return ret; } -static int __exit etb_remove(struct amba_device *adev) +static int etb_remove(struct amba_device *adev) { struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c index 47f610b1c2b1..5bf5a5a4ce6d 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c @@ -902,14 +902,14 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) return 0; } -static void __exit clear_etmdrvdata(void *info) +static void clear_etmdrvdata(void *info) { int cpu = *(int *)info; etmdrvdata[cpu] = NULL; } -static int __exit etm_remove(struct amba_device *adev) +static int etm_remove(struct amba_device *adev) { struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index d78a37b6592c..3ad5c2a01033 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -1575,14 +1575,14 @@ static struct amba_cs_uci_id uci_id_etm4[] = { } }; -static void __exit clear_etmdrvdata(void *info) +static void clear_etmdrvdata(void *info) { int cpu = *(int *)info; etmdrvdata[cpu] = NULL; } -static int __exit etm4_remove(struct amba_device *adev) +static int etm4_remove(struct amba_device *adev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index 39be46b74dfe..071c723227db 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -274,7 +274,7 @@ out_disable_clk: return ret; } -static int __exit funnel_remove(struct device *dev) +static int funnel_remove(struct device *dev) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev); @@ -328,7 +328,7 @@ static int static_funnel_probe(struct platform_device *pdev) return ret; } -static int __exit static_funnel_remove(struct platform_device *pdev) +static int static_funnel_remove(struct platform_device *pdev) { funnel_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -370,7 +370,7 @@ static int dynamic_funnel_probe(struct amba_device *adev, return funnel_probe(&adev->dev, &adev->res); } -static int __exit dynamic_funnel_remove(struct amba_device *adev) +static int dynamic_funnel_remove(struct amba_device *adev) { return funnel_remove(&adev->dev); } diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 6772f23e5c4b..7e2a2b7f503f 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -291,7 +291,7 @@ out_disable_clk: return ret; } -static int __exit replicator_remove(struct device *dev) +static int replicator_remove(struct device *dev) { struct replicator_drvdata *drvdata = dev_get_drvdata(dev); @@ -318,7 +318,7 @@ static int static_replicator_probe(struct platform_device *pdev) return ret; } -static int __exit static_replicator_remove(struct platform_device *pdev) 
+static int static_replicator_remove(struct platform_device *pdev) { replicator_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -388,7 +388,7 @@ static int dynamic_replicator_probe(struct amba_device *adev, return replicator_probe(&adev->dev, &adev->res); } -static int __exit dynamic_replicator_remove(struct amba_device *adev) +static int dynamic_replicator_remove(struct amba_device *adev) { return replicator_remove(&adev->dev); } diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c index 32d29704206b..99791773f682 100644 --- a/drivers/hwtracing/coresight/coresight-stm.c +++ b/drivers/hwtracing/coresight/coresight-stm.c @@ -951,7 +951,7 @@ stm_unregister: return ret; } -static int __exit stm_remove(struct amba_device *adev) +static int stm_remove(struct amba_device *adev) { struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index 5653e0945c74..8169dff5a9f6 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -559,7 +559,7 @@ out: spin_unlock_irqrestore(&drvdata->spinlock, flags); } -static int __exit tmc_remove(struct amba_device *adev) +static int tmc_remove(struct amba_device *adev) { struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 010762a46087..d5dfee9ee556 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -173,7 +173,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(drvdata->csdev); } -static int __exit tpiu_remove(struct amba_device *adev) +static int tpiu_remove(struct amba_device *adev) { struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev); -- cgit v1.2.3 From e72550928ff052ca721777875bd23a7abf3efb13 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Tue, 8 Dec 2020 11:26:51 -0700 Subject: coresight: etm4x: Modify core-commit to avoid HiSilicon ETM overflow The ETM device can't keep up with the core pipeline when cpu core is at full speed. This may cause overflow within core and its ETM. This is a common phenomenon on ETM devices. On HiSilicon Hip08 platform, a specific feature is added to set core pipeline. So commit rate can be reduced manually to avoid ETM overflow. Reviewed-by: Suzuki K Poulose Signed-off-by: Qi Liu [Modified changelog title and Kconfig description] Signed-off-by: Mathieu Poirier Link: https://lore.kernel.org/r/20201208182651.1597945-4-mathieu.poirier@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/Kconfig | 8 ++ drivers/hwtracing/coresight/coresight-etm4x-core.c | 98 ++++++++++++++++++++++ drivers/hwtracing/coresight/coresight-etm4x.h | 8 ++ 3 files changed, 114 insertions(+) (limited to 'drivers') diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig index c1198245461d..7b44ba22cbe1 100644 --- a/drivers/hwtracing/coresight/Kconfig +++ b/drivers/hwtracing/coresight/Kconfig @@ -110,6 +110,14 @@ config CORESIGHT_SOURCE_ETM4X To compile this driver as a module, choose M here: the module will be called coresight-etm4x. 
+config ETM4X_IMPDEF_FEATURE + bool "Control implementation defined overflow support in ETM 4.x driver" + depends on CORESIGHT_SOURCE_ETM4X + help + This control provides implementation define control for CoreSight + ETM 4.x tracer module that can't reduce commit rate automatically. + This avoids overflow between the ETM tracer module and the cpu core. + config CORESIGHT_STM tristate "CoreSight System Trace Macrocell driver" depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64 diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 3ad5c2a01033..b20b6ff17cf6 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -3,6 +3,7 @@ * Copyright (c) 2014, The Linux Foundation. All rights reserved. */ +#include #include #include #include @@ -28,7 +29,9 @@ #include #include #include + #include +#include #include #include @@ -103,6 +106,97 @@ struct etm4_enable_arg { int rc; }; +#ifdef CONFIG_ETM4X_IMPDEF_FEATURE + +#define HISI_HIP08_AMBA_ID 0x000b6d01 +#define ETM4_AMBA_MASK 0xfffff +#define HISI_HIP08_CORE_COMMIT_MASK 0x3000 +#define HISI_HIP08_CORE_COMMIT_SHIFT 12 +#define HISI_HIP08_CORE_COMMIT_FULL 0b00 +#define HISI_HIP08_CORE_COMMIT_LVL_1 0b01 +#define HISI_HIP08_CORE_COMMIT_REG sys_reg(3, 1, 15, 2, 5) + +struct etm4_arch_features { + void (*arch_callback)(bool enable); +}; + +static bool etm4_hisi_match_pid(unsigned int id) +{ + return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID; +} + +static void etm4_hisi_config_core_commit(bool enable) +{ + u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 : + HISI_HIP08_CORE_COMMIT_FULL; + u64 val; + + /* + * bit 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together + * to set core-commit, 2'b00 means cpu is at full speed, 2'b01, + * 2'b10, 2'b11 mean reduce pipeline speed, and 2'b01 means level-1 + * speed(minimun value). So bit 12 and 13 should be cleared together. 
+ */ + val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG); + val &= ~HISI_HIP08_CORE_COMMIT_MASK; + val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT; + write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG); +} + +static struct etm4_arch_features etm4_features[] = { + [ETM4_IMPDEF_HISI_CORE_COMMIT] = { + .arch_callback = etm4_hisi_config_core_commit, + }, + {}, +}; + +static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata) +{ + struct etm4_arch_features *ftr; + int bit; + + for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) { + ftr = &etm4_features[bit]; + + if (ftr->arch_callback) + ftr->arch_callback(true); + } +} + +static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata) +{ + struct etm4_arch_features *ftr; + int bit; + + for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) { + ftr = &etm4_features[bit]; + + if (ftr->arch_callback) + ftr->arch_callback(false); + } +} + +static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, + unsigned int id) +{ + if (etm4_hisi_match_pid(id)) + set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features); +} +#else +static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata) +{ +} + +static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata) +{ +} + +static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, + unsigned int id) +{ +} +#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */ + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -110,6 +204,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) struct device *etm_dev = &drvdata->csdev->dev; CS_UNLOCK(drvdata->base); + etm4_enable_arch_specific(drvdata); etm4_os_unlock(drvdata); @@ -479,6 +574,7 @@ static void etm4_disable_hw(void *info) int i; CS_UNLOCK(drvdata->base); + etm4_disable_arch_specific(drvdata); if (!drvdata->skip_power_up) { /* power can be removed from the trace unit now */ @@ -1563,6 +1659,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) drvdata->boot_enable = true; } + etm4_check_arch_features(drvdata, id->id); + return 0; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index eefc7371c6c4..3dd3e0633328 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -8,6 +8,7 @@ #include #include +#include #include "coresight-priv.h" /* @@ -203,6 +204,11 @@ /* Interpretation of resource numbers change at ETM v4.3 architecture */ #define ETM4X_ARCH_4V3 0x43 +enum etm_impdef_type { + ETM4_IMPDEF_HISI_CORE_COMMIT, + ETM4_IMPDEF_FEATURE_MAX, +}; + /** * struct etmv4_config - configuration information related to an ETMv4 * @mode: Controls various modes supported by this ETM. @@ -415,6 +421,7 @@ struct etmv4_save_state { * @state_needs_restore: True when there is context to restore after PM exit * @skip_power_up: Indicates if an implementation can skip powering up * the trace unit. + * @arch_features: Bitmap of arch features of etmv4 devices. 
*/ struct etmv4_drvdata { void __iomem *base; @@ -463,6 +470,7 @@ struct etmv4_drvdata { struct etmv4_save_state *save_state; bool state_needs_restore; bool skip_power_up; + DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX); }; /* Address comparator access types */ -- cgit v1.2.3 From d1b928ee1cfa965a3327bbaa59bfa005d97fa0fe Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Tue, 17 Nov 2020 09:22:29 +0800 Subject: speakup: fix uninitialized flush_lock The flush_lock is uninitialized, use DEFINE_SPINLOCK to define and initialize flush_lock. Fixes: c6e3fd22cd53 ("Staging: add speakup to the staging directory") Reported-by: Hulk Robot Reviewed-by: Samuel Thibault Signed-off-by: Yang Yingliang Link: https://lore.kernel.org/r/20201117012229.3395186-1-yangyingliang@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/accessibility/speakup/speakup_dectlk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c index 780214b5ca16..ab6d61e80b1c 100644 --- a/drivers/accessibility/speakup/speakup_dectlk.c +++ b/drivers/accessibility/speakup/speakup_dectlk.c @@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth); static int in_escape; static int is_flushing; -static spinlock_t flush_lock; +static DEFINE_SPINLOCK(flush_lock); static DECLARE_WAIT_QUEUE_HEAD(flush); static struct var_t vars[] = { -- cgit v1.2.3 From 0f966cba95c78029f491b433ea95ff38f414a761 Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Fri, 20 Nov 2020 15:37:43 -0800 Subject: binder: add flag to clear buffer on txn complete Add a per-transaction flag to indicate that the buffer must be cleared when the transaction is complete to prevent copies of sensitive data from being preserved in memory. 
Signed-off-by: Todd Kjos Link: https://lore.kernel.org/r/20201120233743.3617529-1-tkjos@google.com Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 1 + drivers/android/binder_alloc.c | 48 +++++++++++++++++++++++++++++++++++++ drivers/android/binder_alloc.h | 4 +++- include/uapi/linux/android/binder.h | 1 + 4 files changed, 53 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 20b08f52e788..1338209f9f86 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2756,6 +2756,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; + t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); if (binder_alloc_copy_user_to_buffer( diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 2f846b7ae8b8..7caf74ad2405 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -696,6 +696,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_insert_free_buffer(alloc, buffer); } +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); /** * binder_alloc_free_buf() - free a binder buffer * @alloc: binder_alloc for this proc @@ -706,6 +708,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, void binder_alloc_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffer) { + /* + * We could eliminate the call to binder_alloc_clear_buf() + * from binder_alloc_deferred_release() by moving this to + * binder_alloc_free_buf_locked(). However, that could + * increase contention for the alloc mutex if clear_on_free + * is used frequently for large buffers. The mutex is not + * needed for correctness here. 
+ */ + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } mutex_lock(&alloc->mutex); binder_free_buf_locked(alloc, buffer); mutex_unlock(&alloc->mutex); @@ -802,6 +816,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) /* Transaction should already have been freed */ BUG_ON(buffer->transaction); + if (buffer->clear_on_free) { + binder_alloc_clear_buf(alloc, buffer); + buffer->clear_on_free = false; + } binder_free_buf_locked(alloc, buffer); buffers++; } @@ -1135,6 +1153,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc, return lru_page->page_ptr; } +/** + * binder_alloc_clear_buf() - zero out buffer + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be cleared + * + * memset the given buffer to 0 + */ +static void binder_alloc_clear_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t bytes = binder_alloc_buffer_size(alloc, buffer); + binder_size_t buffer_offset = 0; + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + void *kptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + kptr = kmap(page) + pgoff; + memset(kptr, 0, size); + kunmap(page); + bytes -= size; + buffer_offset += size; + } +} + /** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 55d8b4106766..6e8e001381af 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -23,6 +23,7 @@ struct binder_transaction; * @entry: entry alloc->buffers * @rb_node: node for allocated_buffers/free_buffers rb trees * @free: %true if buffer is free + * @clear_on_free: %true if buffer must be zeroed after use * @allow_user_free: %true if user is allowed to free buffer * @async_transaction: %true if buffer is in use for an async txn * @debug_id: unique ID for debugging @@ -41,9 +42,10 @@ struct binder_buffer { struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; + unsigned clear_on_free:1; unsigned allow_user_free:1; unsigned async_transaction:1; - unsigned debug_id:29; + unsigned debug_id:28; struct binder_transaction *transaction; diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index f1ce2c4c077e..ec84ad106568 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -248,6 +248,7 @@ enum transaction_flags { TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ + TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ }; struct binder_transaction_data { -- cgit v1.2.3 From 54da51a841ea4c9b6cbac60ef85f55b4695a9612 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 4 Dec 2020 19:22:50 +0000 Subject: firmware: fix a spelling mistake "managament" -> "management" in Kconfig There is a spelling mistake in the Kconfig help text. Fix it. 
Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20201204192250.1151316-1-colin.king@canonical.com Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 3315e3c21586..3f14dffb9669 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -51,7 +51,7 @@ config ARM_SCPI_PROTOCOL provides a mechanism for inter-processor communication between SCP and AP. - SCP controls most of the power managament on the Application + SCP controls most of the power management on the Application Processors. It offers control and management of: the core/cluster power states, various power domain DVFS including the core/cluster, certain system clocks configuration, thermal sensors and many -- cgit v1.2.3 From 3f618ab3323407ee4c6a6734a37eb6e9663ebfb9 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 7 Dec 2020 17:05:33 +0000 Subject: lkdtm: don't move ctors to .rodata When building with KASAN and LKDTM, clang may implictly generate an asan.module_ctor function in the LKDTM rodata object. The Makefile moves the lkdtm_rodata_do_nothing() function into .rodata by renaming the file's .text section to .rodata, and consequently also moves the ctor function into .rodata, leading to a boot time crash (splat below) when the ctor is invoked by do_ctors(). Let's prevent this by marking the function as noinstr rather than notrace, and renaming the file's .noinstr.text to .rodata. Marking the function as noinstr will prevent tracing and kprobes, and will inhibit any undesireable compiler instrumentation. The ctor function (if any) will be placed in .text and will work correctly. Example splat before this patch is applied: [ 0.916359] Unable to handle kernel execute from non-executable memory at virtual address ffffa0006b60f5ac [ 0.922088] Mem abort info: [ 0.922828] ESR = 0x8600000e [ 0.923635] EC = 0x21: IABT (current EL), IL = 32 bits [ 0.925036] SET = 0, FnV = 0 [ 0.925838] EA = 0, S1PTW = 0 [ 0.926714] swapper pgtable: 4k pages, 48-bit VAs, pgdp=00000000427b3000 [ 0.928489] [ffffa0006b60f5ac] pgd=000000023ffff003, p4d=000000023ffff003, pud=000000023fffe003, pmd=0068000042000f01 [ 0.931330] Internal error: Oops: 8600000e [#1] PREEMPT SMP [ 0.932806] Modules linked in: [ 0.933617] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.10.0-rc7 #2 [ 0.935620] Hardware name: linux,dummy-virt (DT) [ 0.936924] pstate: 40400005 (nZcv daif +PAN -UAO -TCO BTYPE=--) [ 0.938609] pc : asan.module_ctor+0x0/0x14 [ 0.939759] lr : do_basic_setup+0x4c/0x70 [ 0.940889] sp : ffff27b600177e30 [ 0.941815] x29: ffff27b600177e30 x28: 0000000000000000 [ 0.943306] x27: 0000000000000000 x26: 0000000000000000 [ 0.944803] x25: 0000000000000000 x24: 0000000000000000 [ 0.946289] x23: 0000000000000001 x22: 0000000000000000 [ 0.947777] x21: ffffa0006bf4a890 x20: ffffa0006befb6c0 [ 0.949271] x19: ffffa0006bef9358 x18: 0000000000000068 [ 0.950756] x17: fffffffffffffff8 x16: 0000000000000000 [ 0.952246] x15: 0000000000000000 x14: 0000000000000000 [ 0.953734] x13: 00000000838a16d5 x12: 0000000000000001 [ 0.955223] x11: ffff94000da74041 x10: dfffa00000000000 [ 0.956715] x9 : 0000000000000000 x8 : ffffa0006b60f5ac [ 0.958199] x7 : f9f9f9f9f9f9f9f9 x6 : 000000000000003f [ 0.959683] x5 : 0000000000000040 x4 : 0000000000000000 [ 0.961178] x3 : ffffa0006bdc15a0 x2 : 0000000000000005 [ 0.962662] x1 : 00000000000000f9 x0 : ffffa0006bef9350 [ 0.964155] Call trace: [ 0.964844] 
asan.module_ctor+0x0/0x14 [ 0.965895] kernel_init_freeable+0x158/0x198 [ 0.967115] kernel_init+0x14/0x19c [ 0.968104] ret_from_fork+0x10/0x30 [ 0.969110] Code: 00000003 00000000 00000000 00000000 (00000000) [ 0.970815] ---[ end trace b5339784e20d015c ]--- Cc: Arnd Bergmann Cc: Greg Kroah-Hartman Cc: Kees Cook Acked-by: Kees Cook Signed-off-by: Mark Rutland Link: https://lore.kernel.org/r/20201207170533.10738-1-mark.rutland@arm.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/lkdtm/Makefile | 2 +- drivers/misc/lkdtm/rodata.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index c70b3822013f..30c8ac24635d 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n OBJCOPYFLAGS := OBJCOPYFLAGS_rodata_objcopy.o := \ - --rename-section .text=.rodata,alloc,readonly,load + --rename-section .noinstr.text=.rodata,alloc,readonly,load targets += rodata.o rodata_objcopy.o $(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE $(call if_changed,objcopy) diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c index 58d180af72cf..baacb876d1d9 100644 --- a/drivers/misc/lkdtm/rodata.c +++ b/drivers/misc/lkdtm/rodata.c @@ -5,7 +5,7 @@ */ #include "lkdtm.h" -void notrace lkdtm_rodata_do_nothing(void) +void noinstr lkdtm_rodata_do_nothing(void) { /* Does nothing. We just want an architecture agnostic "return". */ } -- cgit v1.2.3 From a73a0712745300f17480e729cef4422cb9c9c2df Mon Sep 17 00:00:00 2001 From: Zhou Xingxing Date: Mon, 7 Dec 2020 17:49:05 +0800 Subject: misc: isl29003: Fix typo for get/set mode The operation of get/set mode was same with get/set resolution. It is a typo absolutely. This patch updates these bits operated by get/set mode. Signed-off-by: Zhou Xingxing Link: https://lore.kernel.org/r/1607334545-2091-1-git-send-email-zhou_x1@hoperun.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/isl29003.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c index c12406f610d5..703d20e83ebd 100644 --- a/drivers/misc/isl29003.c +++ b/drivers/misc/isl29003.c @@ -127,13 +127,13 @@ static int isl29003_set_resolution(struct i2c_client *client, int res) static int isl29003_get_mode(struct i2c_client *client) { return __isl29003_read_reg(client, ISL29003_REG_COMMAND, - ISL29003_RES_MASK, ISL29003_RES_SHIFT); + ISL29003_MODE_MASK, ISL29003_MODE_SHIFT); } static int isl29003_set_mode(struct i2c_client *client, int mode) { return __isl29003_write_reg(client, ISL29003_REG_COMMAND, - ISL29003_RES_MASK, ISL29003_RES_SHIFT, mode); + ISL29003_MODE_MASK, ISL29003_MODE_SHIFT, mode); } /* power_state */ -- cgit v1.2.3 From 997754f114efeb290310ea8e39c8b04c2ce93961 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 19 Nov 2020 11:31:51 +0100 Subject: misc/sgi-xp: Replace in_interrupt() usage The usage of in_interrupt() in xpc_partition_disengaged() is clearly intended to avoid canceling the timeout timer when the function is invoked from the timer callback. While in_interrupt() is deprecated and ill defined as it does not provide what the name suggests it catches the intended case. Add an argument to xpc_partition_disengaged() which is true if called from timer and otherwise false. Use del_timer_sync() instead of del_singleshot_timer_sync() which is the same thing. 
Note: This does not prevent reentrancy into the function as the function has no concurrency control and timer callback and regular task context callers can happen concurrently on different CPUs or the timer can interrupt the task context before it is able to cancel it. While the only driver which is providing the arch_xpc_ops callbacks (xpc_uv) seems not to have a reentrancy problem and the only negative effect would be a double dev_info() entry in dmesg, the whole mechanism is conceptually broken. But that's not subject of this cleanup endeavour and left as an exercise to the folks who might have interest to make that code fully correct. [bigeasy: Add the argument, use del_timer_sync().] Cc: Greg Kroah-Hartman Cc: Cliff Whickman Cc: Arnd Bergmann Cc: Robin Holt Cc: Steve Wahl Cc: Dimitri Sivanich Cc: Russ Anderson Reviewed-by: Steve Wahl Signed-off-by: Thomas Gleixner Signed-off-by: Sebastian Andrzej Siewior Link: https://lore.kernel.org/r/20201119103151.ppo45mj53ulbxjx4@linutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/misc/sgi-xp/xpc.h | 1 + drivers/misc/sgi-xp/xpc_main.c | 2 +- drivers/misc/sgi-xp/xpc_partition.c | 20 +++++++++++++++----- 3 files changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 71db60edff65..225f2bb84e39 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -634,6 +634,7 @@ extern int xpc_setup_rsvd_page(void); extern void xpc_teardown_rsvd_page(void); extern int xpc_identify_activate_IRQ_sender(void); extern int xpc_partition_disengaged(struct xpc_partition *); +extern int xpc_partition_disengaged_from_timer(struct xpc_partition *part); extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); extern void xpc_mark_partition_inactive(struct xpc_partition *); extern void xpc_discovery(void); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index e5244fc1dab3..84610bbcc131 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -179,7 +179,7 @@ xpc_timeout_partition_disengage(struct timer_list *t) DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); - (void)xpc_partition_disengaged(part); + xpc_partition_disengaged_from_timer(part); DBUG_ON(part->disengage_timeout != 0); DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 57df06820bae..1999d02923de 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -262,8 +262,8 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, * from us. Though we requested the remote partition to deactivate with regard * to us, we really only need to wait for the other side to disengage from us. 
*/ -int -xpc_partition_disengaged(struct xpc_partition *part) +static int __xpc_partition_disengaged(struct xpc_partition *part, + bool from_timer) { short partid = XPC_PARTID(part); int disengaged; @@ -289,9 +289,9 @@ xpc_partition_disengaged(struct xpc_partition *part) } part->disengage_timeout = 0; - /* cancel the timer function, provided it's not us */ - if (!in_interrupt()) - del_singleshot_timer_sync(&part->disengage_timer); + /* Cancel the timer function if not called from it */ + if (!from_timer) + del_timer_sync(&part->disengage_timer); DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING && part->act_state != XPC_P_AS_INACTIVE); @@ -303,6 +303,16 @@ xpc_partition_disengaged(struct xpc_partition *part) return disengaged; } +int xpc_partition_disengaged(struct xpc_partition *part) +{ + return __xpc_partition_disengaged(part, false); +} + +int xpc_partition_disengaged_from_timer(struct xpc_partition *part) +{ + return __xpc_partition_disengaged(part, true); +} + /* * Mark specified partition as active. */ -- cgit v1.2.3 From 31dcb6c30a26d32650ce134820f27de3c675a45a Mon Sep 17 00:00:00 2001 From: Anant Thazhemadam Date: Mon, 23 Nov 2020 04:15:34 +0530 Subject: misc: vmw_vmci: fix kernel info-leak by initializing dbells in vmci_ctx_get_chkpt_doorbells() A kernel-infoleak was reported by syzbot, which was caused because dbells was left uninitialized. Using kzalloc() instead of kmalloc() fixes this issue. Reported-by: syzbot+a79e17c39564bedf0930@syzkaller.appspotmail.com Tested-by: syzbot+a79e17c39564bedf0930@syzkaller.appspotmail.com Signed-off-by: Anant Thazhemadam Link: https://lore.kernel.org/r/20201122224534.333471-1-anant.thazhemadam@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/vmw_vmci/vmci_context.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c index 16695366ec92..26ff49fdf0f7 100644 --- a/drivers/misc/vmw_vmci/vmci_context.c +++ b/drivers/misc/vmw_vmci/vmci_context.c @@ -743,7 +743,7 @@ static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context, return VMCI_ERROR_MORE_DATA; } - dbells = kmalloc(data_size, GFP_ATOMIC); + dbells = kzalloc(data_size, GFP_ATOMIC); if (!dbells) return VMCI_ERROR_NO_MEM; -- cgit v1.2.3 From d928061c3143de36c17650ce7b60760fefb8336c Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Wed, 2 Dec 2020 14:31:24 +0800 Subject: misc: rtsx: modify en/disable aspm function enable/disable device ASPM function: changed write ASPM configuration directly to use write register Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20201202063124.18262-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 5d15607027e9..c87f791bdcb5 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -89,9 +89,15 @@ static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable) if (pcr->aspm_enabled == enable) return; - pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_ASPMC, - enable ? pcr->aspm_en : 0); + if (pcr->aspm_en & 0x02) + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 | + FORCE_ASPM_CTL1, enable ? 
0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1); + else + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 | + FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1); + + if (!enable && (pcr->aspm_en & 0x02)) + mdelay(10); pcr->aspm_enabled = enable; } -- cgit v1.2.3 From 121e9c6b5c4cad63e078f1fc5890f265521d2994 Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Wed, 2 Dec 2020 14:32:28 +0800 Subject: misc: rtsx: modify and fix init_hw function changed rtsx_pci_disable_aspm() to rtsx_disable_aspm() do not access ASPM configuration directly changed pcie_capability_write_word() to _clear_and_set_word() make sure only change PCI_EXP_LNKCTL bit8 make sure ASPM disable after extra_init_hw() Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20201202063228.18319-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rtsx_pcr.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index c87f791bdcb5..3612063cab09 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1289,7 +1289,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) /* Wait SSC power stable */ udelay(200); - rtsx_pci_disable_aspm(pcr); + rtsx_disable_aspm(pcr); if (pcr->ops->optimize_phy) { err = pcr->ops->optimize_phy(pcr); if (err < 0) @@ -1363,8 +1363,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) rtsx_pci_init_ocp(pcr); /* Enable clk_request_n to enable clock power management */ - pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, - PCI_EXP_LNKCTL_CLKREQ_EN); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + 0, PCI_EXP_LNKCTL_CLKREQ_EN); /* Enter L1 when host tx idle */ pci_write_config_byte(pdev, 0x70F, 0x5B); @@ -1374,6 +1374,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); + /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
*/ -- cgit v1.2.3 From 5b4258f6721f41b092c63f6ee71be76e9616718b Mon Sep 17 00:00:00 2001 From: Ricky Wu Date: Wed, 2 Dec 2020 14:58:57 +0800 Subject: misc: rtsx: rts5249 support runtime PM rtsx_pcr: add callback functions to support runtime PM add delay_work to put device to D3 after idle over 10 sec rts5249: add extra init flow for rtd3 and set rtd3_en from config setting rtsx_pci_sdmmc: child device support autosuspend Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/20201202065857.19412-1-ricky_wu@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5249.c | 26 +++++++-- drivers/misc/cardreader/rtsx_pcr.c | 106 ++++++++++++++++++++++++++++++++++++- drivers/misc/cardreader/rtsx_pcr.h | 1 + drivers/mmc/host/rtsx_pci_sdmmc.c | 16 ++++++ include/linux/rtsx_pci.h | 2 + 5 files changed, 145 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index b85279f1fc5e..b2676e7f5027 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -73,6 +73,9 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr) pci_read_config_dword(pdev, PCR_SETTING_REG2, ®); pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg); + + pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg); + if (rtsx_check_mmc_support(reg)) pcr->extra_caps |= EXTRA_CAPS_NO_MMC; pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg); @@ -278,15 +281,28 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); - if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN); - rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00); - rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20); + + if (pcr->rtd3_en) { + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x01); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x30); + } else { + rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x01); + rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x33); + } } else { - rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30); - rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00); + if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) { + rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20); + } else { + rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30); + rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00); + } } + /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. 
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 3612063cab09..2700d1997750 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include "rtsx_pcr.h" #include "rts5261.h" @@ -150,6 +152,12 @@ void rtsx_pci_start_run(struct rtsx_pcr *pcr) if (pcr->remove_pci) return; + if (pcr->rtd3_en) + if (pcr->is_runtime_suspended) { + pm_runtime_get(&(pcr->pci->dev)); + pcr->is_runtime_suspended = false; + } + if (pcr->state != PDEV_STAT_RUN) { pcr->state = PDEV_STAT_RUN; if (pcr->ops->enable_auto_blink) @@ -1081,6 +1089,16 @@ static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) rtsx_comm_pm_power_saving(pcr); } +static void rtsx_pci_rtd3_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work); + + pcr_dbg(pcr, "--> %s\n", __func__); + if (!pcr->is_runtime_suspended) + pm_runtime_put(&(pcr->pci->dev)); +} + static void rtsx_pci_idle_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); @@ -1100,6 +1118,9 @@ static void rtsx_pci_idle_work(struct work_struct *work) rtsx_pm_power_saving(pcr); mutex_unlock(&pcr->pcr_mutex); + + if (pcr->rtd3_en) + mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000)); } static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state) @@ -1579,6 +1600,15 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, rtsx_pcr_cells[i].platform_data = handle; rtsx_pcr_cells[i].pdata_size = sizeof(*handle); } + + if (pcr->rtd3_en) { + INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work); + pm_runtime_allow(&pcidev->dev); + pm_runtime_enable(&pcidev->dev); + pcr->is_runtime_suspended = false; + } + + ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells, ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL); if (ret < 0) @@ -1616,6 +1646,9 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) struct pcr_handle *handle = pci_get_drvdata(pcidev); struct rtsx_pcr *pcr = handle->pcr; + if (pcr->rtd3_en) + pm_runtime_get_noresume(&pcr->pci->dev); + pcr->remove_pci = true; /* Disable interrupts at the pcr level */ @@ -1626,6 +1659,8 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) cancel_delayed_work_sync(&pcr->carddet_work); cancel_delayed_work_sync(&pcr->idle_work); + if (pcr->rtd3_en) + cancel_delayed_work_sync(&pcr->rtd3_work); mfd_remove_devices(&pcidev->dev); @@ -1643,6 +1678,11 @@ static void rtsx_pci_remove(struct pci_dev *pcidev) idr_remove(&rtsx_pci_idr, pcr->id); spin_unlock(&rtsx_pci_lock); + if (pcr->rtd3_en) { + pm_runtime_disable(&pcr->pci->dev); + pm_runtime_put_noidle(&pcr->pci->dev); + } + kfree(pcr->slots); kfree(pcr); kfree(handle); @@ -1724,13 +1764,77 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev) pci_disable_msi(pcr->pci); } +static int rtsx_pci_runtime_suspend(struct device *device) +{ + struct pci_dev *pcidev = to_pci_dev(device); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + cancel_delayed_work(&pcr->carddet_work); + cancel_delayed_work(&pcr->rtd3_work); + cancel_delayed_work(&pcr->idle_work); + + mutex_lock(&pcr->pcr_mutex); + rtsx_pci_power_off(pcr, HOST_ENTER_S3); + + free_irq(pcr->irq, (void *)pcr); + + mutex_unlock(&pcr->pcr_mutex); + + pcr->is_runtime_suspended = true; + + return 0; +} + +static int rtsx_pci_runtime_resume(struct 
device *device) +{ + struct pci_dev *pcidev = to_pci_dev(device); + struct pcr_handle *handle; + struct rtsx_pcr *pcr; + int ret = 0; + + handle = pci_get_drvdata(pcidev); + pcr = handle->pcr; + dev_dbg(&(pcidev->dev), "--> %s\n", __func__); + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00); + rtsx_pci_acquire_irq(pcr); + synchronize_irq(pcr->irq); + + if (pcr->ops->fetch_vendor_settings) + pcr->ops->fetch_vendor_settings(pcr); + + rtsx_pci_init_hw(pcr); + + if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) { + pcr->slots[RTSX_SD_CARD].card_event( + pcr->slots[RTSX_SD_CARD].p_dev); + } + + schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200)); + + mutex_unlock(&pcr->pcr_mutex); + return ret; +} + #else /* CONFIG_PM */ #define rtsx_pci_shutdown NULL +#define rtsx_pci_runtime_suspend NULL +#define rtsx_pic_runtime_resume NULL #endif /* CONFIG_PM */ -static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume); +static const struct dev_pm_ops rtsx_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume) + SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL) +}; static struct pci_driver rtsx_pci_driver = { .name = DRV_NAME_RTSX_PCI, diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index fe5f4ca0f937..daf057c4eea6 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -90,6 +90,7 @@ static inline u8 map_sd_drive(int idx) #define rtsx_check_mmc_support(reg) ((reg) & 0x10) #define rtsx_reg_to_rtd3(reg) ((reg) & 0x02) +#define rtsx_reg_to_rtd3_uhsii(reg) ((reg) & 0x04) #define rtsx_reg_to_aspm(reg) (((reg) >> 28) & 0x03) #define rtsx_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 26) & 0x03) #define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03) diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index eb395e144207..a7b5ad17bcf5 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -20,6 +20,7 @@ #include #include #include +#include struct realtek_pci_sdmmc { struct platform_device *pdev; @@ -1343,6 +1344,7 @@ static void init_extra_caps(struct realtek_pci_sdmmc *host) static void realtek_init_host(struct realtek_pci_sdmmc *host) { struct mmc_host *mmc = host->mmc; + struct rtsx_pcr *pcr = host->pcr; mmc->f_min = 250000; mmc->f_max = 208000000; @@ -1350,6 +1352,8 @@ static void realtek_init_host(struct realtek_pci_sdmmc *host) mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + if (pcr->rtd3_en) + mmc->caps = mmc->caps | MMC_CAP_AGGRESSIVE_PM; mmc->caps2 = MMC_CAP2_NO_PRESCAN_POWERUP | MMC_CAP2_FULL_PWR_CYCLE; mmc->max_current_330 = 400; mmc->max_current_180 = 800; @@ -1407,6 +1411,13 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) realtek_init_host(host); + if (pcr->rtd3_en) { + pm_runtime_set_autosuspend_delay(&pdev->dev, 5000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } + + mmc_add_host(mmc); return 0; @@ -1426,6 +1437,11 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) pcr->slots[RTSX_SD_CARD].card_event = NULL; mmc = host->mmc; + if (pcr->rtd3_en) { + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); + } + cancel_work_sync(&host->work); mutex_lock(&host->host_mutex); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 
745f5e73f99a..f895ccabbe29 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1174,6 +1174,7 @@ struct rtsx_pcr { struct delayed_work carddet_work; struct delayed_work idle_work; + struct delayed_work rtd3_work; spinlock_t lock; struct mutex pcr_mutex; @@ -1183,6 +1184,7 @@ struct rtsx_pcr { unsigned int cur_clock; bool remove_pci; bool msi_en; + bool is_runtime_suspended; #define EXTRA_CAPS_SD_SDR50 (1 << 0) #define EXTRA_CAPS_SD_SDR104 (1 << 1) -- cgit v1.2.3 From 505b08777d78868e6c47051a4e7f011718a3aba1 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 1 Dec 2020 22:01:47 +0100 Subject: misc: genwqe: Use dma_set_mask_and_coherent to simplify code 'pci_set_dma_mask()' + 'pci_set_consistent_dma_mask()' can be replaced by an equivalent 'dma_set_mask_and_coherent()' which is much less verbose. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20201201210147.7917-1-christophe.jaillet@wanadoo.fr Signed-off-by: Greg Kroah-Hartman --- drivers/misc/genwqe/card_base.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index c9b886618071..2e1befbd1ad9 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -1089,24 +1089,9 @@ static int genwqe_pci_setup(struct genwqe_dev *cd) } /* check for 64-bit DMA address supported (DAC) */ - if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { - err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pci_dev->dev, - "err: DMA64 consistent mask error\n"); - err = -EIO; - goto out_release_resources; - } /* check for 32-bit DMA address supported (SAC) */ - } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { - err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pci_dev->dev, - "err: DMA32 consistent mask error\n"); - err = -EIO; - goto out_release_resources; - } - } else { + if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) || + dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { dev_err(&pci_dev->dev, "err: neither DMA32 nor DMA64 supported\n"); err = -EIO; -- cgit v1.2.3 From 1749c90489f2afa6b59dbf3ab59d58a9014c84a1 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Thu, 19 Nov 2020 20:49:18 +0800 Subject: misc: pci_endpoint_test: fix return value of error branch We return 'err' in the error branch, but this variable may be set as zero before. Fix it by setting 'err' as a negative value before we goto the error label. 
Fixes: e03327122e2c ("pci_endpoint_test: Add 2 ioctl commands") Reported-by: Hulk Robot Signed-off-by: Xiongfeng Wang Link: https://lore.kernel.org/r/1605790158-6780-1-git-send-email-wangxiongfeng2@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/pci_endpoint_test.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index ea9c2c085298..eff481ce08ee 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -811,8 +811,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) + if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) { + err = -EINVAL; goto err_disable_irq; + } for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { @@ -849,8 +851,10 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, goto err_ida_remove; } - if (!pci_endpoint_test_request_irq(test)) + if (!pci_endpoint_test_request_irq(test)) { + err = -EINVAL; goto err_kfree_test_name; + } misc_device = &test->miscdev; misc_device->minor = MISC_DYNAMIC_MINOR; -- cgit v1.2.3 From 660745a569463da96595a3287af02461882b86e6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 6 Dec 2020 08:13:52 +0100 Subject: vme: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'ca91cx42_alloc_consistent()' and 'tsi148_alloc_consistent()' GFP_KERNEL can be used because both functions are called only from 'vme_alloc_consistent()' (vme.c). This function is only called from the 'vme_user_probe()' probe function and no lock is taken in the between. When memory is allocated in 'ca91cx42_crcsr_init()' and 'tsi148_crcsr_init()' GFP_KERNEL can be used because both functions are called only from their corresponding probe function and no lock is taken in the between. 
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20201206071352.21949-1-christophe.jaillet@wanadoo.fr Signed-off-by: Greg Kroah-Hartman --- drivers/vme/bridges/vme_ca91cx42.c | 13 +++++++------ drivers/vme/bridges/vme_tsi148.c | 13 +++++++------ 2 files changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c index ea938dc29c5e..439b0edeca08 100644 --- a/drivers/vme/bridges/vme_ca91cx42.c +++ b/drivers/vme/bridges/vme_ca91cx42.c @@ -1510,7 +1510,7 @@ static void *ca91cx42_alloc_consistent(struct device *parent, size_t size, /* Find pci_dev container of dev */ pdev = to_pci_dev(parent); - return pci_alloc_consistent(pdev, size, dma); + return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL); } static void ca91cx42_free_consistent(struct device *parent, size_t size, @@ -1521,7 +1521,7 @@ static void ca91cx42_free_consistent(struct device *parent, size_t size, /* Find pci_dev container of dev */ pdev = to_pci_dev(parent); - pci_free_consistent(pdev, size, vaddr, dma); + dma_free_coherent(&pdev->dev, size, vaddr, dma); } /* @@ -1555,8 +1555,9 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge, } /* Allocate mem for CR/CSR image */ - bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE, - &bridge->crcsr_bus); + bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev, + VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus, GFP_KERNEL); if (!bridge->crcsr_kernel) { dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR " "image\n"); @@ -1589,8 +1590,8 @@ static void 
ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge, /* Free image */ iowrite32(0, bridge->base + VCSR_TO); - pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel, - bridge->crcsr_bus); + dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE, + bridge->crcsr_kernel, bridge->crcsr_bus); } static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c index 1227ea937059..be9051b02f24 100644 --- a/drivers/vme/bridges/vme_tsi148.c +++ b/drivers/vme/bridges/vme_tsi148.c @@ -2155,7 +2155,7 @@ static void *tsi148_alloc_consistent(struct device *parent, size_t size, /* Find pci_dev container of dev */ pdev = to_pci_dev(parent); - return pci_alloc_consistent(pdev, size, dma); + return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL); } static void tsi148_free_consistent(struct device *parent, size_t size, @@ -2166,7 +2166,7 @@ static void tsi148_free_consistent(struct device *parent, size_t size, /* Find pci_dev container of dev */ pdev = to_pci_dev(parent); - pci_free_consistent(pdev, size, vaddr, dma); + dma_free_coherent(&pdev->dev, size, vaddr, dma); } /* @@ -2192,8 +2192,9 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge, bridge = tsi148_bridge->driver_priv; /* Allocate mem for CR/CSR image */ - bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE, - &bridge->crcsr_bus); + bridge->crcsr_kernel = dma_alloc_coherent(&pdev->dev, + VME_CRCSR_BUF_SIZE, + &bridge->crcsr_bus, GFP_KERNEL); if (!bridge->crcsr_kernel) { dev_err(tsi148_bridge->parent, "Failed to allocate memory for " "CR/CSR image\n"); @@ -2261,8 +2262,8 @@ static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge, iowrite32be(0, bridge->base + TSI148_LCSR_CROU); iowrite32be(0, bridge->base + TSI148_LCSR_CROL); - pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel, - bridge->crcsr_bus); + dma_free_coherent(&pdev->dev, VME_CRCSR_BUF_SIZE, + bridge->crcsr_kernel, bridge->crcsr_bus); } static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id) -- cgit v1.2.3 From 61243c03dde238170001093a29716c2369e8358f Mon Sep 17 00:00:00 2001 From: Laurentiu Tudor Date: Thu, 5 Nov 2020 17:30:49 +0200 Subject: bus: fsl-mc: add back accidentally dropped error check A previous patch accidentally dropped an error check, so add it back. 
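For reference, the usual devm_ioremap_resource() pattern looks like the sketch below (a hypothetical probe, not the fsl-mc code itself); the restored check matters because devm_ioremap_resource() returns an ERR_PTR() value rather than NULL on failure.

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                regs = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(regs))               /* ERR_PTR(), never NULL, on error */
                        return PTR_ERR(regs);
        }

        return 0;
}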
Fixes: aef85b56c3c1 ("bus: fsl-mc: MC control registers are not always available") Signed-off-by: Laurentiu Tudor Link: https://lore.kernel.org/r/20201105153050.19662-1-laurentiu.tudor@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/fsl-mc-bus.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 76a6ee505d33..806766b1b45f 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -967,8 +967,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mc); plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (plat_res) + if (plat_res) { mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res); + if (IS_ERR(mc->fsl_mc_regs)) + return PTR_ERR(mc->fsl_mc_regs); + } if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) { -- cgit v1.2.3 From 74abd1f2d49a2a9660eadd9486da333554c4a23b Mon Sep 17 00:00:00 2001 From: Laurentiu Tudor Date: Thu, 5 Nov 2020 17:30:50 +0200 Subject: bus: fsl-mc: make sure MC firmware is up and running Some bootloaders might pause the MC firmware before starting the kernel to ensure that MC will not cause faults as soon as SMMU probes due to no configuration being in place for the firmware. Make sure that MC is resumed at probe time as its SMMU setup should be done by now. Also included, a comment fix on how PL and BMT bits are packed in the StreamID. Signed-off-by: Laurentiu Tudor Link: https://lore.kernel.org/r/20201105153050.19662-2-laurentiu.tudor@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/fsl-mc-bus.c | 42 +++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 806766b1b45f..b8e6acdf932e 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -60,6 +60,9 @@ struct fsl_mc_addr_translation_range { phys_addr_t start_phys_addr; }; +#define FSL_MC_GCR1 0x0 +#define GCR1_P1_STOP BIT(31) + #define FSL_MC_FAPR 0x28 #define MC_FAPR_PL BIT(18) #define MC_FAPR_BMT BIT(17) @@ -973,21 +976,36 @@ static int fsl_mc_bus_probe(struct platform_device *pdev) return PTR_ERR(mc->fsl_mc_regs); } - if (mc->fsl_mc_regs && IS_ENABLED(CONFIG_ACPI) && - !dev_of_node(&pdev->dev)) { - mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR); + if (mc->fsl_mc_regs) { /* - * HW ORs the PL and BMT bit, places the result in bit 15 of - * the StreamID and ORs in the ICID. Calculate it accordingly. + * Some bootloaders pause the MC firmware before booting the + * kernel so that MC will not cause faults as soon as the + * SMMU probes due to the fact that there's no configuration + * in place for MC. + * At this point MC should have all its SMMU setup done so make + * sure it is resumed. */ - mc_stream_id = (mc_stream_id & 0xffff) | + writel(readl(mc->fsl_mc_regs + FSL_MC_GCR1) & (~GCR1_P1_STOP), + mc->fsl_mc_regs + FSL_MC_GCR1); + + if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) { + mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR); + /* + * HW ORs the PL and BMT bit, places the result in bit + * 14 of the StreamID and ORs in the ICID. Calculate it + * accordingly. + */ + mc_stream_id = (mc_stream_id & 0xffff) | ((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ? 
- 0x4000 : 0); - error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT, - &mc_stream_id); - if (error) - dev_warn(&pdev->dev, "failed to configure dma: %d.\n", - error); + BIT(14) : 0); + error = acpi_dma_configure_id(&pdev->dev, + DEV_DMA_COHERENT, + &mc_stream_id); + if (error) + dev_warn(&pdev->dev, + "failed to configure dma: %d.\n", + error); + } } /* -- cgit v1.2.3 From ca43fec96b439b0e957121065404c0042a730501 Mon Sep 17 00:00:00 2001 From: Laurentiu Tudor Date: Tue, 24 Nov 2020 13:12:00 +0200 Subject: bus: fsl-mc: added missing fields to dprc_rsp_get_obj_region structure 'type' and 'flags' fields were missing from dprc_rsp_get_obj_region structure therefore the MC Bus driver was not receiving proper flags from MC like DPRC_REGION_CACHEABLE. Reviewed-by: Ioana Ciornei Signed-off-by: Cristian Sovaiala Signed-off-by: Laurentiu Tudor Link: https://lore.kernel.org/r/20201124111200.1391-1-laurentiu.tudor@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/dprc.c | 2 ++ drivers/bus/fsl-mc/fsl-mc-private.h | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c index 57b097caf255..27b0a01bad9b 100644 --- a/drivers/bus/fsl-mc/dprc.c +++ b/drivers/bus/fsl-mc/dprc.c @@ -576,6 +576,8 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io, rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params; region_desc->base_offset = le64_to_cpu(rsp_params->base_offset); region_desc->size = le32_to_cpu(rsp_params->size); + region_desc->type = rsp_params->type; + region_desc->flags = le32_to_cpu(rsp_params->flags); if (dprc_major_ver > 6 || (dprc_major_ver == 6 && dprc_minor_ver >= 3)) region_desc->base_address = le64_to_cpu(rsp_params->base_addr); else diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h index 85ca5fdee581..c932387641fa 100644 --- a/drivers/bus/fsl-mc/fsl-mc-private.h +++ b/drivers/bus/fsl-mc/fsl-mc-private.h @@ -211,12 +211,13 @@ struct dprc_cmd_get_obj_region { struct dprc_rsp_get_obj_region { /* response word 0 */ - __le64 pad; + __le64 pad0; /* response word 1 */ __le64 base_offset; /* response word 2 */ __le32 size; - __le32 pad2; + u8 type; + u8 pad2[3]; /* response word 3 */ __le32 flags; __le32 pad3; -- cgit v1.2.3 From 3d70fb03711c37bc64e8e9aea5830f498835f6bf Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Fri, 4 Dec 2020 16:02:47 +0800 Subject: bus: fsl-mc: fix error return code in fsl_mc_object_allocate() Fix to return a negative error code from the error handling case instead of 0, as done elsewhere in this function. 
Fixes: 197f4d6a4a00 ("staging: fsl-mc: fsl-mc object allocator driver") Reported-by: Hulk Robot Acked-by: Laurentiu Tudor Signed-off-by: Zhang Changzhong Link: https://lore.kernel.org/r/1607068967-31991-1-git-send-email-zhangchangzhong@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/fsl-mc-allocator.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index e71a6f52ea0c..2d7c764bb7dc 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, goto error; mc_adev = resource->data; - if (!mc_adev) + if (!mc_adev) { + error = -EINVAL; goto error; + } mc_adev->consumer_link = device_link_add(&mc_dev->dev, &mc_adev->dev, -- cgit v1.2.3 From fe34761d9f5febafd189de608eb9f50d78aac107 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Mon, 23 Nov 2020 18:48:39 +0200 Subject: bus: fsl-mc: simplify DPRC version check Because the minimum supported DPRC version is 6.0, there is no need to check for incompatible 6.x versions lower to the minimum one. Just remove the second half of the check to simplify the logic. Signed-off-by: Ioana Ciornei Link: https://lore.kernel.org/r/20201123164839.1668409-1-ciorneiioana@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/fsl-mc/dprc-driver.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c index 91dc015963a8..68488a7ad0d6 100644 --- a/drivers/bus/fsl-mc/dprc-driver.c +++ b/drivers/bus/fsl-mc/dprc-driver.c @@ -670,9 +670,7 @@ int dprc_setup(struct fsl_mc_device *mc_dev) goto error_cleanup_open; } - if (major_ver < DPRC_MIN_VER_MAJOR || - (major_ver == DPRC_MIN_VER_MAJOR && - minor_ver < DPRC_MIN_VER_MINOR)) { + if (major_ver < DPRC_MIN_VER_MAJOR) { dev_err(&mc_dev->dev, "ERROR: DPRC version %d.%d not supported\n", major_ver, minor_ver); -- cgit v1.2.3 From dfd7f2c1c532efaeff6084970bb60ec2f2e44191 Mon Sep 17 00:00:00 2001 From: Eddie James Date: Fri, 20 Nov 2020 11:19:29 +1030 Subject: fsi: Aspeed: Add mutex to protect HW access There is nothing to prevent multiple commands being executed simultaneously. Add a mutex to prevent this. 
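The locking scheme is the common one sketched below (simplified, with hypothetical names): take the mutex around each hardware access path and funnel every exit through a single unlock label so no early return can leak the lock.

struct example_master {
        struct mutex lock;      /* protect HW access */
        void __iomem *base;
};

static int example_read(struct example_master *em, u32 addr, u32 *val)
{
        int ret;

        mutex_lock(&em->lock);

        ret = example_opb_read(em, addr, val);  /* hardware access under the lock */
        if (ret)
                goto done;

        ret = example_check_errors(em);
done:
        mutex_unlock(&em->lock);
        return ret;
}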
Fixes: 606397d67f41 ("fsi: Add ast2600 master driver") Reviewed-by: Joel Stanley Reviewed-by: Milton Miller Signed-off-by: Eddie James Signed-off-by: Joel Stanley Link: https://lore.kernel.org/r/20201120004929.185239-1-joel@jms.id.au Signed-off-by: Greg Kroah-Hartman --- drivers/fsi/fsi-master-aspeed.c | 45 ++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c index c006ec008a1a..90dbe58ca1ed 100644 --- a/drivers/fsi/fsi-master-aspeed.c +++ b/drivers/fsi/fsi-master-aspeed.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +20,7 @@ struct fsi_master_aspeed { struct fsi_master master; + struct mutex lock; /* protect HW access */ struct device *dev; void __iomem *base; struct clk *clk; @@ -254,6 +256,8 @@ static int aspeed_master_read(struct fsi_master *master, int link, addr |= id << 21; addr += link * FSI_HUB_LINK_SIZE; + mutex_lock(&aspeed->lock); + switch (size) { case 1: ret = opb_readb(aspeed, fsi_base + addr, val); @@ -265,14 +269,14 @@ static int aspeed_master_read(struct fsi_master *master, int link, ret = opb_readl(aspeed, fsi_base + addr, val); break; default: - return -EINVAL; + ret = -EINVAL; + goto done; } ret = check_errors(aspeed, ret); - if (ret) - return ret; - - return 0; +done: + mutex_unlock(&aspeed->lock); + return ret; } static int aspeed_master_write(struct fsi_master *master, int link, @@ -287,6 +291,8 @@ static int aspeed_master_write(struct fsi_master *master, int link, addr |= id << 21; addr += link * FSI_HUB_LINK_SIZE; + mutex_lock(&aspeed->lock); + switch (size) { case 1: ret = opb_writeb(aspeed, fsi_base + addr, *(u8 *)val); @@ -298,14 +304,14 @@ static int aspeed_master_write(struct fsi_master *master, int link, ret = opb_writel(aspeed, fsi_base + addr, *(__be32 *)val); break; default: - return -EINVAL; + ret = -EINVAL; + goto done; } ret = check_errors(aspeed, ret); - if (ret) - return ret; - - return 0; +done: + mutex_unlock(&aspeed->lock); + return ret; } static int aspeed_master_link_enable(struct fsi_master *master, int link, @@ -320,17 +326,21 @@ static int aspeed_master_link_enable(struct fsi_master *master, int link, reg = cpu_to_be32(0x80000000 >> bit); - if (!enable) - return opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), - reg); + mutex_lock(&aspeed->lock); + + if (!enable) { + ret = opb_writel(aspeed, ctrl_base + FSI_MCENP0 + (4 * idx), reg); + goto done; + } ret = opb_writel(aspeed, ctrl_base + FSI_MSENP0 + (4 * idx), reg); if (ret) - return ret; + goto done; mdelay(FSI_LINK_ENABLE_SETUP_TIME); - - return 0; +done: + mutex_unlock(&aspeed->lock); + return ret; } static int aspeed_master_term(struct fsi_master *master, int link, uint8_t id) @@ -431,9 +441,11 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att { struct fsi_master_aspeed *aspeed = dev_get_drvdata(dev); + mutex_lock(&aspeed->lock); gpiod_set_value(aspeed->cfam_reset_gpio, 1); usleep_range(900, 1000); gpiod_set_value(aspeed->cfam_reset_gpio, 0); + mutex_unlock(&aspeed->lock); return count; } @@ -597,6 +609,7 @@ static int fsi_master_aspeed_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, aspeed); + mutex_init(&aspeed->lock); aspeed_master_init(aspeed); rc = fsi_master_register(&aspeed->master); -- cgit v1.2.3 From b4f473cf4605e063f1250c6c7c37140cdd162353 Mon Sep 17 00:00:00 2001 From: Kaixu Xia Date: Sat, 14 Nov 2020 00:14:31 +0800 Subject: altera-stapl: 
remove the unreached switch case The value of the variable status must be one of the 0, -EIO and -EILSEQ, so the switch case -ENODATA is unreached. Remove it. Reported-by: Tosk Robot Signed-off-by: Kaixu Xia Link: https://lore.kernel.org/r/1605284071-6901-1-git-send-email-kaixuxia@tencent.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/altera-stapl/altera.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers') diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c index 5bdf57472314..92c0611034b0 100644 --- a/drivers/misc/altera-stapl/altera.c +++ b/drivers/misc/altera-stapl/altera.c @@ -2265,11 +2265,6 @@ static int altera_check_crc(u8 *p, s32 program_size) "actual %04x\n", __func__, local_expected, local_actual); break; - case -ENODATA: - printk(KERN_ERR "%s: expected CRC not found, " - "actual CRC = %04x\n", __func__, - local_actual); - break; case -EIO: printk(KERN_ERR "%s: error: format isn't " "recognized.\n", __func__); -- cgit v1.2.3 From 81113b0421a5199a1f425beaef7dad0d7c16a891 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Tue, 1 Dec 2020 09:38:43 +0000 Subject: slimbus: qcom-ngd-ctrl: fix SSR dependencies NGD should depend on QCOM_RPROC_COMMON instead of selecting it, as this will be selected by respective remoteproc driver. Reported-by: kernel test robot Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20201201093843.20126-1-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig index 7d59956b5dfb..1235b7dc8496 100644 --- a/drivers/slimbus/Kconfig +++ b/drivers/slimbus/Kconfig @@ -22,10 +22,9 @@ config SLIM_QCOM_CTRL config SLIM_QCOM_NGD_CTRL tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" - depends on HAS_IOMEM && DMA_ENGINE && NET && REMOTEPROC + depends on HAS_IOMEM && DMA_ENGINE && NET && QCOM_RPROC_COMMON depends on ARCH_QCOM || COMPILE_TEST select QCOM_QMI_HELPERS - select QCOM_RPROC_COMMON select QCOM_PDR_HELPERS help Select driver if Qualcomm's SLIMbus Satellite Non-Generic Device -- cgit v1.2.3 From 68d621197162eb503d32676452e7281e3fb6c8cc Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 19 Nov 2020 16:50:59 +0200 Subject: uio: pruss: use devm_clk_get() for clk init This change uses devm_clk_get() to obtain a reference to the clock. It has the benefit that clk_put() is no longer required, and cleans up the exit & error path. 
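A minimal sketch of the devm_clk_get() pattern assumed here (hypothetical probe, mirroring the driver's use of clk_enable()): the clock reference is released automatically when the device is unbound, so neither the error path nor the cleanup path needs clk_put().

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(&pdev->dev, "pruss");        /* put automatically on unbind */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_enable(clk);
        if (ret)
                return ret;                             /* no clk_put() needed */

        return 0;
}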
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201119145059.48326-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_pruss.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c index 41470c4dba02..e9096f53b4cc 100644 --- a/drivers/uio/uio_pruss.c +++ b/drivers/uio/uio_pruss.c @@ -110,7 +110,6 @@ static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev) gdev->sram_vaddr, sram_pool_sz); clk_disable(gdev->pruss_clk); - clk_put(gdev->pruss_clk); } static int pruss_probe(struct platform_device *pdev) @@ -131,7 +130,7 @@ static int pruss_probe(struct platform_device *pdev) return -ENOMEM; /* Power on PRU in case its not done as part of boot-loader */ - gdev->pruss_clk = clk_get(dev, "pruss"); + gdev->pruss_clk = devm_clk_get(dev, "pruss"); if (IS_ERR(gdev->pruss_clk)) { dev_err(dev, "Failed to get clock\n"); return PTR_ERR(gdev->pruss_clk); @@ -140,7 +139,7 @@ static int pruss_probe(struct platform_device *pdev) ret = clk_enable(gdev->pruss_clk); if (ret) { dev_err(dev, "Failed to enable clock\n"); - goto err_clk_put; + return ret; } regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -229,8 +228,6 @@ err_free_sram: gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz); err_clk_disable: clk_disable(gdev->pruss_clk); -err_clk_put: - clk_put(gdev->pruss_clk); return ret; } -- cgit v1.2.3 From 4849e0eda387931fe251fe956cbfddeee852dacf Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 23 Nov 2020 16:34:47 +0200 Subject: uio/uio_pci_generic: remove unneeded pci_set_drvdata() The pci_get_drvdata() was moved during commit ef84928cff58 ("uio/uio_pci_generic: use device-managed function equivalents"). Storing a private object with pci_set_drvdata() doesn't make sense since that change, since there is no more pci_get_drvdata() call in the driver to retrieve the information. This change removes it. Fixes: ef84928cff58 ("uio/uio_pci_generic: use device-managed function equivalents") Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201123143447.16829-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_pci_generic.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c index 1c6c09e1280d..b8e44d16279f 100644 --- a/drivers/uio/uio_pci_generic.c +++ b/drivers/uio/uio_pci_generic.c @@ -101,13 +101,7 @@ static int probe(struct pci_dev *pdev, "no support for interrupts?\n"); } - err = devm_uio_register_device(&pdev->dev, &gdev->info); - if (err) - return err; - - pci_set_drvdata(pdev, gdev); - - return 0; + return devm_uio_register_device(&pdev->dev, &gdev->info); } static struct pci_driver uio_pci_driver = { -- cgit v1.2.3 From 44dccc4a2bd10abfacd25b7d125fb5c3abbb263d Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 20 Nov 2020 09:56:24 +0200 Subject: uio: uio_dmem_genirq: convert simple allocations to device-managed This change converts the simple allocations in the driver to used device-managed allocation functions. This removes the error path entirely in the probe function, and reduces some code in the remove function. 
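A minimal sketch of the devm_kzalloc() conversion with hypothetical names (example_probe() and struct example_priv are made up): because the allocations are bound to the device, every error path becomes a plain return and remove() no longer needs kfree().

struct example_priv {
        struct uio_info *uioinfo;
};

static int example_probe(struct platform_device *pdev)
{
        struct uio_info *uioinfo;
        struct example_priv *priv;

        uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL);
        if (!uioinfo)
                return -ENOMEM;         /* no goto/kfree unwinding needed */

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;         /* uioinfo is freed automatically */

        priv->uioinfo = uioinfo;
        platform_set_drvdata(pdev, priv);
        return 0;
}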
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201120075625.12272-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_dmem_genirq.c | 34 ++++++++-------------------------- 1 file changed, 8 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index ec7f66f4555a..72aa372d6ba6 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -154,11 +154,10 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) if (pdev->dev.of_node) { /* alloc uioinfo for one device */ - uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL); + uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo), GFP_KERNEL); if (!uioinfo) { - ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); - goto bad2; + return -ENOMEM; } uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", pdev->dev.of_node); @@ -167,20 +166,19 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) if (!uioinfo || !uioinfo->name || !uioinfo->version) { dev_err(&pdev->dev, "missing platform_data\n"); - goto bad0; + return -EINVAL; } if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags & IRQF_SHARED) { dev_err(&pdev->dev, "interrupt configuration error\n"); - goto bad0; + return -EINVAL; } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) { - ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); - goto bad0; + return -ENOMEM; } dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); @@ -197,7 +195,7 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) if (ret == -ENXIO && pdev->dev.of_node) ret = UIO_IRQ_NONE; else if (ret < 0) - goto bad1; + return ret; uioinfo->irq = ret; } @@ -286,19 +284,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "unable to register uio device\n"); pm_runtime_disable(&pdev->dev); - goto bad1; + return ret; } platform_set_drvdata(pdev, priv); return 0; - bad1: - kfree(priv); - bad0: - /* kfree uioinfo for OF */ - if (pdev->dev.of_node) - kfree(uioinfo); - bad2: - return ret; } static int uio_dmem_genirq_remove(struct platform_device *pdev) @@ -308,14 +298,6 @@ static int uio_dmem_genirq_remove(struct platform_device *pdev) uio_unregister_device(priv->uioinfo); pm_runtime_disable(&pdev->dev); - priv->uioinfo->handler = NULL; - priv->uioinfo->irqcontrol = NULL; - - /* kfree uioinfo for OF */ - if (pdev->dev.of_node) - kfree(priv->uioinfo); - - kfree(priv); return 0; } -- cgit v1.2.3 From ba022851f3b161040e79cc61c2bd760917490016 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 20 Nov 2020 09:56:25 +0200 Subject: uio: uio_dmem_genirq: finalize conversion of probe to devm_ handlers This moves move pm_runtime_disable on a devm_add_action_or_reset() handler. And with the use of the devm_uio_register_device() function, the remove hook is no longer required. 
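For illustration, a sketch of the devm_add_action_or_reset() idiom (hypothetical names): the disable action is registered right after pm_runtime_enable(), so it runs automatically on probe failure or unbind and the dedicated remove() callback becomes unnecessary.

static void example_pm_disable(void *data)
{
        pm_runtime_disable(data);
}

static int example_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);

        ret = devm_add_action_or_reset(&pdev->dev, example_pm_disable,
                                       &pdev->dev);
        if (ret)
                return ret;     /* on failure the action has already run */

        return 0;
}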
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201120075625.12272-2-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_dmem_genirq.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c index 72aa372d6ba6..6b5cfa5b0673 100644 --- a/drivers/uio/uio_dmem_genirq.c +++ b/drivers/uio/uio_dmem_genirq.c @@ -143,6 +143,13 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) return 0; } +static void uio_dmem_genirq_pm_disable(void *data) +{ + struct device *dev = data; + + pm_runtime_disable(dev); +} + static int uio_dmem_genirq_probe(struct platform_device *pdev) { struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev); @@ -280,25 +287,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev) */ pm_runtime_enable(&pdev->dev); - ret = uio_register_device(&pdev->dev, priv->uioinfo); - if (ret) { - dev_err(&pdev->dev, "unable to register uio device\n"); - pm_runtime_disable(&pdev->dev); + ret = devm_add_action_or_reset(&pdev->dev, uio_dmem_genirq_pm_disable, &pdev->dev); + if (ret) return ret; - } - - platform_set_drvdata(pdev, priv); - return 0; -} - -static int uio_dmem_genirq_remove(struct platform_device *pdev) -{ - struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev); - - uio_unregister_device(priv->uioinfo); - pm_runtime_disable(&pdev->dev); - return 0; + return devm_uio_register_device(&pdev->dev, priv->uioinfo); } static int uio_dmem_genirq_runtime_nop(struct device *dev) @@ -332,7 +325,6 @@ MODULE_DEVICE_TABLE(of, uio_of_genirq_match); static struct platform_driver uio_dmem_genirq = { .probe = uio_dmem_genirq_probe, - .remove = uio_dmem_genirq_remove, .driver = { .name = DRIVER_NAME, .pm = &uio_dmem_genirq_dev_pm_ops, -- cgit v1.2.3 From 023c9c6dc2c414f1bef2e9ee649fb2e459f52326 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 20 Nov 2020 10:42:05 +0200 Subject: uio: uio_sercos3: use device-managed functions for simple allocs This change converts the simple allocations [kzalloc()] to devm_kzalloc() tying the life-time of these objects to the PCI device object. It cleans up the error and exit path and bit, and does a minor correction that -ENOMEM is returned (vs -ENODEV) in case the 'priv' object cannot be allocated. 
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201120084207.50736-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_sercos3.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c index 9658a0887fee..b93a5f8f4cba 100644 --- a/drivers/uio/uio_sercos3.c +++ b/drivers/uio/uio_sercos3.c @@ -124,16 +124,16 @@ static int sercos3_pci_probe(struct pci_dev *dev, struct sercos3_priv *priv; int i; - info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); + info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; - priv = kzalloc(sizeof(struct sercos3_priv), GFP_KERNEL); + priv = devm_kzalloc(&dev->dev, sizeof(struct sercos3_priv), GFP_KERNEL); if (!priv) - goto out_free; + return -ENOMEM; if (pci_enable_device(dev)) - goto out_free_priv; + return -ENODEV; if (pci_request_regions(dev, "sercos3")) goto out_disable; @@ -174,10 +174,6 @@ out_unmap: pci_release_regions(dev); out_disable: pci_disable_device(dev); -out_free_priv: - kfree(priv); -out_free: - kfree(info); return -ENODEV; } @@ -193,8 +189,6 @@ static void sercos3_pci_remove(struct pci_dev *dev) if (info->mem[i].internal_addr) iounmap(info->mem[i].internal_addr); } - kfree(info->priv); - kfree(info); } static struct pci_device_id sercos3_pci_ids[] = { -- cgit v1.2.3 From 6b76c98b96bd619092a89da1c1e25e5cb0ccc46a Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 20 Nov 2020 10:42:06 +0200 Subject: uio: uio_mf624: use devm_kzalloc() for uio_info object This change uses the devm_kzalloc() function to tie the life-time of the uio_info object to PCI device. This cleans up the exit & error path a bit. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201120084207.50736-2-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_mf624.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c index b6a406986667..5065c6a073a8 100644 --- a/drivers/uio/uio_mf624.c +++ b/drivers/uio/uio_mf624.c @@ -136,12 +136,12 @@ static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct uio_info *info; - info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); + info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(dev)) - goto out_free; + return -ENODEV; if (pci_request_regions(dev, "mf624")) goto out_disable; @@ -189,8 +189,6 @@ out_release: out_disable: pci_disable_device(dev); -out_free: - kfree(info); return -ENODEV; } @@ -207,8 +205,6 @@ static void mf624_pci_remove(struct pci_dev *dev) iounmap(info->mem[0].internal_addr); iounmap(info->mem[1].internal_addr); iounmap(info->mem[2].internal_addr); - - kfree(info); } static const struct pci_device_id mf624_pci_id[] = { -- cgit v1.2.3 From c3a747791138062b81b7ba24547c2b58485d3718 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Fri, 20 Nov 2020 10:42:07 +0200 Subject: uio: uio_netx: use devm_kzalloc() for or uio_info object This change uses the devm_kzalloc() function to tie the life-time of the uio_info object to PCI device. This cleans up the exit & error path a bit. 
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201120084207.50736-3-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_netx.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c index 9ae29ffde410..2319d6de8d04 100644 --- a/drivers/uio/uio_netx.c +++ b/drivers/uio/uio_netx.c @@ -53,12 +53,12 @@ static int netx_pci_probe(struct pci_dev *dev, struct uio_info *info; int bar; - info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); + info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(dev)) - goto out_free; + return -ENODEV; if (pci_request_regions(dev, "netx")) goto out_disable; @@ -112,8 +112,6 @@ out_release: pci_release_regions(dev); out_disable: pci_disable_device(dev); -out_free: - kfree(info); return -ENODEV; } @@ -127,8 +125,6 @@ static void netx_pci_remove(struct pci_dev *dev) pci_release_regions(dev); pci_disable_device(dev); iounmap(info->mem[0].internal_addr); - - kfree(info); } static struct pci_device_id netx_pci_ids[] = { -- cgit v1.2.3 From 0a4ade5397918286d97dc575a4dfb00b6bba9b36 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 19 Nov 2020 17:49:00 +0200 Subject: uio: uio_cif: use devm_kzalloc() for uio_info object The uio_info object is free'd last, so it's life-time is tied PCI device object. Using devm_kzalloc() cleans up the error path a bit and the exit path. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201119154903.82099-1-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_cif.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c index ab60186f9759..653f842a1491 100644 --- a/drivers/uio/uio_cif.c +++ b/drivers/uio/uio_cif.c @@ -43,12 +43,12 @@ static int hilscher_pci_probe(struct pci_dev *dev, { struct uio_info *info; - info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); + info = devm_kzalloc(&dev->dev, sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(dev)) - goto out_free; + return -ENODEV; if (pci_request_regions(dev, "hilscher")) goto out_disable; @@ -92,8 +92,6 @@ out_release: pci_release_regions(dev); out_disable: pci_disable_device(dev); -out_free: - kfree (info); return -ENODEV; } @@ -105,8 +103,6 @@ static void hilscher_pci_remove(struct pci_dev *dev) pci_release_regions(dev); pci_disable_device(dev); iounmap(info->mem[0].internal_addr); - - kfree (info); } static struct pci_device_id hilscher_pci_ids[] = { -- cgit v1.2.3 From 16d546c42db5d9ea91e4d9d3bde5e80eb0cf6bf8 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 19 Nov 2020 17:49:01 +0200 Subject: uio: uio_aec: use devm_kzalloc() for uio_info object The uio_info object is free'd last, so it's life-time is tied PCI device object. Using devm_kzalloc() cleans up the error path a bit and the exit path. 
Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201119154903.82099-2-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_aec.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c index 381a26dfac46..32357f8a92b5 100644 --- a/drivers/uio/uio_aec.c +++ b/drivers/uio/uio_aec.c @@ -71,12 +71,12 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *id) struct uio_info *info; int ret; - info = kzalloc(sizeof(struct uio_info), GFP_KERNEL); + info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL); if (!info) return -ENOMEM; if (pci_enable_device(pdev)) - goto out_free; + return -ENODEV; if (pci_request_regions(pdev, "aectc")) goto out_disable; @@ -117,8 +117,6 @@ out_release: pci_release_regions(pdev); out_disable: pci_disable_device(pdev); -out_free: - kfree(info); return -ENODEV; } @@ -136,8 +134,6 @@ static void remove(struct pci_dev *pdev) pci_release_regions(pdev); pci_disable_device(pdev); iounmap(info->priv); - - kfree(info); } static struct pci_driver pci_driver = { -- cgit v1.2.3 From d57801c45f53e3da999e2a0beb932717fe335c41 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 19 Nov 2020 17:49:02 +0200 Subject: uio: uio_fsl_elbc_gpcm: use device-managed allocators This change moves all the simple allocations to use device-managed allocator functions. This way their life-time is tied to the platform_device object, so when this gets free'd these allocations also get cleaned up. The final effect is that error & exit paths get cleaned up a bit. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201119154903.82099-3-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_fsl_elbc_gpcm.c | 32 +++++++++----------------------- 1 file changed, 9 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c index be8a6905f507..7d8eb9dc2068 100644 --- a/drivers/uio/uio_fsl_elbc_gpcm.c +++ b/drivers/uio/uio_fsl_elbc_gpcm.c @@ -299,7 +299,7 @@ static int get_of_data(struct fsl_elbc_gpcm *priv, struct device_node *node, /* get optional uio name */ if (of_property_read_string(node, "uio_name", &dt_name) != 0) dt_name = "eLBC_GPCM"; - *name = kstrdup(dt_name, GFP_KERNEL); + *name = devm_kstrdup(priv->dev, dt_name, GFP_KERNEL); if (!*name) return -ENOMEM; @@ -324,7 +324,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) return -ENODEV; /* allocate private data */ - priv = kzalloc(sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = &pdev->dev; @@ -334,14 +334,12 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) ret = get_of_data(priv, node, &res, ®_br_new, ®_or_new, &irq, &uio_name); if (ret) - goto out_err0; + return ret; /* allocate UIO structure */ - info = kzalloc(sizeof(*info), GFP_KERNEL); - if (!info) { - ret = -ENOMEM; - goto out_err0; - } + info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; /* get current BR/OR values */ reg_br_cur = in_be32(&priv->lbc->bank[priv->bank].br); @@ -354,8 +352,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) != fsl_lbc_addr(res.start)) { dev_err(priv->dev, "bank in use by another peripheral\n"); - ret = -ENODEV; - goto out_err1; + return -ENODEV; } /* warn if behavior settings changing */ @@ -382,12 
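As a sketch of the string handling involved (hypothetical probe, simplified): devm_kstrdup() and devm_kasprintf() tie the copied strings to the device, so the kfree() calls in the error and remove paths can simply go away.

static int example_probe(struct platform_device *pdev)
{
        const char *dt_name;
        const char *name;

        if (of_property_read_string(pdev->dev.of_node, "uio_name", &dt_name))
                dt_name = "eLBC_GPCM";

        name = devm_kstrdup(&pdev->dev, dt_name, GFP_KERNEL);
        if (!name)
                return -ENOMEM;         /* the copy is device-managed */

        return 0;
}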
+379,11 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev) info->mem[0].internal_addr = ioremap(res.start, resource_size(&res)); if (!info->mem[0].internal_addr) { dev_err(priv->dev, "failed to map chip region\n"); - ret = -ENODEV; - goto out_err1; + return -ENODEV; } /* set all UIO data */ - info->mem[0].name = kasprintf(GFP_KERNEL, "%pOFn", node); + info->mem[0].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn", node); info->mem[0].addr = res.start; info->mem[0].size = resource_size(&res); info->mem[0].memtype = UIO_MEM_PHYS; @@ -428,12 +424,6 @@ out_err2: if (priv->shutdown) priv->shutdown(info, true); iounmap(info->mem[0].internal_addr); -out_err1: - kfree(info->mem[0].name); - kfree(info); -out_err0: - kfree(uio_name); - kfree(priv); return ret; } @@ -447,10 +437,6 @@ static int uio_fsl_elbc_gpcm_remove(struct platform_device *pdev) if (priv->shutdown) priv->shutdown(info, false); iounmap(info->mem[0].internal_addr); - kfree(info->mem[0].name); - kfree(info->name); - kfree(info); - kfree(priv); return 0; -- cgit v1.2.3 From 74e71964b1a9ffd34fa4b6ec8f2fa13e7cf0ac7a Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 19 Nov 2020 17:49:03 +0200 Subject: uio: uio_hv_generic: use devm_kzalloc() for private data alloc This is a minor cleanup for the management of the private object of this driver. The allocation can be tied to the life-time of the hv_device object. This cleans up a bit the exit & error paths, since the object doesn't need to be explicitly free'd anymore. Signed-off-by: Alexandru Ardelean Link: https://lore.kernel.org/r/20201119154903.82099-4-alexandru.ardelean@analog.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/uio_hv_generic.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 4dae2320b103..0330ba99730e 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -247,14 +247,14 @@ hv_uio_probe(struct hv_device *dev, return -ENOTSUPP; } - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); + pdata = devm_kzalloc(&dev->device, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE, HV_RING_SIZE * PAGE_SIZE); if (ret) - goto fail; + return ret; set_channel_read_mode(channel, HV_CALL_ISR); @@ -347,8 +347,6 @@ hv_uio_probe(struct hv_device *dev, fail_close: hv_uio_cleanup(dev, pdata); -fail: - kfree(pdata); return ret; } @@ -364,10 +362,8 @@ hv_uio_remove(struct hv_device *dev) sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); uio_unregister_device(&pdata->info); hv_uio_cleanup(dev, pdata); - hv_set_drvdata(dev, NULL); vmbus_free_ring(dev->channel); - kfree(pdata); return 0; } -- cgit v1.2.3 From b1f0aeecd25ac11053997484510a43346297a42d Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 9 Dec 2020 18:33:41 -0800 Subject: spmi: get rid of a warning when built with W=1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SPMI core complaing with this warning when built with W=1: drivers/spmi/spmi.c: In function ‘spmi_controller_remove’: drivers/spmi/spmi.c:548:6: warning: variable ‘dummy’ set but not used [-Wunused-but-set-variable] 548 | int dummy; | ^~~~~ As the dummy var isn't needed, remove it. 
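The warning comes from assigning a return value that nothing ever reads; with W=1 the compiler flags such write-only variables. A minimal sketch of the cleaned-up form (hypothetical names, including struct example_controller): the return value of device_for_each_child() is deliberately ignored because the child callback always returns 0.

void example_controller_remove(struct example_controller *ctrl)
{
        if (!ctrl)
                return;

        /* return value ignored: example_remove_child() always returns 0 */
        device_for_each_child(&ctrl->dev, NULL, example_remove_child);
        device_del(&ctrl->dev);
}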
Link: https://lore.kernel.org/r/aacfd03835b7d1b3b6c21665b44000fe7242e535.1601360391.git.mchehab+huawei@kernel.org Reviewed-by: Stephen Boyd Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20201210023344.2838141-2-sboyd@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/spmi/spmi.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index c16b60f645a4..fd3ff6079b15 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -545,13 +545,10 @@ static int spmi_ctrl_remove_device(struct device *dev, void *data) */ void spmi_controller_remove(struct spmi_controller *ctrl) { - int dummy; - if (!ctrl) return; - dummy = device_for_each_child(&ctrl->dev, NULL, - spmi_ctrl_remove_device); + device_for_each_child(&ctrl->dev, NULL, spmi_ctrl_remove_device); device_del(&ctrl->dev); } EXPORT_SYMBOL_GPL(spmi_controller_remove); -- cgit v1.2.3 From 0be0a733c9cd396c3900ac10873419d8b21bfd39 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Wed, 9 Dec 2020 18:33:42 -0800 Subject: spmi: fix some coding style issues at the spmi core While preparing to port the HiSilicon 6421v600 SPMI driver, I noticed some coding style issues at the SPMI core. Address them. Link: https://lore.kernel.org/r/fec878502147336cbf2cf86e476e9dd797cd7e6f.1601360391.git.mchehab+huawei@kernel.org Reviewed-by: Stephen Boyd Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20201210023344.2838141-3-sboyd@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/spmi/spmi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index fd3ff6079b15..253340e10dab 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -23,6 +23,7 @@ static DEFINE_IDA(ctrl_ida); static void spmi_dev_release(struct device *dev) { struct spmi_device *sdev = to_spmi_device(dev); + kfree(sdev); } @@ -33,6 +34,7 @@ static const struct device_type spmi_dev_type = { static void spmi_ctrl_release(struct device *dev) { struct spmi_controller *ctrl = to_spmi_controller(dev); + ida_simple_remove(&ctrl_ida, ctrl->nr); kfree(ctrl); } @@ -487,7 +489,7 @@ static void of_spmi_register_devices(struct spmi_controller *ctrl) continue; sdev->dev.of_node = node; - sdev->usid = (u8) reg[0]; + sdev->usid = (u8)reg[0]; err = spmi_device_add(sdev); if (err) { @@ -531,6 +533,7 @@ EXPORT_SYMBOL_GPL(spmi_controller_add); static int spmi_ctrl_remove_device(struct device *dev, void *data) { struct spmi_device *spmidev = to_spmi_device(dev); + if (dev->type == &spmi_dev_type) spmi_device_remove(spmidev); return 0; -- cgit v1.2.3 From d40c2d4ed62df64ce603c208bceff25245380157 Mon Sep 17 00:00:00 2001 From: Hsin-Hsiung Wang Date: Wed, 9 Dec 2020 18:33:43 -0800 Subject: spmi: Add driver shutdown support Add new shutdown() method. Use it in the standard driver model style. 
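A minimal sketch of how an SPMI client driver can use the new hook (a hypothetical example driver, not part of this patch):

static int example_probe(struct spmi_device *sdev)
{
        return 0;
}

static void example_shutdown(struct spmi_device *sdev)
{
        /* quiesce the device before the system reboots or powers off */
}

static struct spmi_driver example_spmi_driver = {
        .probe    = example_probe,
        .shutdown = example_shutdown,
        .driver   = {
                .name = "example-spmi",
        },
};
module_spmi_driver(example_spmi_driver);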
Link: https://lore.kernel.org/r/1603187810-30481-2-git-send-email-hsin-hsiung.wang@mediatek.com Signed-off-by: Hsin-Hsiung Wang Signed-off-by: Stephen Boyd Link: https://lore.kernel.org/r/20201210023344.2838141-4-sboyd@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/spmi/spmi.c | 9 +++++++++ include/linux/spmi.h | 1 + 2 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 253340e10dab..51f5aeb65b3b 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -359,6 +359,14 @@ static int spmi_drv_remove(struct device *dev) return 0; } +static void spmi_drv_shutdown(struct device *dev) +{ + const struct spmi_driver *sdrv = to_spmi_driver(dev->driver); + + if (sdrv && sdrv->shutdown) + sdrv->shutdown(to_spmi_device(dev)); +} + static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env) { int ret; @@ -375,6 +383,7 @@ static struct bus_type spmi_bus_type = { .match = spmi_device_match, .probe = spmi_drv_probe, .remove = spmi_drv_remove, + .shutdown = spmi_drv_shutdown, .uevent = spmi_drv_uevent, }; diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 394a3f68bad5..729bcbf9f5ad 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -138,6 +138,7 @@ struct spmi_driver { struct device_driver driver; int (*probe)(struct spmi_device *sdev); void (*remove)(struct spmi_device *sdev); + void (*shutdown)(struct spmi_device *sdev); }; static inline struct spmi_driver *to_spmi_driver(struct device_driver *d) -- cgit v1.2.3 From 07d9a767ae5c3a9b0cf1adccbf157a6fae780de4 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Wed, 25 Nov 2020 10:31:05 +0100 Subject: siox: Use bus_type functions for probe, remove and shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The eventual goal is to get rid of the callbacks in struct device_driver. 
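The conversion follows the usual bus pattern sketched below (hypothetical names; to_example_driver() and to_example_device() stand for container_of() helpers): the bus supplies its own callbacks in struct bus_type and dispatches to the driver's optional hooks, instead of patching struct device_driver at driver-registration time.

static int example_bus_probe(struct device *dev)
{
        struct example_driver *edrv = to_example_driver(dev->driver);

        return edrv->probe(to_example_device(dev));
}

static struct bus_type example_bus_type = {
        .name  = "example",
        .probe = example_bus_probe,
        /* .remove and .shutdown dispatch the same way, checking for NULL hooks */
};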
Tested-by: Thorsten Scherer Acked-by: Thorsten Scherer Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20201125093106.240643-2-u.kleine-koenig@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/siox/siox-core.c | 49 ++++++++++++++++++++++++------------------------ 1 file changed, 25 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c index f8c08fb9891d..b56cdcb52967 100644 --- a/drivers/siox/siox-core.c +++ b/drivers/siox/siox-core.c @@ -512,41 +512,48 @@ static int siox_match(struct device *dev, struct device_driver *drv) return 1; } -static struct bus_type siox_bus_type = { - .name = "siox", - .match = siox_match, -}; - -static int siox_driver_probe(struct device *dev) +static int siox_probe(struct device *dev) { struct siox_driver *sdriver = to_siox_driver(dev->driver); struct siox_device *sdevice = to_siox_device(dev); - int ret; - ret = sdriver->probe(sdevice); - return ret; + return sdriver->probe(sdevice); } -static int siox_driver_remove(struct device *dev) +static int siox_remove(struct device *dev) { struct siox_driver *sdriver = container_of(dev->driver, struct siox_driver, driver); struct siox_device *sdevice = to_siox_device(dev); - int ret; + int ret = 0; + + if (sdriver->remove) + ret = sdriver->remove(sdevice); - ret = sdriver->remove(sdevice); return ret; } -static void siox_driver_shutdown(struct device *dev) +static void siox_shutdown(struct device *dev) { - struct siox_driver *sdriver = - container_of(dev->driver, struct siox_driver, driver); struct siox_device *sdevice = to_siox_device(dev); + struct siox_driver *sdriver; - sdriver->shutdown(sdevice); + if (!dev->driver) + return; + + sdriver = container_of(dev->driver, struct siox_driver, driver); + if (sdriver->shutdown) + sdriver->shutdown(sdevice); } +static struct bus_type siox_bus_type = { + .name = "siox", + .match = siox_match, + .probe = siox_probe, + .remove = siox_remove, + .shutdown = siox_shutdown, +}; + static ssize_t active_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -882,7 +889,8 @@ int __siox_driver_register(struct siox_driver *sdriver, struct module *owner) if (unlikely(!siox_is_registered)) return -EPROBE_DEFER; - if (!sdriver->set_data && !sdriver->get_data) { + if (!sdriver->probe || + (!sdriver->set_data && !sdriver->get_data)) { pr_err("Driver %s doesn't provide needed callbacks\n", sdriver->driver.name); return -EINVAL; @@ -891,13 +899,6 @@ int __siox_driver_register(struct siox_driver *sdriver, struct module *owner) sdriver->driver.owner = owner; sdriver->driver.bus = &siox_bus_type; - if (sdriver->probe) - sdriver->driver.probe = siox_driver_probe; - if (sdriver->remove) - sdriver->driver.remove = siox_driver_remove; - if (sdriver->shutdown) - sdriver->driver.shutdown = siox_driver_shutdown; - ret = driver_register(&sdriver->driver); if (ret) pr_err("Failed to register siox driver %s (%d)\n", -- cgit v1.2.3 From 1c12c27086dcef853832a7cbebcb48bdac8104b6 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Wed, 25 Nov 2020 10:31:06 +0100 Subject: siox: Make remove callback return void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The driver core ignores the return value of the remove callback, so don't give siox drivers the chance to provide a value. All siox drivers only allocate devm-managed resources in .probe, so there is no .remove callback to fix. 
Tested-by: Thorsten Scherer Acked-by: Thorsten Scherer Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20201125093106.240643-3-u.kleine-koenig@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/siox/siox-core.c | 5 ++--- include/linux/siox.h | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c index b56cdcb52967..1794ff0106bc 100644 --- a/drivers/siox/siox-core.c +++ b/drivers/siox/siox-core.c @@ -525,12 +525,11 @@ static int siox_remove(struct device *dev) struct siox_driver *sdriver = container_of(dev->driver, struct siox_driver, driver); struct siox_device *sdevice = to_siox_device(dev); - int ret = 0; if (sdriver->remove) - ret = sdriver->remove(sdevice); + sdriver->remove(sdevice); - return ret; + return 0; } static void siox_shutdown(struct device *dev) diff --git a/include/linux/siox.h b/include/linux/siox.h index da7225bf1877..6bfbda3f634c 100644 --- a/include/linux/siox.h +++ b/include/linux/siox.h @@ -36,7 +36,7 @@ bool siox_device_connected(struct siox_device *sdevice); struct siox_driver { int (*probe)(struct siox_device *sdevice); - int (*remove)(struct siox_device *sdevice); + void (*remove)(struct siox_device *sdevice); void (*shutdown)(struct siox_device *sdevice); /* -- cgit v1.2.3 From 428bb001143cf5bfb65aa4ae90d4ebc95f82d007 Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Tue, 8 Dec 2020 09:54:32 +0800 Subject: slimbus: qcom: fix potential NULL dereference in qcom_slim_prg_slew() platform_get_resource_byname() may fail and in this case a NULL dereference will occur. Fix it to use devm_platform_ioremap_resource_byname() instead of calling platform_get_resource_byname() and devm_ioremap(). This is detected by Coccinelle semantic patch. @@ expression pdev, res, n, t, e, e1, e2; @@ res = \(platform_get_resource\|platform_get_resource_byname\)(pdev, t, n); + if (!res) + return -EINVAL; ... when != res == NULL e = devm_ioremap(e1, res->start, e2); Fixes: ad7fcbc308b0 ("slimbus: qcom: Add Qualcomm Slimbus controller driver") Signed-off-by: Zhang Changzhong Link: https://lore.kernel.org/r/1607392473-20610-1-git-send-email-zhangchangzhong@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/slimbus/qcom-ctrl.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c index 4aad2566f52d..f04b961b96cd 100644 --- a/drivers/slimbus/qcom-ctrl.c +++ b/drivers/slimbus/qcom-ctrl.c @@ -472,15 +472,10 @@ static void qcom_slim_rxwq(struct work_struct *work) static void qcom_slim_prg_slew(struct platform_device *pdev, struct qcom_slim_ctrl *ctrl) { - struct resource *slew_mem; - if (!ctrl->slew_reg) { /* SLEW RATE register for this SLIMbus */ - slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "slew"); - ctrl->slew_reg = devm_ioremap(&pdev->dev, slew_mem->start, - resource_size(slew_mem)); - if (!ctrl->slew_reg) + ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew"); + if (IS_ERR(ctrl->slew_reg)) return; } -- cgit v1.2.3 From 06bc4ca115cddabba0faa801488bd946a48c0bf7 Mon Sep 17 00:00:00 2001 From: Michael Auchter Date: Thu, 15 Oct 2020 09:07:34 -0500 Subject: extcon: Add driver for TI TUSB320 This patch adds an extcon driver for the TI TUSB320 USB Type-C device. This can be used to detect whether the port is configured as a downstream or upstream facing port. 
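As a usage sketch (hypothetical consumer code, not part of this patch), a USB controller driver could query the resulting extcon device to pick its role:

static void example_check_role(struct device *dev)
{
        struct extcon_dev *edev;

        edev = extcon_get_edev_by_phandle(dev, 0);
        if (IS_ERR(edev))
                return;

        if (extcon_get_state(edev, EXTCON_USB_HOST) > 0)
                dev_info(dev, "attached as downstream facing port (host)\n");
        else if (extcon_get_state(edev, EXTCON_USB) > 0)
                dev_info(dev, "attached as upstream facing port (device)\n");
}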
Signed-off-by: Michael Auchter Signed-off-by: Chanwoo Choi --- drivers/extcon/Kconfig | 8 ++ drivers/extcon/Makefile | 1 + drivers/extcon/extcon-usbc-tusb320.c | 184 +++++++++++++++++++++++++++++++++++ 3 files changed, 193 insertions(+) create mode 100644 drivers/extcon/extcon-usbc-tusb320.c (limited to 'drivers') diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index aac507bff135..af58ebca2bf6 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -186,4 +186,12 @@ config EXTCON_USBC_CROS_EC Say Y here to enable USB Type C cable detection extcon support when using Chrome OS EC based USB Type-C ports. +config EXTCON_USBC_TUSB320 + tristate "TI TUSB320 USB-C extcon support" + depends on I2C + select REGMAP_I2C + help + Say Y here to enable support for USB Type C cable detection extcon + support using a TUSB320. + endif diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index 52096fd8a216..fe10a1b7d18b 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -25,3 +25,4 @@ obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o obj-$(CONFIG_EXTCON_USB_GPIO) += extcon-usb-gpio.o obj-$(CONFIG_EXTCON_USBC_CROS_EC) += extcon-usbc-cros-ec.o +obj-$(CONFIG_EXTCON_USBC_TUSB320) += extcon-usbc-tusb320.o diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c new file mode 100644 index 000000000000..805af73b4152 --- /dev/null +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -0,0 +1,184 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * drivers/extcon/extcon-tusb320.c - TUSB320 extcon driver + * + * Copyright (C) 2020 National Instruments Corporation + * Author: Michael Auchter + */ + +#include +#include +#include +#include +#include +#include +#include + +#define TUSB320_REG9 0x9 +#define TUSB320_REG9_ATTACHED_STATE_SHIFT 6 +#define TUSB320_REG9_ATTACHED_STATE_MASK 0x3 +#define TUSB320_REG9_CABLE_DIRECTION BIT(5) +#define TUSB320_REG9_INTERRUPT_STATUS BIT(4) +#define TUSB320_ATTACHED_STATE_NONE 0x0 +#define TUSB320_ATTACHED_STATE_DFP 0x1 +#define TUSB320_ATTACHED_STATE_UFP 0x2 +#define TUSB320_ATTACHED_STATE_ACC 0x3 + +struct tusb320_priv { + struct device *dev; + struct regmap *regmap; + struct extcon_dev *edev; +}; + +static const char * const tusb_attached_states[] = { + [TUSB320_ATTACHED_STATE_NONE] = "not attached", + [TUSB320_ATTACHED_STATE_DFP] = "downstream facing port", + [TUSB320_ATTACHED_STATE_UFP] = "upstream facing port", + [TUSB320_ATTACHED_STATE_ACC] = "accessory", +}; + +static const unsigned int tusb320_extcon_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +static int tusb320_check_signature(struct tusb320_priv *priv) +{ + static const char sig[] = { '\0', 'T', 'U', 'S', 'B', '3', '2', '0' }; + unsigned val; + int i, ret; + + for (i = 0; i < sizeof(sig); i++) { + ret = regmap_read(priv->regmap, sizeof(sig) - 1 - i, &val); + if (ret < 0) + return ret; + if (val != sig[i]) { + dev_err(priv->dev, "signature mismatch!\n"); + return -ENODEV; + } + } + + return 0; +} + +static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) +{ + struct tusb320_priv *priv = dev_id; + int state, polarity; + unsigned reg; + + if (regmap_read(priv->regmap, TUSB320_REG9, ®)) { + dev_err(priv->dev, "error during i2c read!\n"); + return IRQ_NONE; + } + + if (!(reg & TUSB320_REG9_INTERRUPT_STATUS)) + return IRQ_NONE; + + state = (reg >> TUSB320_REG9_ATTACHED_STATE_SHIFT) & + TUSB320_REG9_ATTACHED_STATE_MASK; + polarity = !!(reg & TUSB320_REG9_CABLE_DIRECTION); + + 
+	dev_dbg(priv->dev, "attached state: %s, polarity: %d\n",
+		tusb_attached_states[state], polarity);
+
+	extcon_set_state(priv->edev, EXTCON_USB,
+			 state == TUSB320_ATTACHED_STATE_UFP);
+	extcon_set_state(priv->edev, EXTCON_USB_HOST,
+			 state == TUSB320_ATTACHED_STATE_DFP);
+	extcon_set_property(priv->edev, EXTCON_USB,
+			    EXTCON_PROP_USB_TYPEC_POLARITY,
+			    (union extcon_property_value)polarity);
+	extcon_set_property(priv->edev, EXTCON_USB_HOST,
+			    EXTCON_PROP_USB_TYPEC_POLARITY,
+			    (union extcon_property_value)polarity);
+	extcon_sync(priv->edev, EXTCON_USB);
+	extcon_sync(priv->edev, EXTCON_USB_HOST);
+
+	regmap_write(priv->regmap, TUSB320_REG9, reg);
+
+	return IRQ_HANDLED;
+}
+
+static const struct regmap_config tusb320_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+};
+
+static int tusb320_extcon_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct tusb320_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = &client->dev;
+
+	priv->regmap = devm_regmap_init_i2c(client, &tusb320_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	ret = tusb320_check_signature(priv);
+	if (ret)
+		return ret;
+
+	priv->edev = devm_extcon_dev_allocate(priv->dev, tusb320_extcon_cable);
+	if (IS_ERR(priv->edev)) {
+		dev_err(priv->dev, "failed to allocate extcon device\n");
+		return PTR_ERR(priv->edev);
+	}
+
+	ret = devm_extcon_dev_register(priv->dev, priv->edev);
+	if (ret < 0) {
+		dev_err(priv->dev, "failed to register extcon device\n");
+		return ret;
+	}
+
+	extcon_set_property_capability(priv->edev, EXTCON_USB,
+				       EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(priv->edev, EXTCON_USB_HOST,
+				       EXTCON_PROP_USB_TYPEC_POLARITY);
+
+	/* update initial state */
+	tusb320_irq_handler(client->irq, priv);
+
+	ret = devm_request_threaded_irq(priv->dev, client->irq, NULL,
+					tusb320_irq_handler,
+					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+					client->name, priv);
+
+	return ret;
+}
+
+static const struct of_device_id tusb320_extcon_dt_match[] = {
+	{ .compatible = "ti,tusb320", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, tusb320_extcon_dt_match);
+
+static struct i2c_driver tusb320_extcon_driver = {
+	.probe		= tusb320_extcon_probe,
+	.driver		= {
+		.name	= "extcon-tusb320",
+		.of_match_table = tusb320_extcon_dt_match,
+	},
+};
+
+static int __init tusb320_init(void)
+{
+	return i2c_add_driver(&tusb320_extcon_driver);
+}
+subsys_initcall(tusb320_init);
+
+static void __exit tusb320_exit(void)
+{
+	i2c_del_driver(&tusb320_extcon_driver);
+}
+module_exit(tusb320_exit);
+
+MODULE_AUTHOR("Michael Auchter ");
+MODULE_DESCRIPTION("TI TUSB320 extcon driver");
+MODULE_LICENSE("GPL v2");
--
cgit v1.2.3


From f58f26ab22f78736a402940a7baf5599a111c72c Mon Sep 17 00:00:00 2001
From: Linus Walleij
Date: Sun, 1 Nov 2020 01:43:57 +0100
Subject: extcon: fsa9480: Support TI TSU6111 variant

The Texas Instruments TSU6111 is compatible to the FSA880/FSA9480.
Signed-off-by: Linus Walleij
Signed-off-by: Chanwoo Choi
---
 drivers/extcon/extcon-fsa9480.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index 8405512f5199..08bdedbcdb0d 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -364,6 +364,7 @@ MODULE_DEVICE_TABLE(i2c, fsa9480_id);
 static const struct of_device_id fsa9480_of_match[] = {
 	{ .compatible = "fcs,fsa9480", },
 	{ .compatible = "fcs,fsa880", },
+	{ .compatible = "ti,tsu6111", },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, fsa9480_of_match);
--
cgit v1.2.3


From e1efdb604f5c9903a5d92ef42244009d3c04880f Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Tue, 8 Dec 2020 14:36:27 +0100
Subject: extcon: max77693: Fix modalias string

The platform device driver name is "max77693-muic", so advertise it
properly in the modalias string. This fixes automated module loading when
this driver is compiled as a module.

Fixes: db1b9037424b ("extcon: MAX77693: Add extcon-max77693 driver to support Maxim MAX77693 MUIC device")
Signed-off-by: Marek Szyprowski
Signed-off-by: Chanwoo Choi
---
 drivers/extcon/extcon-max77693.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 4a410fd2ea9a..92af97e00828 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver);
 MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
 MODULE_AUTHOR("Chanwoo Choi ");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:extcon-max77693");
+MODULE_ALIAS("platform:max77693-muic");
--
cgit v1.2.3
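For context on why the alias string above matters: when a platform device is registered, the driver core emits a uevent containing MODALIAS=platform:<device name>, which udev passes to modprobe, so a module is autoloaded only if one of its MODULE_ALIAS() strings matches the device name ("max77693-muic" here) rather than the source file name. The following is a minimal, hypothetical sketch of that pattern, not code from the extcon-max77693 driver; only the "max77693-muic" name is taken from the patch above.

/*
 * Hypothetical, minimal platform driver illustrating modalias-based
 * autoloading; not part of the kernel tree.  The driver core generates
 * MODALIAS=platform:max77693-muic for the matching platform device, so
 * MODULE_ALIAS() below must repeat the device name exactly.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_muic_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound to platform device \"%s\"\n", pdev->name);
	return 0;
}

static struct platform_driver example_muic_driver = {
	.probe	= example_muic_probe,
	.driver	= {
		.name = "max77693-muic",	/* must match the platform_device name */
	},
};
module_platform_driver(example_muic_driver);

/* Without this alias, modprobe cannot resolve "platform:max77693-muic". */
MODULE_ALIAS("platform:max77693-muic");
MODULE_LICENSE("GPL");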