From 0cf35347830ce981428833789ef77fb0b2347ae0 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Fri, 26 Nov 2021 19:15:24 +0530 Subject: bus: mhi: ep: Add support for registering MHI endpoint controllers This commit adds support for registering MHI endpoint controller drivers with the MHI endpoint stack. MHI endpoint controller drivers manage the interaction with the host machines (such as x86). They are also the MHI endpoint bus master in charge of managing the physical link between the host and endpoint device. Even though the MHI spec is bus agnostic, the current implementation is entirely based on the PCIe bus. The endpoint controller driver encloses all information about the underlying physical bus, such as PCIe. The registration process involves parsing the channel configuration and allocating an MHI EP device. Channels used in the endpoint stack follow the perspective of the MHI host stack, i.e.: UL - From host to endpoint DL - From endpoint to host Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/Kconfig | 1 + drivers/bus/mhi/Makefile | 3 + drivers/bus/mhi/ep/Kconfig | 10 ++ drivers/bus/mhi/ep/Makefile | 2 + drivers/bus/mhi/ep/internal.h | 156 ++++++++++++++++++++++++++++ drivers/bus/mhi/ep/main.c | 236 ++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 408 insertions(+) create mode 100644 drivers/bus/mhi/ep/Kconfig create mode 100644 drivers/bus/mhi/ep/Makefile create mode 100644 drivers/bus/mhi/ep/internal.h create mode 100644 drivers/bus/mhi/ep/main.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index 4748df7f9cd5..b39a11e6c624 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -6,3 +6,4 @@ # source "drivers/bus/mhi/host/Kconfig" +source "drivers/bus/mhi/ep/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 5f5708a249f5..46981331b38f 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,5 @@ # Host MHI stack obj-y += host/ + +# Endpoint MHI stack +obj-y += ep/ diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig new file mode 100644 index 000000000000..90ab3b040672 --- /dev/null +++ b/drivers/bus/mhi/ep/Kconfig @@ -0,0 +1,10 @@ +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by a host processor to control + and communicate with a modem device over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for endpoint devices, + such as the SDX55 modem connected to the host machine over PCIe. diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000000..64e29252b608 --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o +mhi_ep-y := main.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000000..0d3923186a5e --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. 
+ * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. 
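+ * The bit positions in this mask register match those in MHI_CTRL_INT_STATUS.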
+ */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..d932ad01761b --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with it is NULL. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. 
PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and direction less channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_free_cmd; + + mhi_cntrl->index = ret; + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_ida_free; + } + + dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_match(struct device *dev, struct 
device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam "); -- cgit v1.2.3 From 36b7cae6a0328cd8560fe6b4ccdda921c600d154 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Fri, 26 Nov 2021 19:52:20 +0530 Subject: bus: mhi: ep: Add support for registering MHI endpoint client drivers This commit adds support for registering MHI endpoint client drivers with the MHI endpoint stack. MHI endpoint client drivers bind to one or more MHI endpoint devices inorder to send and receive the upper-layer protocol packets like IP packets, modem control messages, and diagnostics messages over MHI bus. Reviewed-by: Hemant Kumar Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 57 +++++++++++++++++++++++++++++-- 2 files changed, 140 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index d932ad01761b..f7d5f75fc083 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -198,9 +198,88 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? 
mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; /* * If the device is a controller type then there is no client driver @@ -209,6 +288,12 @@ static int mhi_ep_match(struct device *dev, struct device_driver *drv) if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) return 0; + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + return 0; }; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 891b199a9703..e2b94f9eb846 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -101,8 +101,8 @@ struct mhi_ep_cntrl { * @mhi_cntrl: Controller the device belongs to * @id: Pointer to MHI Endpoint device ID struct * @name: Name of the associated MHI Endpoint device - * @ul_chan: UL channel for the device - * @dl_chan: DL channel for the device + * @ul_chan: UL (from host to endpoint) channel for the device + * @dl_chan: DL (from endpoint to host) channel for the device * @dev_type: MHI device type */ struct mhi_ep_device { @@ -115,7 +115,60 @@ struct mhi_ep_device { enum mhi_device_type dev_type; }; +/** + * struct mhi_ep_driver - Structure representing a MHI Endpoint client driver + * @id_table: Pointer to MHI Endpoint device ID table + * @driver: Device driver model driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer + * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer + */ +struct mhi_ep_driver { + const struct mhi_device_id *id_table; + struct device_driver driver; + int (*probe)(struct mhi_ep_device *mhi_ep, + const struct mhi_device_id *id); + void (*remove)(struct mhi_ep_device *mhi_ep); + void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); +}; + #define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) +#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver) + +/* + * module_mhi_ep_driver() - Helper macro for 
drivers that don't do + * anything special other than using default mhi_ep_driver_register() and + * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. + */ +#define module_mhi_ep_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_ep_driver_register, \ + mhi_ep_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_ep_driver_register(mhi_drv) \ + __mhi_ep_driver_register(mhi_drv, THIS_MODULE) + +/** + * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus + * @mhi_drv: Driver to be associated with the device + * @owner: The module owner + * + * Return: 0 if driver registrations succeeds, a negative error code otherwise. + */ +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner); + +/** + * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus + * @mhi_drv: Driver associated with the device + */ +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv); /** * mhi_ep_register_controller - Register MHI Endpoint controller -- cgit v1.2.3 From 03f44035a5863bd30cf6f24ea2e2a4f7da40a81d Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Fri, 26 Nov 2021 19:58:00 +0530 Subject: bus: mhi: ep: Add support for creating and destroying MHI EP devices This commit adds support for creating and destroying MHI endpoint devices. The MHI endpoint devices binds to the MHI endpoint channels and are used to transfer data between MHI host and endpoint device. There is a single MHI EP device for each channel pair. The devices will be created when the corresponding channels has been started by the host and will be destroyed during MHI EP power down and reset. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index f7d5f75fc083..6c64745e8a06 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -68,6 +68,89 @@ static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, return mhi_dev; } +/* + * MHI channels are always defined in pairs with UL as the even numbered + * channel and DL as odd numbered one. This function gets UL channel (primary) + * as the ch_id and always looks after the next entry in channel list for + * the corresponding DL channel (secondary). 
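+ * e.g., UL channel 0 and DL channel 1 together represent one MHI EP device.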
+ */ +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + struct device *dev = mhi_cntrl->cntrl_dev; + struct mhi_ep_device *mhi_dev; + int ret; + + /* Check if the channel name is same for both UL and DL */ + if (strcmp(mhi_chan->name, mhi_chan[1].name)) { + dev_err(dev, "UL and DL channel names are not same: (%s) != (%s)\n", + mhi_chan->name, mhi_chan[1].name); + return -EINVAL; + } + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + /* Configure primary channel */ + mhi_dev->ul_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + mhi_dev->dl_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, const struct mhi_ep_cntrl_config *config) { -- cgit v1.2.3 From 37867094faf16d636f47a06b7acc035009dc2cd3 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 12:36:28 +0530 Subject: bus: mhi: ep: Add support for managing MMIO registers Add support for managing the Memory Mapped Input Output (MMIO) registers of the MHI bus. All MHI operations are carried out using the MMIO registers by both host and the endpoint device. The MMIO registers reside inside the endpoint device memory (fixed location based on the platform) and the address is passed by the MHI EP controller driver during its registration. 
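As an illustration (not part of this patch), the state handling added later in this series can use these helpers to detect and acknowledge a host-initiated MHI reset; the calling context below is assumed:

	enum mhi_state state;
	bool mhi_reset;

	/* Decode MHISTATE and the RESET bit from the MHICTRL register */
	mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
	if (mhi_reset)
		/* Acknowledge the host by clearing MHICTRL.RESET */
		mhi_ep_mmio_clear_reset(mhi_cntrl);
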
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 26 ++++ drivers/bus/mhi/ep/main.c | 6 +- drivers/bus/mhi/ep/mmio.c | 273 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 18 +++ 5 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/mmio.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 64e29252b608..a1555ae287ad 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o +mhi_ep-y := main.o mmio.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 0d3923186a5e..475425a30d85 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -153,4 +153,30 @@ struct mhi_ep_chan { bool skip_td; }; +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 6c64745e8a06..7dcc784f10d1 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -214,7 +214,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -227,6 +227,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..b5bfd22f2c8e --- 
/dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl 
*mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index e2b94f9eb846..5db048e258e4 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,6 +59,10 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_host_pa: Physical address of host channel context data structure + * @ev_ctx_host_pa: Physical address of host event context data structure + * @cmd_ctx_host_pa: Physical address of host command context data structure + * @chdb: Array of channel doorbell interrupt info * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -67,6 +71,10 @@ struct mhi_ep_db_info { * @mhi_state: MHI Endpoint state * @max_chan: Maximum channels supported by the endpoint controller * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @event_rings: Number of event rings supported by the endpoint controller + * @hw_event_rings: Number of hardware event rings supported by the endpoint controller + * @chdb_offset: Channel doorbell offset set by the host + * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index */ struct mhi_ep_cntrl { @@ -79,6 +87,12 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + u64 ch_ctx_host_pa; + u64 ev_ctx_host_pa; + u64 cmd_ctx_host_pa; + + struct mhi_ep_db_info chdb[4]; + void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, void __iomem **virt, size_t size); @@ -91,6 +105,10 @@ struct mhi_ep_cntrl { u32 max_chan; u32 mru; + u32 event_rings; + u32 hw_event_rings; + u32 chdb_offset; + u32 erdb_offset; u32 index; }; -- cgit v1.2.3 From edf549e7b2396172ff374e044f3bfe843de3cf4c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 12:39:35 +0530 Subject: bus: mhi: ep: Add support for ring management Add support for managing the MHI ring. The MHI ring is a circular queue of data structures used to pass the information between host and the endpoint. MHI support 3 types of rings: 1. Transfer ring 2. Event ring 3. Command ring All rings reside inside the host memory and the MHI EP device maps it to the device memory using blocks like PCIe iATU. The mapping is handled in the MHI EP controller driver itself. 
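As a minimal sketch (illustrative only), the stack can bring up the command ring with these helpers; the cmd_ctx_cache pointer used below is wired up by a later patch in this series and is assumed here:

	struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring;
	int ret;

	mhi_ep_ring_init(ring, RING_TYPE_CMD, 0);
	/* Reads the ring context from host memory and caches the host ring locally */
	ret = mhi_ep_ring_start(mhi_cntrl, ring,
				(union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
	if (ret)
		return ret;
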
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 18 ++++ drivers/bus/mhi/ep/ring.c | 207 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 226 insertions(+), 1 deletion(-) create mode 100644 drivers/bus/mhi/ep/ring.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index a1555ae287ad..7ba0e04801eb 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o mmio.o +mhi_ep-y := main.o mmio.o ring.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 475425a30d85..d16b87061ac6 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -116,6 +116,11 @@ union mhi_ep_ring_ctx { struct mhi_generic_ctx generic; }; +struct mhi_ep_ring_item { + struct list_head node; + struct mhi_ep_ring *ring; +}; + struct mhi_ep_ring { struct mhi_ep_cntrl *mhi_cntrl; union mhi_ep_ring_ctx *ring_ctx; @@ -128,6 +133,9 @@ struct mhi_ep_ring { u32 db_offset_h; u32 db_offset_l; u32 ch_id; + u32 er_index; + u32 irq_vector; + bool started; }; struct mhi_ep_cmd { @@ -153,6 +161,16 @@ struct mhi_ep_chan { bool skip_td; }; +/* MHI Ring related functions */ +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring); + /* MMIO related functions */ u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000000..115518ec76a4 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + __le64 rlen; + + memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); + + return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; +} + +static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + int ret; + + /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
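+	 * Event ring elements are produced by the endpoint itself, so there is
+	 * no host-written content to cache for event rings.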
*/ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + + if (end) { + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, + &ring->ring_cache[0], + end * sizeof(struct mhi_ring_element)); + if (ret < 0) + return ret; + } + } + + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + + return 0; +} + +static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + wr_ptr = mhi_ep_mmio_get_db(ring); + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t old_offset = 0; + u32 num_free_elem; + __le64 rp; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (!num_free_elem) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + + /* Update rp in ring context */ + rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); + memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); + + ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), + sizeof(*el)); + if (ret < 0) + return ret; + + return 0; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->db_offset_h = EP_CRDB_HIGHER; + ring->db_offset_l = EP_CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + __le64 val; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + memcpy_fromio(&val, (void __iomem *) 
&ring->ring_ctx->generic.rbase, sizeof(u64)); + ring->rbase = le64_to_cpu(val); + + if (ring->type == RING_TYPE_CH) + ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); + + if (ring->type == RING_TYPE_ER) + ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + + /* During ring init, both rp and wp are equal */ + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); + ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64)); + ret = mhi_ep_cache_ring(ring, le64_to_cpu(val)); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + ring->started = true; + + return 0; +} + +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->started = false; + kfree(ring->ring_cache); + ring->ring_cache = NULL; +} -- cgit v1.2.3 From 6f7cb6e7883962b6393dbfbf188550a11d1490b3 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 12:47:50 +0530 Subject: bus: mhi: ep: Add support for sending events to the host Add support for sending the events to the host over MHI bus from the endpoint. Following events are supported: 1. Transfer completion event 2. Command completion event 3. State change event 4. Execution Environment (EE) change event An event is sent whenever an operation has been completed in the MHI EP device. Event is sent using the MHI event ring and additionally the host is notified using an IRQ if required. 
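For example (illustrative only), the state machine added later in this series signals an M0 transition to the host with:

	ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
	if (ret)
		dev_err(dev, "Failed sending M0 state change event\n");
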
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/common.h | 22 +++++++++++ drivers/bus/mhi/ep/internal.h | 4 ++ drivers/bus/mhi/ep/main.c | 89 +++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 8 ++++ 4 files changed, 123 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h index b4ef9acd3ce7..f794b9c8049e 100644 --- a/drivers/bus/mhi/common.h +++ b/drivers/bus/mhi/common.h @@ -165,6 +165,22 @@ #define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) #define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + /* Transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) #define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) @@ -175,6 +191,12 @@ FIELD_PREP(BIT(9), ieot) | \ FIELD_PREP(BIT(8), ieob) | \ FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) /* RSC transfer descriptor macros */ #define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index d16b87061ac6..e096d9cb2cb1 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -197,4 +197,8 @@ void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *s void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 7dcc784f10d1..eca1f58ba5fb 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -18,6 +18,93 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + 
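+			/* Event rings are started lazily on first use; report the failure and bail out */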
dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. + */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event.dword[0] = MHI_TRE_EV_DWORD0(code, len); + event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_SC_EV_DWORD0(state); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_EE_EV_DWORD0(exec_env); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event.dword[0] = MHI_CC_EV_DWORD0(code); + event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -227,6 +314,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mutex_init(&mhi_cntrl->event_lock); + /* Set MHI version and AMSS EE before enumeration */ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 5db048e258e4..46236ffb528a 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,10 +59,14 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_cache: Cache of host channel context data structure + * @ev_ctx_cache: Cache of host event context data structure + * @cmd_ctx_cache: Cache of host command context data structure * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info + * @event_lock: Lock for protecting event rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for 
allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -87,11 +91,15 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + struct mhi_chan_ctxt *ch_ctx_cache; + struct mhi_event_ctxt *ev_ctx_cache; + struct mhi_cmd_ctxt *cmd_ctx_cache; u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; struct mhi_ep_db_info chdb[4]; + struct mutex event_lock; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 484e04faa8a0487843808a08ff8b11e0684972f5 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 12:53:51 +0530 Subject: bus: mhi: ep: Add support for managing MHI state machine Add support for managing the MHI state machine by controlling the state transitions. Only the following MHI state transitions are supported: 1. Ready state 2. M0 state 3. M3 state 4. SYS_ERR state Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 11 ++++ drivers/bus/mhi/ep/main.c | 54 ++++++++++++++++- drivers/bus/mhi/ep/sm.c | 136 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 12 ++++ 5 files changed, 213 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/sm.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 7ba0e04801eb..aad85f180b70 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o mmio.o ring.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index e096d9cb2cb1..4f2e26841702 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -146,6 +146,11 @@ struct mhi_ep_event { struct mhi_ep_ring ring; }; +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + struct mhi_ep_chan { char *name; struct mhi_ep_device *mhi_dev; @@ -200,5 +205,11 @@ void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); /* MHI EP core functions */ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index eca1f58ba5fb..c912daf6dc65 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -105,6 +105,43 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + 
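+	/* Splice the pending transitions onto a local list so the lock is held only briefly */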
list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -314,6 +351,17 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + spin_lock_init(&mhi_cntrl->state_lock); + spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); /* Set MHI version and AMSS EE before enumeration */ @@ -323,7 +371,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) - goto err_free_cmd; + goto err_destroy_wq; mhi_cntrl->index = ret; @@ -351,6 +399,8 @@ err_put_dev: put_device(&mhi_dev->dev); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -364,6 +414,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + destroy_workqueue(mhi_cntrl->wq); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..ffc02f5d0a0d --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam + */ + +#include +#include +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. 
MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EIO; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + return ret; +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 46236ffb528a..2880d2aa88b8 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -67,6 +67,11 @@ struct mhi_ep_db_info { * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings + * @list_lock: Lock for protecting state transition and channel doorbell lists + * @state_lock: Lock for protecting state transitions + * @st_transition_list: List of state transitions + * @wq: Dedicated workqueue for handling rings and state changes + * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -100,6 +105,13 @@ struct mhi_ep_cntrl { struct mhi_ep_db_info chdb[4]; struct mutex event_lock; + spinlock_t list_lock; + spinlock_t state_lock; + + struct list_head st_transition_list; + + struct workqueue_struct *wq; + struct work_struct state_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From b6c52d410cb464f5cc5d962b92205f7232cb18b8 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 14:19:29 +0530 Subject: bus: mhi: ep: Add support for processing MHI endpoint interrupts Add support for processing MHI endpoint interrupts such as control interrupt, command interrupt and channel interrupt from the host. The interrupts will be generated in the endpoint device whenever host writes to the corresponding doorbell registers. The doorbell logic is handled inside the hardware internally. 
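Interrupts flow in both directions here: the host's doorbell writes surface in the endpoint as the single mhi_cntrl->irq requested by this patch, while the endpoint signals the host's event rings through the mhi_cntrl->raise_irq() callback. A minimal sketch of the controller-driver glue for both directions follows; the pci_epf_mhi structure, the container_of() wrapper and the exact MSI plumbing are illustrative assumptions, only the mhi_ep_cntrl members are API from this series.

#include <linux/mhi_ep.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Hypothetical private structure of a PCIe endpoint function driver */
struct pci_epf_mhi {
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
};

/* Raise an MSI towards the host for the given event ring vector */
static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
{
	struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;

	/* Event ring vectors are 0-based, MSI numbers start at 1 (assumed mapping) */
	pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, vector + 1);
}

Before registering, such a driver would populate both directions, i.e. set mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq and point mhi_cntrl->irq at its device-side doorbell interrupt, since mhi_ep_register_controller() below now rejects a controller without a valid irq.
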
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 124 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 4 ++ 2 files changed, 126 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index c912daf6dc65..4e82006bd83b 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -142,6 +143,112 @@ static void mhi_ep_state_worker(struct work_struct *work) } } +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers each of 32bit length. + * For checking all interrupts, we need to loop through each registers and then + * check for bits set. + */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. 
+ */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + dev_dbg(dev, "Processing command doorbell interrupt\n"); + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -338,7 +445,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -360,6 +467,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); spin_lock_init(&mhi_cntrl->state_lock); spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); @@ -375,12 +483,20 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->index = ret; + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + /* Allocate the controller device */ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); if (IS_ERR(mhi_dev)) { dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); ret = PTR_ERR(mhi_dev); - goto err_ida_free; + goto err_free_irq; } dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); @@ -397,6 +513,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, err_put_dev: put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: @@ -416,6 +534,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) destroy_workqueue(mhi_cntrl->wq); + free_irq(mhi_cntrl->irq, mhi_cntrl); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2880d2aa88b8..137bd3ee2e43 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -70,6 +70,7 @@ struct mhi_ep_db_info { * @list_lock: Lock for protecting state transition and channel doorbell lists * @state_lock: Lock for protecting state transitions * @st_transition_list: List of state transitions + * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host @@ -85,6 +86,7 @@ struct mhi_ep_db_info { * @chdb_offset: Channel doorbell offset set by the host * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index + * @irq: IRQ used by the endpoint controller */ 
struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -109,6 +111,7 @@ struct mhi_ep_cntrl { spinlock_t state_lock; struct list_head st_transition_list; + struct list_head ch_db_list; struct workqueue_struct *wq; struct work_struct state_work; @@ -130,6 +133,7 @@ struct mhi_ep_cntrl { u32 chdb_offset; u32 erdb_offset; u32 index; + int irq; }; /** -- cgit v1.2.3 From 7de7780c43b1c793d67b5fb16232ab5df00a676c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 13:48:05 +0530 Subject: bus: mhi: ep: Add support for powering up the MHI endpoint stack Add support for MHI endpoint power_up that includes initializing the MMIO and rings, caching the host MHI registers, and setting the MHI state to M0. After registering the MHI EP controller, the stack has to be powered up for usage. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 205 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 16 ++++ 2 files changed, 221 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 4e82006bd83b..20d579733486 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -17,6 +17,9 @@ #include #include "internal.h" +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + static DEFINE_IDA(mhi_ep_cntrl_ida); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -106,6 +109,154 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = 
mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -249,6 +400,60 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 137bd3ee2e43..3b065f82fbeb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -65,6 +65,9 @@ struct mhi_ep_db_info { * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure + * @ch_ctx_cache_phys: Physical address of the host channel context cache + * @ev_ctx_cache_phys: Physical address of the host event context cache + * @cmd_ctx_cache_phys: Physical address of the host command context cache * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings * @list_lock: Lock for protecting state transition and channel doorbell lists @@ -87,6 +90,7 @@ struct mhi_ep_db_info { * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index * @irq: IRQ used by the endpoint controller + * @enabled: Check if the endpoint controller is enabled or not */ struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -104,6 +108,9 @@ struct mhi_ep_cntrl { u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; + phys_addr_t ch_ctx_cache_phys; + phys_addr_t ev_ctx_cache_phys; + phys_addr_t cmd_ctx_cache_phys; struct mhi_ep_db_info chdb[4]; struct mutex event_lock; @@ -134,6 +141,7 @@ struct mhi_ep_cntrl { u32 erdb_offset; u32 index; int irq; + bool enabled; }; /** @@ -228,4 +236,12 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_up - Power up the MHI endpoint stack + * @mhi_cntrl: MHI Endpoint controller + * + * Return: 0 if power up succeeds, a negative error code otherwise. 
+ */ +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From c6dba8924201079093e1187a61934c6863b9efc1 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 13:59:16 +0530 Subject: bus: mhi: ep: Add support for powering down the MHI endpoint stack Add support for MHI endpoint power_down that includes stopping all available channels, destroying the channels, resetting the event and transfer rings and freeing the host cache. The stack will be powered down whenever the physical bus link goes down. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 6 ++++ 2 files changed, 84 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 20d579733486..968025e4d3ac 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,8 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_destroy_device(struct device *dev, void *data); + static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, struct mhi_ring_element *el, bool bei) { @@ -400,6 +402,68 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -454,6 +518,16 @@ err_free_event: } EXPORT_SYMBOL_GPL(mhi_ep_power_up); +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) + mhi_ep_abort_transfer(mhi_cntrl); + + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -733,6 +807,10 @@ err_free_ch: } 
EXPORT_SYMBOL_GPL(mhi_ep_register_controller); +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 3b065f82fbeb..9da683e8302c 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -244,4 +244,10 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); */ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_down - Power down the MHI endpoint stack + * @mhi_cntrl: MHI controller + */ +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From 2ee735c1734783ef0dbe604076676820006ff107 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 15:00:07 +0530 Subject: bus: mhi: ep: Add support for handling MHI_RESET Add support for handling MHI_RESET in MHI endpoint stack. MHI_RESET will be issued by the host during shutdown and during error scenario so that it can recover the endpoint device without restarting the whole device. MHI_RESET handling involves resetting the internal MHI registers, data structures, state machines, resetting all channels/rings and setting MHICTRL.RESET bit to 0. Additionally the device will also move to READY state if the reset was due to SYS_ERR. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 ++ 2 files changed, 55 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 968025e4d3ac..d36708d43eb6 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -381,6 +381,7 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state state; u32 int_value; + bool mhi_reset; /* Acknowledge the ctrl interrupt */ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); @@ -389,6 +390,14 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) /* Check for ctrl interrupt */ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); } @@ -464,6 +473,49 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) mhi_cntrl->enabled = false; } +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR. The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. 
+ */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -738,6 +790,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, } INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 9da683e8302c..2f31a54c205f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -76,6 +76,7 @@ struct mhi_ep_db_info { * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker + * @reset_work: Worker for MHI Endpoint reset * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -122,6 +123,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; + struct work_struct reset_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 1b54f3e8b4bb960f4b447460cc2e6dfb572b0e4a Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 15:11:38 +0530 Subject: bus: mhi: ep: Add support for handling SYS_ERR condition Add support for handling SYS_ERR (System Error) condition in the MHI endpoint stack. The SYS_ERR flag will be asserted by the endpoint device when it detects an internal error. The host will then issue reset and reinitializes MHI to recover from the error state. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/internal.h | 1 + drivers/bus/mhi/ep/main.c | 20 ++++++++++++++++++++ drivers/bus/mhi/ep/sm.c | 11 +++++++++-- 3 files changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 4f2e26841702..d201d755560c 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -211,5 +211,6 @@ int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_stat int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index d36708d43eb6..706473ea4918 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -516,6 +516,26 @@ static void mhi_ep_reset_worker(struct work_struct *work) } } +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. 
The host will reset all contexts and issue MHI RESET so that we + * could also recover from error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index ffc02f5d0a0d..e3865b85399d 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -68,8 +68,10 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); return ret; + } /* Signal host that the device moved to M0 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); @@ -99,8 +101,10 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); return ret; + } /* Signal host that the device moved to M3 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); @@ -132,5 +136,8 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); spin_unlock_bh(&mhi_cntrl->state_lock); + if (ret) + mhi_ep_handle_syserr(mhi_cntrl); + return ret; } -- cgit v1.2.3 From 2b231a40e9073a6828fb960f055b07707b6dd205 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Sat, 12 Feb 2022 20:56:27 +0530 Subject: bus: mhi: ep: Add support for processing command rings Add support for processing the command rings. Command ring is used by the host to issue channel specific commands to the ep device. Following commands are supported: 1. Start channel 2. Stop channel 3. Reset channel Once the device receives the command doorbell interrupt from host, it executes the command and generates a command completion event to the host in the primary event ring. 
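Note that all three command handlers below update the cached host channel context with the same little-endian read-modify-write sequence on the chcfg word, so the host observes the new channel state once the completion event arrives. Factored out, the pattern looks like the following sketch; the helper name is an assumption, and the patch below open-codes it in each case:

#include <linux/bitfield.h>

/* Hypothetical helper mirroring the open-coded chcfg updates below */
static void mhi_ep_set_ch_ctx_state(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id,
				    enum mhi_ch_state state)
{
	u32 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);

	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, state);
	mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
}

Writing the state back through the cache matters because the cache maps host memory: the host reads RUNNING/STOP/DISABLED out of its own channel context after each command completes.
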
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 190 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 2 + 2 files changed, 191 insertions(+), 1 deletion(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 706473ea4918..32ac567e0f67 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,7 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); static int mhi_ep_destroy_device(struct device *dev, void *data); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -111,6 +112,156 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring *ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. 
+ */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -259,6 +410,40 @@ static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) return 0; } +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring element till write offset. In case of an error, just try to + * process next element. 
+ */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -402,8 +587,10 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) } /* Check for command doorbell interrupt */ - if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } /* Check for channel interrupts */ mhi_ep_check_channel_interrupt(mhi_cntrl); @@ -811,6 +998,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2f31a54c205f..8c6406d9c51f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -77,6 +77,7 @@ struct mhi_ep_db_info { * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset + * @cmd_ring_work: Worker for processing command rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -124,6 +125,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; struct work_struct reset_work; + struct work_struct cmd_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 20c01fde1b36dec0a3ffdbd22f6d117a29cf586d Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Sat, 12 Feb 2022 21:09:23 +0530 Subject: bus: mhi: ep: Add support for reading from the host Data transfer between host and the ep device happens over the transfer ring associated with each bi-directional channel pair. Host defines the transfer ring by allocating memory for it. The read and write pointer addresses of the transfer ring are stored in the channel context. Once host places the elements in the transfer ring, it increments the write pointer and rings the channel doorbell. Device will receive the doorbell interrupt and will process the transfer ring elements. This commit adds support for reading the transfer ring elements from the transfer ring till write pointer, incrementing the read pointer and finally sending the completion event to the host through corresponding event ring. 
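The data read here ultimately lands in the client driver's UL transfer callback, invoked with one mhi_result per buffer that mhi_ep_read_channel() fills (the channel ring processing added later in this series performs the invocation). A sketch of such a consumer, where the callback and handoff names are assumptions while the mhi_result fields are the real interface:

/* Hypothetical client-driver UL callback */
static void my_client_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				 struct mhi_result *result)
{
	/* e.g. -ENOTCONN is reported when the channel is torn down */
	if (result->transaction_status)
		return;

	/* buf_addr/bytes_xferd describe the reassembled payload */
	my_client_consume(result->buf_addr, result->bytes_xferd);	/* assumed helper */
}
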
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++ 2 files changed, 130 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 32ac567e0f67..1e24eae4b446 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -262,6 +262,127 @@ err_unlock: return ret; } +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 8c6406d9c51f..fc7d197413eb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -254,4 +254,13 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); */ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * + * Return: true if the queue is empty, false otherwise. + */ +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); + #endif -- cgit v1.2.3 From 494a6f63937744d447630c9ab2f4a58f105b69f5 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Sat, 12 Feb 2022 21:32:02 +0530 Subject: bus: mhi: ep: Add support for processing channel rings Add support for processing the channel rings from host. For the channel ring associated with DL channel, the xfer callback will simply invoked. For the case of UL channel, the ring elements will be read in a buffer till the write pointer and later passed to the client driver using the xfer callback. The client drivers should provide the callbacks for both UL and DL channels during registration. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 + 2 files changed, 110 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 1e24eae4b446..e2ed10b4a9d2 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -383,6 +383,57 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return 0; } +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. 
+ */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -565,6 +616,60 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work) } } +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process next element. */ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + mutex_lock(&chan->lock); + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -630,6 +735,8 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon spin_lock(&mhi_cntrl->list_lock); list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); } } @@ -1120,6 +1227,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index fc7d197413eb..eecc8f35d630 100644 --- a/include/linux/mhi_ep.h +++ 
b/include/linux/mhi_ep.h @@ -78,6 +78,7 @@ struct mhi_ep_db_info { * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset * @cmd_ring_work: Worker for processing command rings + * @ch_ring_work: Worker for processing channel rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -126,6 +127,7 @@ struct mhi_ep_cntrl { struct work_struct state_work; struct work_struct reset_work; struct work_struct cmd_ring_work; + struct work_struct ch_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From f2a72d2410be8ad33a78b6d989861440def2c736 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 15:32:42 +0530 Subject: bus: mhi: ep: Add support for queueing SKBs to the host Add support for queueing SKBs to the host over the transfer ring of the relevant channel. The mhi_ep_queue_skb() API will be used by the client networking drivers to queue the SKBs to the host over MHI bus. The host will add ring elements to the transfer ring periodically for the device and the device will write SKBs to the ring elements. If a single SKB doesn't fit in a ring element (TRE), it will be placed in multiple ring elements and the overflow event will be sent for all ring elements except the last one. For the last ring element, the EOT event will be sent indicating the packet boundary. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++++ 2 files changed, 91 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index e2ed10b4a9d2..660d1e9791d3 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -434,6 +434,88 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem return 0; } +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to 
the channel\n"); + goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. + */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index eecc8f35d630..478aece17046 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -265,4 +265,13 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); */ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); +/** + * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint + * @mhi_dev: Device associated with the DL channel + * @skb: SKBs to be queued + * + * Return: 0 if the SKBs has been sent successfully, a negative error code otherwise. + */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb); + #endif -- cgit v1.2.3 From 5fb83d97aa176035e1f55af1025128f5df59d878 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 16:25:24 +0530 Subject: bus: mhi: ep: Add support for suspending and resuming channels Add support for suspending and resuming the channels in MHI endpoint stack. The channels will be moved to the suspended state during M3 state transition and will be resumed during M0 transition. 
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/internal.h | 2 ++ drivers/bus/mhi/ep/main.c | 58 +++++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/ep/sm.c | 5 ++++ 3 files changed, 65 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index d201d755560c..a2125fa5fe2f 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -212,5 +212,7 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 660d1e9791d3..bae5f40ec15e 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -1097,6 +1097,64 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_ep_power_down); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); + /* Set channel state to SUSPENDED */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); + /* Set channel state to RUNNING */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index e3865b85399d..3655c19e23c7 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -62,8 +62,11 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) enum mhi_state old_state; int ret; + /* If MHI is in M3, resume suspended channels */ spin_lock_bh(&mhi_cntrl->state_lock); old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); spin_unlock_bh(&mhi_cntrl->state_lock); @@ -106,6 +109,8 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) return ret; } + mhi_ep_suspend_channels(mhi_cntrl); + /* Signal host that 
the device moved to M3 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); if (ret) { -- cgit v1.2.3 From 5d4be19cbe6aadfad0a5f40df91bd478cedd8344 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 29 Nov 2021 16:35:35 +0530 Subject: bus: mhi: ep: Add uevent support for module autoloading Add uevent support to the MHI endpoint bus so that the client drivers can be autoloaded by udev when the MHI endpoint devices get created. The client drivers are expected to provide MODULE_DEVICE_TABLE with the MHI id_table struct so that the alias can be exported. The MHI endpoint bus reuses the mhi_device_id structure of the MHI bus. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/ep/main.c | 9 +++++++++ include/linux/mod_devicetable.h | 2 ++ scripts/mod/file2alias.c | 10 ++++++++++ 3 files changed, 21 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index bae5f40ec15e..40109a79017a 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -1536,6 +1536,14 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -1562,6 +1570,7 @@ struct bus_type mhi_ep_bus_type = { .name = "mhi_ep", .dev_name = "mhi_ep", .match = mhi_ep_match, + .uevent = mhi_ep_uevent, }; static int __init mhi_ep_init(void) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5da5d990ff58..549590e9c644 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -835,6 +835,8 @@ struct wmi_device_id { #define MHI_DEVICE_MODALIAS_FMT "mhi:%s" #define MHI_NAME_SIZE 32 +#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s" + /** * struct mhi_device_id - MHI device identification * @chan: MHI channel name diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 5258247d78ac..d9d6a31446ea 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1391,6 +1391,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: mhi_ep:S */ +static int do_mhi_ep_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD_ADDR(symval, mhi_device_id, chan); + sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan); + + return 1; +} + /* Looks like: ishtp:{guid} */ static int do_ishtp_entry(const char *filename, void *symval, char *alias) { @@ -1519,6 +1528,7 @@ static const struct devtable devtable[] = { {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, {"mhi", SIZE_mhi_device_id, do_mhi_entry}, + {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry}, {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry}, {"ssam", SIZE_ssam_device_id, do_ssam_entry}, {"dfl", SIZE_dfl_device_id, do_dfl_entry}, -- cgit v1.2.3 From e565d3efd894f0fba844cf3eafd098d66c266333 Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Wed, 13 Apr 2022 10:41:53 -0600 Subject: bus: mhi: host: Use cached values for calculating the shared write pointer mhi_recycle_ev_ring() computes the shared write pointer for the ring (ctxt_wp) using a read/modify/write pattern where the ctxt_wp value in
the shared memory is read, incremented, and written back. There are no checks on the read value; it is assumed to be kept in sync with the locally cached value. Per the MHI spec, this is correct. The device should only read ctxt_wp, never write it. However, there are devices in the wild that violate the spec and can update the ctxt_wp in a specific scenario. This can cause corruption, and violate the above assumption that the ctxt_wp is in sync with the cached value. This can occur when the device has loaded firmware from the host, and is transitioning from the SBL EE to the AMSS EE. As part of shutting down SBL, the SBL flushes its local MHI context to the shared memory since the local context will not persist across an EE change. In the case of the event ring, SBL will flush its entire context, not just the parts that it is allowed to update. This means SBL will write to ctxt_wp, and possibly corrupt it. An example:

Host                             Device
----                             ------
Update ctxt_wp to 0x1f0
                                 SBL observes 0x1f0
Update ctxt_wp to 0x0
                                 Starts transition to AMSS EE
                                 Context flush, writes 0x1f0 to ctxt_wp
Update ctxt_wp to 0x200
Update ctxt_wp to 0x210
                                 AMSS observes 0x210
                                 0x210 exceeds ring size
                                 AMSS signals syserr

The reason the ctxt_wp goes off the end of the ring is that the rollover check is only performed on the cached wp, which is out of sync with ctxt_wp. Since the host is the authority of the value of ctxt_wp per the MHI spec, we can fix this issue by not reading ctxt_wp from the shared memory, and instead computing it based on the cached value. If SBL corrupts ctxt_wp, the host won't observe it, and will correct the value at some point later. Signed-off-by: Jeffrey Hugo Reviewed-by: Hemant Kumar Reviewed-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1649868113-18826-1-git-send-email-quic_jhugo@quicinc.com [mani: used the quicinc domain for Hemant and Bhaumik] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/main.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 9021be7f2359..54780923f6cf 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -531,18 +531,13 @@ irqreturn_t mhi_intvec_handler(int irq_number, void *dev) static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, struct mhi_ring *ring) { - dma_addr_t ctxt_wp; - /* Update the WP */ ring->wp += ring->el_size; - ctxt_wp = le64_to_cpu(*ring->ctxt_wp) + ring->el_size; - if (ring->wp >= (ring->base + ring->len)) { + if (ring->wp >= (ring->base + ring->len)) ring->wp = ring->base; - ctxt_wp = ring->iommu_base; - } - *ring->ctxt_wp = cpu_to_le64(ctxt_wp); + *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); /* Update the RP */ ring->rp += ring->el_size; -- cgit v1.2.3 From 89ad19bea64948685da01035af8706e587cf5d25 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Mon, 11 Apr 2022 19:04:28 +0530 Subject: bus: mhi: host: pci_generic: Sort mhi_pci_id_table based on the PID Sorting this way helps in identifying the products of each vendor. No sorting is required for the VID; a new VID should simply be added as the last entry. Let's also add a note clarifying this.
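To spell the convention out: within a vendor block, entries ascend by PID (0x0304, 0x0306, 0x0308 in the Qualcomm block below), so a new product slots in at its numeric position, while the first entry for a previously unseen VID is simply appended at the end of the table rather than sorted in.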
Signed-off-by: Manivannan Sadhasivam Reviewed-by: Daniele Palmas Link: https://lore.kernel.org/r/20220411133428.42165-1-manivannan.sadhasivam@linaro.org Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/pci_generic.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 9527b7d63840..dfcd7535750f 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -446,20 +446,21 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .sideband_wake = false, }; +/* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, - { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), - .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, /* T99W175 (sdx55), Both for eSIM and Non-eSIM */ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab), .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info }, -- cgit v1.2.3 From 95c33ae41b822c37dc841cf80ca388fea376e36d Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Mon, 18 Apr 2022 11:18:47 -0600 Subject: bus: mhi: host: Add soc_reset sysfs The MHI bus supports a standardized hardware reset, which is known as the "SoC Reset". This reset is similar to the reset sysfs for PCI devices - a hardware mechanism to reset the state back to square one. The MHI SoC Reset is described in the spec as a reset of last resort. If some unrecoverable error has occurred where other resets have failed, SoC Reset is the "big hammer" that ungracefully resets the device. This is effectively the same as yanking the power on the device, and reapplying it. However, depending on the nature of the particular issue, the underlying transport link may remain active and configured. If the link remains up, the device will flag an MHI system error early in the boot process after the reset is executed, which allows the MHI bus to process a fatal error event, and clean up appropriately. While the SoC Reset is generally intended as a means of recovery when all else has failed, it can be useful in non-error scenarios. For example, if the device loads firmware from the host filesystem, the device may need to be fully rebooted in order to pick up the new firmware. In this scenario, the system administrator may use the soc_reset sysfs to cause the device to pick up the new firmware that the admin placed on the filesystem.
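In practice the attribute is write-only and the written value is ignored; any write triggers the reset. A hypothetical session (the device name is illustrative):

  echo 1 > /sys/bus/mhi/devices/mhi0/soc_reset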
Signed-off-by: Jeffrey Hugo Reviewed-by: Bhaumik Bhatt Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1650302327-30439-1-git-send-email-quic_jhugo@quicinc.com Signed-off-by: Manivannan Sadhasivam --- Documentation/ABI/stable/sysfs-bus-mhi | 10 ++++++++++ drivers/bus/mhi/host/init.c | 14 ++++++++++++++ 2 files changed, 24 insertions(+) (limited to 'drivers/bus') diff --git a/Documentation/ABI/stable/sysfs-bus-mhi b/Documentation/ABI/stable/sysfs-bus-mhi index ecfe7662f8d0..96ccc3385a2b 100644 --- a/Documentation/ABI/stable/sysfs-bus-mhi +++ b/Documentation/ABI/stable/sysfs-bus-mhi @@ -19,3 +19,13 @@ Description: The file holds the OEM PK Hash value of the endpoint device read without having the device power on at least once, the file will read all 0's. Users: Any userspace application or clients interested in device info. + +What: /sys/bus/mhi/devices/.../soc_reset +Date: April 2022 +KernelVersion: 5.19 +Contact: mhi@lists.linux.dev +Description: Initiates a SoC reset on the MHI controller. A SoC reset is + a reset of last resort, and will require a complete re-init. + This can be useful as a method of recovery if the device is + non-responsive, or as a means of loading new firmware as a + system administration task. diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a665b8e92408..a8c18c5f1672 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -108,9 +108,23 @@ static ssize_t oem_pk_hash_show(struct device *dev, } static DEVICE_ATTR_RO(oem_pk_hash); +static ssize_t soc_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + + mhi_soc_reset(mhi_cntrl); + return count; +} +static DEVICE_ATTR_WO(soc_reset); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, + &dev_attr_soc_reset.attr, NULL, }; ATTRIBUTE_GROUPS(mhi_dev); -- cgit v1.2.3 From 36e5505dfb42e86b25ded0a023dace6cff875cc3 Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Mon, 18 Apr 2022 11:22:42 -0600 Subject: bus: mhi: host: Wait for ready state after reset After the device has signaled the end of reset by clearing the reset bit, it will automatically reinit MHI and the internal device structures. Once that is done, the device will signal it has entered the ready state. Signaling the ready state involves sending an interrupt (MSI) to the host, which might cause IOMMU faults if it occurs at the wrong time. If the controller is being powered down, and possibly removed, then the reset flow would only wait for the end of reset, at which point the host and device would start a race. The host may complete its reset work and remove the interrupt handler, which would cause the interrupt to be disabled in the IOMMU. If that occurs before the device signals the ready state, then the IOMMU will fault since it blocked an interrupt. While harmless, the fault would make it appear as though a serious issue has occurred, so let's silence it by making sure the device hits the ready state before the host completes its reset processing.
Signed-off-by: Jeffrey Hugo Reviewed-by: Hemant Kumar Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1650302562-30964-1-git-send-email-quic_jhugo@quicinc.com Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/pm.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 3d90b8ecd3d9..c000a92fd3ab 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -476,6 +476,15 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) * hence re-program it */ mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0); + + if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { + /* wait for ready to be set */ + ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, + MHISTATUS, + MHISTATUS_READY_MASK, 1, 25000); + if (ret) + dev_err(dev, "Device failed to enter READY state\n"); + } } dev_dbg(dev, -- cgit v1.2.3 From 0bca889fd6fe6fbedf6de6f807c8f588cb4e639d Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 18 Apr 2022 11:50:25 -0600 Subject: bus: mhi: host: Bail on writing register fields if read fails Helper API to write register fields relies on successful reads of the register/address prior to the write. Bail out if a failure is seen when reading the register before the actual write is performed. Signed-off-by: Bhaumik Bhatt Signed-off-by: Jeffrey Hugo Reviewed-by: Hemant Kumar Reviewed-by: Jeffrey Hugo Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1650304226-11080-2-git-send-email-quic_jhugo@quicinc.com Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/boot.c | 22 ++++++++++++++++------ drivers/bus/mhi/host/init.c | 22 +++++++++++++++++----- drivers/bus/mhi/host/internal.h | 7 ++++--- drivers/bus/mhi/host/main.c | 9 ++++++--- drivers/bus/mhi/host/pm.c | 15 +++++++++++---- 5 files changed, 54 insertions(+), 21 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c index b0da7ca4519c..26d0eddb1477 100644 --- a/drivers/bus/mhi/host/boot.c +++ b/drivers/bus/mhi/host/boot.c @@ -19,8 +19,8 @@ #include "internal.h" /* Setup RDDM vector table for RDDM transfer and program RXVEC */ -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, - struct image_info *img_info) +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, + struct image_info *img_info) { struct mhi_buf *mhi_buf = img_info->mhi_buf; struct bhi_vec_entry *bhi_vec = img_info->bhi_vec; @@ -28,6 +28,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 sequence_id; unsigned int i; + int ret; for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) { bhi_vec->dma_addr = mhi_buf->dma_addr; @@ -45,11 +46,17 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len); sequence_id = MHI_RANDOM_U32_NONZERO(BHIE_RXVECSTATUS_SEQNUM_BMSK); - mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, - BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_RXVECDB_OFFS, + BHIE_RXVECDB_SEQNUM_BMSK, sequence_id); + if (ret) { + dev_err(dev, "Failed to write sequence ID for BHIE_RXVECDB\n"); + return ret; + } dev_dbg(dev, "Address: %p and len: 0x%zx sequence: %u\n", &mhi_buf->dma_addr, mhi_buf->len, sequence_id); + + return 0; } /* Collect RDDM buffer during kernel panic */ @@ -198,10 +205,13 @@ static int mhi_fw_load_bhie(struct mhi_controller *mhi_cntrl, 
mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len); - mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, - BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); + ret = mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS, + BHIE_TXVECDB_SEQNUM_BMSK, sequence_id); read_unlock_bh(pm_lock); + if (ret) + return ret; + /* Wait for the image download to complete */ ret = wait_event_timeout(mhi_cntrl->state_event, MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state) || diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a8c18c5f1672..38426111cf6e 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -547,9 +547,14 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; /* Write to MMIO registers */ - for (i = 0; reg_info[i].offset; i++) - mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, - reg_info[i].mask, reg_info[i].val); + for (i = 0; reg_info[i].offset; i++) { + ret = mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].mask, reg_info[i].val); + if (ret) { + dev_err(dev, "Unable to write to MMIO registers\n"); + return ret; + } + } return 0; } @@ -1117,8 +1122,15 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl) */ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, mhi_cntrl->rddm_size); - if (mhi_cntrl->rddm_image) - mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image); + if (mhi_cntrl->rddm_image) { + ret = mhi_rddm_prepare(mhi_cntrl, + mhi_cntrl->rddm_image); + if (ret) { + mhi_free_bhie_table(mhi_cntrl, + mhi_cntrl->rddm_image); + goto error_reg_offset; + } + } } mutex_unlock(&mhi_cntrl->pm_mutex); diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index b47d8ef2624a..01fd10a399b6 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -324,8 +324,9 @@ int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, u32 val, u32 delayus); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 val); +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val); void mhi_ring_er_db(struct mhi_event *mhi_event); void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, dma_addr_t db_val); @@ -339,7 +340,7 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl); void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl); int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl); void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl); -void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, +int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl, struct image_info *img_info); void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl); diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 54780923f6cf..f3aef77a6a4a 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -65,19 +65,22 @@ void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); } -void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, - u32 offset, u32 mask, u32 val) +int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, + void __iomem *base, u32 offset, u32 mask, + u32 val) { int ret; u32 tmp; ret = mhi_read_reg(mhi_cntrl, 
base, offset, &tmp); if (ret) - return; + return ret; tmp &= ~mask; tmp |= (val << __ffs(mask)); mhi_write_reg(mhi_cntrl, base, offset, tmp); + + return 0; } void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index c000a92fd3ab..dc2e8ff3bff2 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -129,13 +129,20 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state) { + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + if (state == MHI_STATE_RESET) { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 1); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_RESET_MASK, 1); } else { - mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_MHISTATE_MASK, state); + ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, + MHICTRL_MHISTATE_MASK, state); } + + if (ret) + dev_err(dev, "Failed to set MHI state to: %s\n", + mhi_state_str(state)); } /* NOP for backward compatibility, host allowed to ring DB in M2 state */ -- cgit v1.2.3 From d126bfeaf72161c4028769d735ce4a778a029657 Mon Sep 17 00:00:00 2001 From: Bhaumik Bhatt Date: Mon, 18 Apr 2022 11:50:26 -0600 Subject: bus: mhi: host: Optimize and update MMIO register write method As of now, MMIO writes done after ready state transition use the mhi_write_reg_field() API even though the whole register is being written in most cases. Optimize this process by using mhi_write_reg() API instead for those writes and use the mhi_write_reg_field() API for MHI config registers only. Signed-off-by: Bhaumik Bhatt Signed-off-by: Jeffrey Hugo Reviewed-by: Hemant Kumar Reviewed-by: Jeffrey Hugo Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1650304226-11080-3-git-send-email-quic_jhugo@quicinc.com Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/init.c | 62 ++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 31 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 38426111cf6e..cbb86b21063e 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -439,74 +439,65 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; struct { u32 offset; - u32 mask; u32 val; } reg_info[] = { { - CCABAP_HIGHER, U32_MAX, + CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - CCABAP_LOWER, U32_MAX, + CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), }, { - ECABAP_HIGHER, U32_MAX, + ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - ECABAP_LOWER, U32_MAX, + ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), }, { - CRCBAP_HIGHER, U32_MAX, + CRCBAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - CRCBAP_LOWER, U32_MAX, + CRCBAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), }, { - MHICFG, MHICFG_NER_MASK, - mhi_cntrl->total_ev_rings, - }, - { - MHICFG, MHICFG_NHWER_MASK, - mhi_cntrl->hw_ev_rings, - }, - { - MHICTRLBASE_HIGHER, U32_MAX, + MHICTRLBASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLBASE_LOWER, U32_MAX, + MHICTRLBASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_HIGHER, U32_MAX, + MHIDATABASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start), }, { - MHIDATABASE_LOWER, 
U32_MAX, + MHIDATABASE_LOWER, lower_32_bits(mhi_cntrl->iova_start), }, { - MHICTRLLIMIT_HIGHER, U32_MAX, + MHICTRLLIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHICTRLLIMIT_LOWER, U32_MAX, + MHICTRLLIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_HIGHER, U32_MAX, + MHIDATALIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop), }, { - MHIDATALIMIT_LOWER, U32_MAX, + MHIDATALIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop), }, - { 0, 0, 0 } + {0, 0} }; dev_dbg(dev, "Initializing MHI registers\n"); @@ -547,13 +538,22 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; /* Write to MMIO registers */ - for (i = 0; reg_info[i].offset; i++) { - ret = mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset, - reg_info[i].mask, reg_info[i].val); - if (ret) { - dev_err(dev, "Unable to write to MMIO registers\n"); - return ret; - } + for (i = 0; reg_info[i].offset; i++) + mhi_write_reg(mhi_cntrl, base, reg_info[i].offset, + reg_info[i].val); + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK, + mhi_cntrl->total_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; + } + + ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK, + mhi_cntrl->hw_ev_rings); + if (ret) { + dev_err(dev, "Unable to write MHICFG register\n"); + return ret; } return 0; -- cgit v1.2.3 From 17e6ff4937be04b986558d752802bde48198e9c3 Mon Sep 17 00:00:00 2001 From: Slark Xiao Date: Thu, 21 Apr 2022 17:21:41 +0800 Subject: bus: mhi: host: Add support for Cinterion MV32-WA/MV32-WB MV32-WA is designed based on Qualcomm SDX62, and MV32-WB is designed based on Qualcomm SDX65. Both products enumerate in the same way as the earlier MV31-W, so we merge MV31 and MV32 into MV3X for the common settings.
Signed-off-by: Slark Xiao Reviewed-by: Loic Poulain Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220421092141.3984-1-slark_xiao@163.com [mani: removed the fixes tag that's not needed] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/pci_generic.c | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index dfcd7535750f..476699b8039b 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -371,7 +371,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = { .sideband_wake = false, }; -static const struct mhi_channel_config mhi_mv31_channels[] = { +static const struct mhi_channel_config mhi_mv3x_channels[] = { MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0), MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0), /* MBIM Control Channel */ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0), MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0), /* MBIM Data Channel */ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3), }; -static struct mhi_event_config mhi_mv31_events[] = { +static struct mhi_event_config mhi_mv3x_events[] = { MHI_EVENT_CONFIG_CTRL(0, 256), MHI_EVENT_CONFIG_DATA(1, 256), MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101), }; -static const struct mhi_controller_config modem_mv31_config = { +static const struct mhi_controller_config modem_mv3x_config = { .max_channels = 128, .timeout_ms = 20000, - .num_channels = ARRAY_SIZE(mhi_mv31_channels), - .ch_cfg = mhi_mv31_channels, - .num_events = ARRAY_SIZE(mhi_mv31_events), - .event_cfg = mhi_mv31_events, + .num_channels = ARRAY_SIZE(mhi_mv3x_channels), + .ch_cfg = mhi_mv3x_channels, + .num_events = ARRAY_SIZE(mhi_mv3x_events), + .event_cfg = mhi_mv3x_events, }; static const struct mhi_pci_dev_info mhi_mv31_info = { .name = "cinterion-mv31", - .config = &modem_mv31_config, + .config = &modem_mv3x_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, +}; + +static const struct mhi_pci_dev_info mhi_mv32_info = { + .name = "cinterion-mv32", + .config = &modem_mv3x_config, .bar_num = MHI_PCI_DEFAULT_BAR_NUM, .dma_data_width = 32, .mru_default = 32768, @@ -476,6 +484,12 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* MV31-W (Cinterion) */ { PCI_DEVICE(0x1269, 0x00b3), .driver_data = (kernel_ulong_t) &mhi_mv31_info }, + /* MV32-WA (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00ba), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, + /* MV32-WB (Cinterion) */ + { PCI_DEVICE(0x1269, 0x00bb), + .driver_data = (kernel_ulong_t) &mhi_mv32_info }, { } }; MODULE_DEVICE_TABLE(pci, mhi_pci_id_table); -- cgit v1.2.3 From d434743e5cac3558381b767f343eef5af246a4dc Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:37 +0530 Subject: bus: mhi: ep: Add support for registering MHI endpoint controllers This commit adds support for registering MHI endpoint controller drivers with the MHI endpoint stack. MHI endpoint controller drivers manage the interaction with the host machines (such as x86). They are also the MHI endpoint bus master in charge of managing the physical link between the host and endpoint device. Even though the MHI spec is bus agnostic, the current implementation is entirely based on PCIe bus. The endpoint controller driver encloses all information about the underlying physical bus like PCIe.
The registration process involves parsing the channel configuration and allocating an MHI EP device. Channels used in the endpoint stack follow the perspective of the MHI host stack, i.e., UL - from host to endpoint, DL - from endpoint to host. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-2-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/Kconfig | 1 + drivers/bus/mhi/Makefile | 3 + drivers/bus/mhi/ep/Kconfig | 10 ++ drivers/bus/mhi/ep/Makefile | 2 + drivers/bus/mhi/ep/internal.h | 156 ++++++++++++++++++++++++++++ drivers/bus/mhi/ep/main.c | 236 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 136 ++++++++++++++++++++++++ 7 files changed, 544 insertions(+) create mode 100644 drivers/bus/mhi/ep/Kconfig create mode 100644 drivers/bus/mhi/ep/Makefile create mode 100644 drivers/bus/mhi/ep/internal.h create mode 100644 drivers/bus/mhi/ep/main.c create mode 100644 include/linux/mhi_ep.h (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index 4748df7f9cd5..b39a11e6c624 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -6,3 +6,4 @@ # source "drivers/bus/mhi/host/Kconfig" +source "drivers/bus/mhi/ep/Kconfig" diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 5f5708a249f5..46981331b38f 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,2 +1,5 @@ # Host MHI stack obj-y += host/ + +# Endpoint MHI stack +obj-y += ep/ diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig new file mode 100644 index 000000000000..90ab3b040672 --- /dev/null +++ b/drivers/bus/mhi/ep/Kconfig @@ -0,0 +1,10 @@ +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by a host processor to control + and communicate a modem device over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for the endpoint devices, + such as SDX55 modem connected to the host machine over PCIe. diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000000..64e29252b608 --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o +mhi_ep-y := main.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000000..0d3923186a5e --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. 
+ * + */ + +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include + +#include "../common.h" + +extern struct bus_type mhi_ep_bus_type; + +#define MHI_REG_OFFSET 0x100 +#define BHI_REG_OFFSET 0x200 + +/* MHI registers */ +#define EP_MHIREGLEN (MHI_REG_OFFSET + MHIREGLEN) +#define EP_MHIVER (MHI_REG_OFFSET + MHIVER) +#define EP_MHICFG (MHI_REG_OFFSET + MHICFG) +#define EP_CHDBOFF (MHI_REG_OFFSET + CHDBOFF) +#define EP_ERDBOFF (MHI_REG_OFFSET + ERDBOFF) +#define EP_BHIOFF (MHI_REG_OFFSET + BHIOFF) +#define EP_BHIEOFF (MHI_REG_OFFSET + BHIEOFF) +#define EP_DEBUGOFF (MHI_REG_OFFSET + DEBUGOFF) +#define EP_MHICTRL (MHI_REG_OFFSET + MHICTRL) +#define EP_MHISTATUS (MHI_REG_OFFSET + MHISTATUS) +#define EP_CCABAP_LOWER (MHI_REG_OFFSET + CCABAP_LOWER) +#define EP_CCABAP_HIGHER (MHI_REG_OFFSET + CCABAP_HIGHER) +#define EP_ECABAP_LOWER (MHI_REG_OFFSET + ECABAP_LOWER) +#define EP_ECABAP_HIGHER (MHI_REG_OFFSET + ECABAP_HIGHER) +#define EP_CRCBAP_LOWER (MHI_REG_OFFSET + CRCBAP_LOWER) +#define EP_CRCBAP_HIGHER (MHI_REG_OFFSET + CRCBAP_HIGHER) +#define EP_CRDB_LOWER (MHI_REG_OFFSET + CRDB_LOWER) +#define EP_CRDB_HIGHER (MHI_REG_OFFSET + CRDB_HIGHER) +#define EP_MHICTRLBASE_LOWER (MHI_REG_OFFSET + MHICTRLBASE_LOWER) +#define EP_MHICTRLBASE_HIGHER (MHI_REG_OFFSET + MHICTRLBASE_HIGHER) +#define EP_MHICTRLLIMIT_LOWER (MHI_REG_OFFSET + MHICTRLLIMIT_LOWER) +#define EP_MHICTRLLIMIT_HIGHER (MHI_REG_OFFSET + MHICTRLLIMIT_HIGHER) +#define EP_MHIDATABASE_LOWER (MHI_REG_OFFSET + MHIDATABASE_LOWER) +#define EP_MHIDATABASE_HIGHER (MHI_REG_OFFSET + MHIDATABASE_HIGHER) +#define EP_MHIDATALIMIT_LOWER (MHI_REG_OFFSET + MHIDATALIMIT_LOWER) +#define EP_MHIDATALIMIT_HIGHER (MHI_REG_OFFSET + MHIDATALIMIT_HIGHER) + +/* MHI BHI registers */ +#define EP_BHI_INTVEC (BHI_REG_OFFSET + BHI_INTVEC) +#define EP_BHI_EXECENV (BHI_REG_OFFSET + BHI_EXECENV) + +/* MHI Doorbell registers */ +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n)) + +#define MHI_CTRL_INT_STATUS 0x4 +#define MHI_CTRL_INT_STATUS_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_n_CLEAR_ALL GENMASK(31, 0) + +/* + * Unlike the usual "masking" convention, writing "1" to a bit in this register + * enables the interrupt and writing "0" will disable it.. 
+ */ +#define MHI_CTRL_INT_MASK 0x94 +#define MHI_CTRL_INT_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_CRDB_MASK BIT(1) + +#define MHI_CHDB_INT_MASK_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_n_EN_ALL GENMASK(31, 0) + +#define NR_OF_CMD_RINGS 1 +#define MHI_MASK_ROWS_CH_DB 4 +#define MHI_MASK_ROWS_EV_DB 4 +#define MHI_MASK_CH_LEN 32 +#define MHI_MASK_EV_LEN 32 + +/* Generic context */ +struct mhi_generic_ctx { + __le32 reserved0; + __le32 reserved1; + __le32 reserved2; + + __le64 rbase __packed __aligned(4); + __le64 rlen __packed __aligned(4); + __le64 rp __packed __aligned(4); + __le64 wp __packed __aligned(4); +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD, + RING_TYPE_ER, + RING_TYPE_CH, +}; + +/* Ring element */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctxt cmd; + struct mhi_event_ctxt ev; + struct mhi_chan_ctxt ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring { + struct mhi_ep_cntrl *mhi_cntrl; + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ring_element *ring_cache; + enum mhi_ep_ring_type type; + u64 rbase; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..d932ad01761b --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Endpoint bus stack + * + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "internal.h" + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + mhi_dev->mhi_cntrl->mhi_dev = NULL; + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created in mhi_ep_create_device() + * if the mhi_dev associated with it is NULL. + */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_device_type dev_type) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + /* Controller device is always allocated first */ + if (dev_type == MHI_DEVICE_CONTROLLER) + /* for MHI controller device, parent is the bus device (e.g. 
PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + else + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + + mhi_dev->mhi_cntrl = mhi_cntrl; + mhi_dev->dev_type = dev_type; + + return mhi_dev; +} + +static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + dev_err(dev, "Channel (%u) exceeds maximum available channels (%u)\n", + chan, mhi_cntrl->max_chan); + goto error_chan_cfg; + } + + /* Bi-directional and direction less channels are not supported */ + if (ch_cfg->dir == DMA_BIDIRECTIONAL || ch_cfg->dir == DMA_NONE) { + dev_err(dev, "Invalid direction (%u) for channel (%u)\n", + ch_cfg->dir, chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_power_up() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + return -EINVAL; + + ret = mhi_ep_chan_init(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + /* Set controller index */ + ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (ret < 0) + goto err_free_cmd; + + mhi_cntrl->index = ret; + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_ida_free; + } + + dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + mhi_cntrl->mhi_dev = mhi_dev; + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_put_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_put_dev: + put_device(&mhi_dev->dev); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_match(struct device *dev, struct 
device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + return 0; +}; + +struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam "); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h new file mode 100644 index 000000000000..891b199a9703 --- /dev/null +++ b/include/linux/mhi_ep.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022, Linaro Ltd. + * + */ +#ifndef _MHI_EP_H_ +#define _MHI_EP_H_ + +#include +#include + +#define MHI_EP_DEFAULT_MTU 0x8000 + +/** + * struct mhi_ep_channel_config - Channel configuration structure for controller + * @name: The name of this channel + * @num: The number assigned to this channel + * @num_elements: The number of elements that can be queued to this channel + * @dir: Direction that data may flow on this channel + */ +struct mhi_ep_channel_config { + char *name; + u32 num; + u32 num_elements; + enum dma_data_direction dir; +}; + +/** + * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration + * @mhi_version: MHI spec version supported by the controller + * @max_channels: Maximum number of channels supported + * @num_channels: Number of channels defined in @ch_cfg + * @ch_cfg: Array of defined channels + */ +struct mhi_ep_cntrl_config { + u32 mhi_version; + u32 max_channels; + u32 num_channels; + const struct mhi_ep_channel_config *ch_cfg; +}; + +/** + * struct mhi_ep_db_info - MHI Endpoint doorbell info + * @mask: Mask of the doorbell interrupt + * @status: Status of the doorbell interrupt + */ +struct mhi_ep_db_info { + u32 mask; + u32 status; +}; + +/** + * struct mhi_ep_cntrl - MHI Endpoint controller structure + * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI + * Endpoint controller + * @mhi_dev: MHI Endpoint device instance for the controller + * @mmio: MMIO region containing the MHI registers + * @mhi_chan: Points to the channel configuration table + * @mhi_event: Points to the event ring configurations table + * @mhi_cmd: Points to the command ring configurations table + * @sm: MHI Endpoint state machine + * @raise_irq: CB function for raising IRQ to the host + * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it + * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context + * @read_from_host: CB function for reading from host memory from endpoint + * @write_to_host: CB function for writing to host memory from endpoint + * @mhi_state: MHI Endpoint state + * @max_chan: Maximum channels supported by the endpoint controller + * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @index: MHI Endpoint controller index + */ +struct mhi_ep_cntrl { + struct device *cntrl_dev; + struct mhi_ep_device *mhi_dev; + void __iomem *mmio; + + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_event *mhi_event; + struct mhi_ep_cmd *mhi_cmd; + struct mhi_ep_sm *sm; + + void (*raise_irq)(struct 
mhi_ep_cntrl *mhi_cntrl, u32 vector); + int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, + void __iomem **virt, size_t size); + void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys, + void __iomem *virt, size_t size); + int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size); + int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size); + + enum mhi_state mhi_state; + + u32 max_chan; + u32 mru; + u32 index; +}; + +/** + * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds + * to channels or is associated with controllers + * @dev: Driver model device node for the MHI Endpoint device + * @mhi_cntrl: Controller the device belongs to + * @id: Pointer to MHI Endpoint device ID struct + * @name: Name of the associated MHI Endpoint device + * @ul_chan: UL channel for the device + * @dl_chan: DL channel for the device + * @dev_type: MHI device type + */ +struct mhi_ep_device { + struct device dev; + struct mhi_ep_cntrl *mhi_cntrl; + const struct mhi_device_id *id; + const char *name; + struct mhi_ep_chan *ul_chan; + struct mhi_ep_chan *dl_chan; + enum mhi_device_type dev_type; +}; + +#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) + +/** + * mhi_ep_register_controller - Register MHI Endpoint controller + * @mhi_cntrl: MHI Endpoint controller to register + * @config: Configuration to use for the controller + * + * Return: 0 if controller registrations succeeds, a negative error code otherwise. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config); + +/** + * mhi_ep_unregister_controller - Unregister MHI Endpoint controller + * @mhi_cntrl: MHI Endpoint controller to unregister + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); + +#endif -- cgit v1.2.3 From ee0360b20b3fa08e2eee300eacb45e812ae44578 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:38 +0530 Subject: bus: mhi: ep: Add support for registering MHI endpoint client drivers This commit adds support for registering MHI endpoint client drivers with the MHI endpoint stack. MHI endpoint client drivers bind to one or more MHI endpoint devices in order to send and receive the upper-layer protocol packets like IP packets, modem control messages, and diagnostics messages over MHI bus. 
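To make the binding contract concrete, here is a minimal sketch of an endpoint client driver against this API. The driver name, the "LOOPBACK" channel, and the empty callback bodies are illustrative only, not part of this series:

#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int example_ep_probe(struct mhi_ep_device *mhi_dev,
			    const struct mhi_device_id *id)
{
	/* Channels are started by the host; only driver state is set up here */
	return 0;
}

static void example_ep_remove(struct mhi_ep_device *mhi_dev)
{
}

/* Invoked with the result of a host-to-endpoint (UL) transfer */
static void example_ep_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				  struct mhi_result *result)
{
}

/* Invoked with the result of an endpoint-to-host (DL) transfer */
static void example_ep_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
				  struct mhi_result *result)
{
}

static const struct mhi_device_id example_ep_id_table[] = {
	{ .chan = "LOOPBACK" },
	{},
};
/* Exports the "mhi_ep:LOOPBACK" alias via the uevent support added earlier */
MODULE_DEVICE_TABLE(mhi_ep, example_ep_id_table);

static struct mhi_ep_driver example_ep_driver = {
	.id_table = example_ep_id_table,
	.probe = example_ep_probe,
	.remove = example_ep_remove,
	.ul_xfer_cb = example_ep_ul_xfer_cb,
	.dl_xfer_cb = example_ep_dl_xfer_cb,
	.driver = {
		.name = "example_mhi_ep_driver",
	},
};
module_mhi_ep_driver(example_ep_driver);

MODULE_LICENSE("GPL");

Note that all four callbacks must be provided; __mhi_ep_driver_register() below rejects a driver that lacks probe, remove, or either transfer callback.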
Reviewed-by: Hemant Kumar Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-3-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 57 +++++++++++++++++++++++++++++-- 2 files changed, 140 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index d932ad01761b..f7d5f75fc083 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -198,9 +198,88 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + /* Client drivers should have callbacks defined for both channels */ + if (!mhi_drv->ul_xfer_cb || !mhi_drv->dl_xfer_cb) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; /* * If the device is a controller type then there is no client driver @@ -209,6 +288,12 @@ static int mhi_ep_match(struct device *dev, struct device_driver *drv) if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) return 0; + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + return 0; }; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 891b199a9703..e2b94f9eb846 100644 --- 
a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -101,8 +101,8 @@ struct mhi_ep_cntrl { * @mhi_cntrl: Controller the device belongs to * @id: Pointer to MHI Endpoint device ID struct * @name: Name of the associated MHI Endpoint device - * @ul_chan: UL channel for the device - * @dl_chan: DL channel for the device + * @ul_chan: UL (from host to endpoint) channel for the device + * @dl_chan: DL (from endpoint to host) channel for the device * @dev_type: MHI device type */ struct mhi_ep_device { @@ -115,7 +115,60 @@ struct mhi_ep_device { enum mhi_device_type dev_type; }; +/** + * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver + * @id_table: Pointer to MHI Endpoint device ID table + * @driver: Device driver model driver + * @probe: CB function for client driver probe function + * @remove: CB function for client driver remove function + * @ul_xfer_cb: CB function for UL (from host to endpoint) data transfer + * @dl_xfer_cb: CB function for DL (from endpoint to host) data transfer + */ +struct mhi_ep_driver { + const struct mhi_device_id *id_table; + struct device_driver driver; + int (*probe)(struct mhi_ep_device *mhi_ep, + const struct mhi_device_id *id); + void (*remove)(struct mhi_ep_device *mhi_ep); + void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); +}; + #define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) +#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver) + +/* + * module_mhi_ep_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_ep_driver_register() and + * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. + */ +#define module_mhi_ep_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_ep_driver_register, \ + mhi_ep_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_ep_driver_register(mhi_drv) \ + __mhi_ep_driver_register(mhi_drv, THIS_MODULE) + +/** + * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus + * @mhi_drv: Driver to be associated with the device + * @owner: The module owner + * + * Return: 0 if driver registration succeeds, a negative error code otherwise. + */ +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner); + +/** + * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus + * @mhi_drv: Driver associated with the device + */ +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv); /** * mhi_ep_register_controller - Register MHI Endpoint controller -- cgit v1.2.3 From 297c77a0f27312b9a04696018c4cbd47926ca92b Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:39 +0530 Subject: bus: mhi: ep: Add support for creating and destroying MHI EP devices This commit adds support for creating and destroying MHI endpoint devices. The MHI endpoint devices bind to the MHI endpoint channels and are used to transfer data between MHI host and endpoint device. There is a single MHI EP device for each channel pair. The devices will be created when the corresponding channels have been started by the host and will be destroyed during MHI EP power down and reset.
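The UL/DL pairing convention this commit relies on is easiest to see in a controller's channel table. A hedged sketch, assuming the struct mhi_ep_channel_config layout from the registration patch; the "SAHARA" name, channel numbers and element counts are illustrative:

/* Illustrative channel table: the UL channel sits at the even channel
 * number, the DL channel at the following odd one, and both share a name
 * so that mhi_ep_create_device() fuses them into one mhi_ep_device. */
static const struct mhi_ep_channel_config demo_channels[] = {
	{
		.name = "SAHARA",
		.num = 2,			/* UL: host -> endpoint */
		.num_elements = 32,
		.dir = DMA_TO_DEVICE,
	},
	{
		.name = "SAHARA",
		.num = 3,			/* DL: endpoint -> host */
		.num_elements = 32,
		.dir = DMA_FROM_DEVICE,
	},
};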
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-4-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index f7d5f75fc083..6c64745e8a06 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -68,6 +68,89 @@ static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl, return mhi_dev; } +/* + * MHI channels are always defined in pairs with UL as the even numbered + * channel and DL as the odd numbered one. This function takes the UL channel + * (primary) as the ch_id and looks at the next entry in the channel list for + * the corresponding DL channel (secondary). + */ +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + struct device *dev = mhi_cntrl->cntrl_dev; + struct mhi_ep_device *mhi_dev; + int ret; + + /* Check if the channel name is the same for both UL and DL */ + if (strcmp(mhi_chan->name, mhi_chan[1].name)) { + dev_err(dev, "UL and DL channel names are not the same: (%s) != (%s)\n", + mhi_chan->name, mhi_chan[1].name); + return -EINVAL; + } + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_XFER); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + /* Configure primary channel */ + mhi_dev->ul_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + mhi_dev->dl_chan = mhi_chan; + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is the same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + static int mhi_ep_chan_init(struct mhi_ep_cntrl *mhi_cntrl, const struct mhi_ep_cntrl_config *config) { -- cgit v1.2.3 From e9e4da23cd65ea76ba658346f5c182791bd1cea9 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:40 +0530 Subject: bus: mhi: ep: Add support for managing MMIO registers Add support for managing the Memory Mapped Input Output (MMIO) registers of the MHI bus. All MHI operations are carried out using the MMIO registers by both the host and the endpoint device. The MMIO registers reside inside the endpoint device memory (fixed location based on the platform) and the address is passed by the MHI EP controller driver during its registration.
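Before the stack can use these registers, the controller driver has to supply the MMIO base. A hedged sketch of that wiring, assuming a platform-device based controller; pdev, the "mmio" resource name and demo_config are illustrative, not part of this series:

#include <linux/mhi_ep.h>
#include <linux/platform_device.h>

/* Hypothetical controller-driver fragment: map the platform-specific MMIO
 * region and hand it to the MHI EP stack before registering. demo_config
 * (a struct mhi_ep_cntrl_config) is assumed to be defined elsewhere. */
static int demo_controller_probe(struct platform_device *pdev)
{
	struct mhi_ep_cntrl *mhi_cntrl;

	mhi_cntrl = devm_kzalloc(&pdev->dev, sizeof(*mhi_cntrl), GFP_KERNEL);
	if (!mhi_cntrl)
		return -ENOMEM;

	mhi_cntrl->cntrl_dev = &pdev->dev;
	mhi_cntrl->mmio = devm_platform_ioremap_resource_byname(pdev, "mmio");
	if (IS_ERR(mhi_cntrl->mmio))
		return PTR_ERR(mhi_cntrl->mmio);

	return mhi_ep_register_controller(mhi_cntrl, &demo_config);
}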
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-5-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 26 ++++ drivers/bus/mhi/ep/main.c | 6 +- drivers/bus/mhi/ep/mmio.c | 273 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 18 +++ 5 files changed, 323 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/mmio.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 64e29252b608..a1555ae287ad 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o +mhi_ep-y := main.o mmio.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 0d3923186a5e..475425a30d85 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -153,4 +153,30 @@ struct mhi_ep_chan { bool skip_td; }; +/* MMIO related functions */ +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val); +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 6c64745e8a06..7dcc784f10d1 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -214,7 +214,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -227,6 +227,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + /* Set MHI version and AMSS EE before enumeration */ + mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) diff 
--git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..b5bfd22f2c8e --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include +#include + +#include "internal.h" + +u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset) +{ + return readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, u32 val) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, offset); + regval &= ~mask; + regval |= (val << __ffs(mask)) & mask; + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +u32 mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, u32 mask) +{ + u32 regval; + + regval = mhi_ep_mmio_read(dev, offset); + regval &= mask; + regval >>= __ffs(mask); + + return regval; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICTRL); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_set_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id, bool enable) +{ + u32 chid_mask, chid_shift, chdb_idx, val; + + chid_shift = ch_id % 32; + chid_mask = BIT(chid_shift); + chdb_idx = ch_id / 32; + + val = enable ? 1 : 0; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(chdb_idx), chid_mask, val); + + /* Update the local copy of the channel mask */ + mhi_cntrl->chdb[chdb_idx].mask &= ~chid_mask; + mhi_cntrl->chdb[chdb_idx].mask |= val << chid_shift; +} + +void mhi_ep_mmio_enable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, true); +} + +void mhi_ep_mmio_disable_chdb(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + mhi_ep_mmio_set_chdb(mhi_cntrl, ch_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? MHI_CHDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +bool mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + bool chdb = false; + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + mhi_cntrl->chdb[i].status = mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_n(i)); + if (mhi_cntrl->chdb[i].status) + chdb = true; + } + + /* Return whether a channel doorbell interrupt occurred or not */ + return chdb; +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val, i; + + val = enable ? 
MHI_ERDB_INT_MASK_n_EN_ALL : 0; + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_n(i), val); +} + +static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_MHICTRL_MASK, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK, + MHI_CTRL_CRDB_MASK, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + MHI_CHDB_INT_CLEAR_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_n(i), + MHI_ERDB_INT_CLEAR_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_HIGHER); + mhi_cntrl->ch_ctx_host_pa = regval; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CCABAP_LOWER); + mhi_cntrl->ch_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_HIGHER); + mhi_cntrl->ev_ctx_host_pa = regval; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_ECABAP_LOWER); + mhi_cntrl->ev_ctx_host_pa |= regval; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_HIGHER); + mhi_cntrl->cmd_ctx_host_pa = regval; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_CRCBAP_LOWER); + mhi_cntrl->cmd_ctx_host_pa |= regval; +} + +u64 mhi_ep_mmio_get_db(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u64 db_offset; + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h); + db_offset = regval; + db_offset <<= 32; + + regval = mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l); + db_offset |= regval; + + return db_offset; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHICTRL, MHICTRL_RESET_MASK, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, EP_MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, EP_MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl 
*mhi_cntrl) +{ + u32 regval; + + mhi_cntrl->chdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_CHDBOFF); + mhi_cntrl->erdb_offset = mhi_ep_mmio_read(mhi_cntrl, EP_ERDBOFF); + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 regval; + + regval = mhi_ep_mmio_read(mhi_cntrl, EP_MHICFG); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, regval); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, regval); +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index e2b94f9eb846..5db048e258e4 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,6 +59,10 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_host_pa: Physical address of host channel context data structure + * @ev_ctx_host_pa: Physical address of host event context data structure + * @cmd_ctx_host_pa: Physical address of host command context data structure + * @chdb: Array of channel doorbell interrupt info * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -67,6 +71,10 @@ struct mhi_ep_db_info { * @mhi_state: MHI Endpoint state * @max_chan: Maximum channels supported by the endpoint controller * @mru: MRU (Maximum Receive Unit) value of the endpoint controller + * @event_rings: Number of event rings supported by the endpoint controller + * @hw_event_rings: Number of hardware event rings supported by the endpoint controller + * @chdb_offset: Channel doorbell offset set by the host + * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index */ struct mhi_ep_cntrl { @@ -79,6 +87,12 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + u64 ch_ctx_host_pa; + u64 ev_ctx_host_pa; + u64 cmd_ctx_host_pa; + + struct mhi_ep_db_info chdb[4]; + void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, void __iomem **virt, size_t size); @@ -91,6 +105,10 @@ struct mhi_ep_cntrl { u32 max_chan; u32 mru; + u32 event_rings; + u32 hw_event_rings; + u32 chdb_offset; + u32 erdb_offset; u32 index; }; -- cgit v1.2.3 From bbdcba57a1a26a4439a4f4ecdbfaf80a10fd8f34 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:41 +0530 Subject: bus: mhi: ep: Add support for ring management Add support for managing the MHI ring. The MHI ring is a circular queue of data structures used to pass information between the host and the endpoint. MHI supports 3 types of rings: 1. Transfer ring 2. Event ring 3. Command ring All rings reside in host memory and the MHI EP device maps them to the device memory using blocks like the PCIe iATU. The mapping is handled in the MHI EP controller driver itself.
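The ring bookkeeping in the code below reduces to modular arithmetic over element-sized slots. A small self-contained sketch of that math, assuming 16-byte ring elements (the size of struct mhi_ring_element):

#include <stddef.h>
#include <stdint.h>

#define RING_EL_SIZE	16	/* sizeof(struct mhi_ring_element) */

/* Translate a host ring pointer into an element offset from the ring base,
 * mirroring mhi_ep_ring_addr2offset() below. */
static size_t ring_addr2offset(uint64_t rbase, uint64_t ptr)
{
	return (ptr - rbase) / RING_EL_SIZE;
}

/* Advance an offset with wrap-around, mirroring mhi_ep_ring_inc_index() */
static size_t ring_inc_index(size_t rd_offset, size_t ring_size)
{
	return (rd_offset + 1) % ring_size;
}

/* Example: ring_addr2offset(0x1000, 0x1040) == 4, and with a 16-element
 * ring, ring_inc_index(15, 16) wraps back to 0. */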
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-6-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 18 ++++ drivers/bus/mhi/ep/ring.c | 207 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 226 insertions(+), 1 deletion(-) create mode 100644 drivers/bus/mhi/ep/ring.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index a1555ae287ad..7ba0e04801eb 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o mmio.o +mhi_ep-y := main.o mmio.o ring.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 475425a30d85..d16b87061ac6 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -116,6 +116,11 @@ union mhi_ep_ring_ctx { struct mhi_generic_ctx generic; }; +struct mhi_ep_ring_item { + struct list_head node; + struct mhi_ep_ring *ring; +}; + struct mhi_ep_ring { struct mhi_ep_cntrl *mhi_cntrl; union mhi_ep_ring_ctx *ring_ctx; @@ -128,6 +133,9 @@ struct mhi_ep_ring { u32 db_offset_h; u32 db_offset_l; u32 ch_id; + u32 er_index; + u32 irq_vector; + bool started; }; struct mhi_ep_cmd { @@ -153,6 +161,16 @@ struct mhi_ep_chan { bool skip_td; }; +/* MHI Ring related functions */ +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *element); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring); + /* MMIO related functions */ u32 mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset); void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000000..115518ec76a4 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + return (ptr - ring->rbase) / sizeof(struct mhi_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + __le64 rlen; + + memcpy_fromio(&rlen, (void __iomem *) &ring->ring_ctx->generic.rlen, sizeof(u64)); + + return le64_to_cpu(rlen) / sizeof(struct mhi_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset = (ring->rd_offset + 1) % ring->ring_size; +} + +static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + int ret; + + /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
*/ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + + (start * sizeof(struct mhi_ring_element)), + &ring->ring_cache[start], copy_size); + if (ret < 0) + return ret; + + if (end) { + ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, + &ring->ring_cache[0], + end * sizeof(struct mhi_ring_element)); + if (ret < 0) + return ret; + } + } + + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + + return 0; +} + +static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + wr_ptr = mhi_ep_mmio_get_db(ring); + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t old_offset = 0; + u32 num_free_elem; + __le64 rp; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (!num_free_elem) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset); + + /* Update rp in ring context */ + rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); + memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); + + ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), + sizeof(*el)); + if (ret < 0) + return ret; + + return 0; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->db_offset_h = EP_CRDB_HIGHER; + ring->db_offset_l = EP_CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + __le64 val; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + memcpy_fromio(&val, (void __iomem *) 
&ring->ring_ctx->generic.rbase, sizeof(u64)); + ring->rbase = le64_to_cpu(val); + + if (ring->type == RING_TYPE_CH) + ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); + + if (ring->type == RING_TYPE_ER) + ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + + /* During ring init, both rp and wp are equal */ + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); + ring->rd_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, le64_to_cpu(val)); + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ring_element), GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.wp, sizeof(u64)); + ret = mhi_ep_cache_ring(ring, le64_to_cpu(val)); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + ring->started = true; + + return 0; +} + +void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->started = false; + kfree(ring->ring_cache); + ring->ring_cache = NULL; +} -- cgit v1.2.3 From 961aeb6892242e0a667f5b8eb62b9b0a6041752c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:42 +0530 Subject: bus: mhi: ep: Add support for sending events to the host Add support for sending events to the host over MHI bus from the endpoint. The following events are supported: 1. Transfer completion event 2. Command completion event 3. State change event 4. Execution Environment (EE) change event An event is sent whenever an operation has been completed in the MHI EP device. The event is sent using the MHI event ring, and the host is additionally notified using an IRQ if required.
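The event macros added below all follow one layout: an 8-byte pointer plus two 32-bit dwords, with the payload in bits 31:24 of dword[0] and the packet type in bits 23:16 of dword[1]. A hedged sketch of what an M0 state change event then looks like on the wire; the numeric values assume MHI_STATE_M0 == 2 and MHI_PKT_TYPE_STATE_CHANGE_EVENT == 0x20:

/* Assumed wire layout of the event built by MHI_SC_EV_DWORD0()/
 * MHI_SC_EV_DWORD1() in the hunk below; demo_* names are illustrative. */
struct demo_ring_element {
	__le64 ptr;		/* unused (0) for state change / EE events */
	__le32 dword[2];
};

static const struct demo_ring_element demo_m0_event = {
	.ptr	  = 0,
	.dword[0] = cpu_to_le32(2 << 24),	/* MHI_STATE_M0 in bits 31:24 */
	.dword[1] = cpu_to_le32(0x20 << 16),	/* packet type in bits 23:16 */
};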
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-7-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/common.h | 22 +++++++++++ drivers/bus/mhi/ep/internal.h | 4 ++ drivers/bus/mhi/ep/main.c | 89 +++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 8 ++++ 4 files changed, 123 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h index b4ef9acd3ce7..f794b9c8049e 100644 --- a/drivers/bus/mhi/common.h +++ b/drivers/bus/mhi/common.h @@ -165,6 +165,22 @@ #define MHI_TRE_GET_EV_LINKSPEED(tre) FIELD_GET(GENMASK(31, 24), (MHI_TRE_GET_DWORD(tre, 1))) #define MHI_TRE_GET_EV_LINKWIDTH(tre) FIELD_GET(GENMASK(7, 0), (MHI_TRE_GET_DWORD(tre, 0))) +/* State change event */ +#define MHI_SC_EV_PTR 0 +#define MHI_SC_EV_DWORD0(state) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), state)) +#define MHI_SC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + +/* EE event */ +#define MHI_EE_EV_PTR 0 +#define MHI_EE_EV_DWORD0(ee) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), ee)) +#define MHI_EE_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + + +/* Command Completion event */ +#define MHI_CC_EV_PTR(ptr) cpu_to_le64(ptr) +#define MHI_CC_EV_DWORD0(code) cpu_to_le32(FIELD_PREP(GENMASK(31, 24), code)) +#define MHI_CC_EV_DWORD1(type) cpu_to_le32(FIELD_PREP(GENMASK(23, 16), type)) + /* Transfer descriptor macros */ #define MHI_TRE_DATA_PTR(ptr) cpu_to_le64(ptr) #define MHI_TRE_DATA_DWORD0(len) cpu_to_le32(FIELD_PREP(GENMASK(15, 0), len)) @@ -175,6 +191,12 @@ FIELD_PREP(BIT(9), ieot) | \ FIELD_PREP(BIT(8), ieob) | \ FIELD_PREP(BIT(0), chain)) +#define MHI_TRE_DATA_GET_PTR(tre) le64_to_cpu((tre)->ptr) +#define MHI_TRE_DATA_GET_LEN(tre) FIELD_GET(GENMASK(15, 0), MHI_TRE_GET_DWORD(tre, 0)) +#define MHI_TRE_DATA_GET_CHAIN(tre) (!!(FIELD_GET(BIT(0), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOB(tre) (!!(FIELD_GET(BIT(8), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_IEOT(tre) (!!(FIELD_GET(BIT(9), MHI_TRE_GET_DWORD(tre, 1)))) +#define MHI_TRE_DATA_GET_BEI(tre) (!!(FIELD_GET(BIT(10), MHI_TRE_GET_DWORD(tre, 1)))) /* RSC transfer descriptor macros */ #define MHI_RSCTRE_DATA_PTR(ptr, len) cpu_to_le64(FIELD_PREP(GENMASK(64, 48), len) | ptr) diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index d16b87061ac6..e096d9cb2cb1 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -197,4 +197,8 @@ void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *s void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); + #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 7dcc784f10d1..eca1f58ba5fb 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -18,6 +18,93 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, + struct mhi_ring_element *el, bool bei) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + struct mhi_ep_ring *ring; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ring = &mhi_cntrl->mhi_event[ring_idx].ring; + ctx = (union mhi_ep_ring_ctx 
*)&mhi_cntrl->ev_ctx_cache[ring_idx]; + if (!ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%u)\n", ring_idx); + goto err_unlock; + } + } + + /* Add element to the event ring */ + ret = mhi_ep_ring_add_element(ring, el); + if (ret) { + dev_err(dev, "Error adding element to event ring (%u)\n", ring_idx); + goto err_unlock; + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* + * Raise IRQ to host only if the BEI flag is not set in TRE. Host might + * set this flag for interrupt moderation as per MHI protocol. + */ + if (!bei) + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) +{ + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event.dword[0] = MHI_TRE_EV_DWORD0(code, len); + event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + + return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_SC_EV_DWORD0(state); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) +{ + struct mhi_ring_element event = {}; + + event.dword[0] = MHI_EE_EV_DWORD0(exec_env); + event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct mhi_ring_element event = {}; + + event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event.dword[0] = MHI_CC_EV_DWORD0(code); + event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + + return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -227,6 +314,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mutex_init(&mhi_cntrl->event_lock); + /* Set MHI version and AMSS EE before enumeration */ mhi_ep_mmio_write(mhi_cntrl, EP_MHIVER, config->mhi_version); mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 5db048e258e4..46236ffb528a 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -59,10 +59,14 @@ struct mhi_ep_db_info { * @mhi_event: Points to the event ring configurations table * @mhi_cmd: Points to the command ring configurations table * @sm: MHI Endpoint state machine + * @ch_ctx_cache: Cache of host channel context data structure + * @ev_ctx_cache: Cache of host event context data structure + * @cmd_ctx_cache: Cache of host command context data structure * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info + * 
@event_lock: Lock for protecting event rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -87,11 +91,15 @@ struct mhi_ep_cntrl { struct mhi_ep_cmd *mhi_cmd; struct mhi_ep_sm *sm; + struct mhi_chan_ctxt *ch_ctx_cache; + struct mhi_event_ctxt *ev_ctx_cache; + struct mhi_cmd_ctxt *cmd_ctx_cache; u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; struct mhi_ep_db_info chdb[4]; + struct mutex event_lock; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From f9baa4f737950523ca648866dfa345ac378e4487 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:43 +0530 Subject: bus: mhi: ep: Add support for managing MHI state machine Add support for managing the MHI state machine by controlling the state transitions. Only the following MHI state transitions are supported: 1. Ready state 2. M0 state 3. M3 state 4. SYS_ERR state Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-8-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/Makefile | 2 +- drivers/bus/mhi/ep/internal.h | 11 ++++ drivers/bus/mhi/ep/main.c | 54 ++++++++++++++++- drivers/bus/mhi/ep/sm.c | 136 ++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 12 ++++ 5 files changed, 213 insertions(+), 2 deletions(-) create mode 100644 drivers/bus/mhi/ep/sm.c (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile index 7ba0e04801eb..aad85f180b70 100644 --- a/drivers/bus/mhi/ep/Makefile +++ b/drivers/bus/mhi/ep/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o -mhi_ep-y := main.o mmio.o ring.o +mhi_ep-y := main.o mmio.o ring.o sm.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index e096d9cb2cb1..4f2e26841702 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -146,6 +146,11 @@ struct mhi_ep_event { struct mhi_ep_ring ring; }; +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + struct mhi_ep_chan { char *name; struct mhi_ep_device *mhi_dev; @@ -200,5 +205,11 @@ void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); /* MHI EP core functions */ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state); int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index eca1f58ba5fb..c912daf6dc65 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -105,6 +105,43 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = 
container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + mhi_state_str(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d\n", itr->state); + break; + } + kfree(itr); + } +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -314,6 +351,17 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + + mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); + if (!mhi_cntrl->wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + spin_lock_init(&mhi_cntrl->state_lock); + spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); /* Set MHI version and AMSS EE before enumeration */ @@ -323,7 +371,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, /* Set controller index */ ret = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); if (ret < 0) - goto err_free_cmd; + goto err_destroy_wq; mhi_cntrl->index = ret; @@ -351,6 +399,8 @@ err_put_dev: put_device(&mhi_dev->dev); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_wq: + destroy_workqueue(mhi_cntrl->wq); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -364,6 +414,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + destroy_workqueue(mhi_cntrl->wq); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..ffc02f5d0a0d --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam + */ + +#include +#include +#include "internal.h" + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + if (mhi_state == MHI_STATE_SYS_ERR) + return true; /* Allowed in any state */ + + if (mhi_state == MHI_STATE_READY) + return cur_mhi_state == MHI_STATE_RESET; + + if (mhi_state == MHI_STATE_M0) + return cur_mhi_state == MHI_STATE_M3 || cur_mhi_state == MHI_STATE_READY; + + if (mhi_state == MHI_STATE_M3) + return cur_mhi_state == MHI_STATE_M0; + + return false; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s is not allowed!\n", + mhi_state_str(mhi_state), + mhi_state_str(mhi_cntrl->mhi_state)); + return -EACCES; + } + + /* TODO: Add support for M1 and M2 states */ + if (mhi_state == MHI_STATE_M1 || mhi_state == MHI_STATE_M2) { + dev_err(dev, "MHI state (%s) not supported\n", mhi_state_str(mhi_state)); + return -EOPNOTSUPP; + } + + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK, mhi_state); + mhi_cntrl->mhi_state = mhi_state; + + if (mhi_state == MHI_STATE_READY) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK, 1); + + if (mhi_state == MHI_STATE_SYS_ERR) + mhi_ep_mmio_masked_write(mhi_cntrl, EP_MHISTATUS, MHISTATUS_SYSERR_MASK, 1); + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed sending M0 state change event\n"); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event\n"); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + spin_unlock_bh(&mhi_cntrl->state_lock); + + if (ret) + return ret; + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event\n"); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); + is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. 
MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EIO; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + return ret; +} diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 46236ffb528a..2880d2aa88b8 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -67,6 +67,11 @@ struct mhi_ep_db_info { * @cmd_ctx_host_pa: Physical address of host command context data structure * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings + * @list_lock: Lock for protecting state transition and channel doorbell lists + * @state_lock: Lock for protecting state transitions + * @st_transition_list: List of state transitions + * @wq: Dedicated workqueue for handling rings and state changes + * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -100,6 +105,13 @@ struct mhi_ep_cntrl { struct mhi_ep_db_info chdb[4]; struct mutex event_lock; + spinlock_t list_lock; + spinlock_t state_lock; + + struct list_head st_transition_list; + + struct workqueue_struct *wq; + struct work_struct state_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 4799e71b082615445dc40ba0bbb86cbb76c24724 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:44 +0530 Subject: bus: mhi: ep: Add support for processing MHI endpoint interrupts Add support for processing MHI endpoint interrupts such as control interrupt, command interrupt and channel interrupt from the host. The interrupts will be generated in the endpoint device whenever the host writes to the corresponding doorbell registers. The doorbell logic is handled internally by the hardware.
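Channel doorbell state is spread across four 32-bit status registers, so register i, bit b corresponds to channel (i * 32 + b). A minimal sketch of that decoding, mirroring mhi_ep_check_channel_interrupt() below; demo_handle_channel() is an assumed stand-in for the real queuing logic:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CHDB_REGS	4	/* MHI_MASK_ROWS_CH_DB */

void demo_handle_channel(u32 ch_id);	/* assumed to exist elsewhere */

/* Walk each masked status word and map set bits back to channel IDs */
static void demo_decode_chdb(const u32 *status, const u32 *mask)
{
	u32 i, bit;

	for (i = 0; i < DEMO_CHDB_REGS; i++) {
		unsigned long pending = status[i] & mask[i];

		for_each_set_bit(bit, &pending, 32)
			demo_handle_channel(i * 32 + bit);
	}
}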
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-9-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 124 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 4 ++ 2 files changed, 126 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index c912daf6dc65..4e82006bd83b 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -142,6 +143,112 @@ static void mhi_ep_state_worker(struct work_struct *work) } } +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int, + u32 ch_idx) +{ + struct mhi_ep_ring_item *item; + struct mhi_ep_ring *ring; + bool work = !!ch_int; + LIST_HEAD(head); + u32 i; + + /* First add the ring items to a local list */ + for_each_set_bit(i, &ch_int, 32) { + /* Channel index varies for each register: 0, 32, 64, 96 */ + u32 ch_id = ch_idx + i; + + ring = &mhi_cntrl->mhi_chan[ch_id].ring; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->ring = ring; + list_add_tail(&item->node, &head); + } + + /* Now, splice the local list into ch_db_list and queue the work item */ + if (work) { + spin_lock(&mhi_cntrl->list_lock); + list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); + spin_unlock(&mhi_cntrl->list_lock); + } +} + +/* + * Channel interrupt statuses are contained in 4 registers each of 32bit length. + * For checking all interrupts, we need to loop through each registers and then + * check for bits set. + */ +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ch_int, ch_idx, i; + + /* Bail out if there is no channel doorbell interrupt */ + if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl)) + return; + + for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) { + ch_idx = i * MHI_MASK_CH_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask; + if (ch_int) { + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + return; + + item->state = state; + spin_lock(&mhi_cntrl->list_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work); +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. 
+ */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value; + bool mhi_reset; + + /* Acknowledge the ctrl interrupt */ + int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for command doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + dev_dbg(dev, "Processing command doorbell interrupt\n"); + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -338,7 +445,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_device *mhi_dev; int ret; - if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio) + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; ret = mhi_ep_chan_init(mhi_cntrl, config); @@ -360,6 +467,7 @@ } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); spin_lock_init(&mhi_cntrl->state_lock); spin_lock_init(&mhi_cntrl->list_lock); mutex_init(&mhi_cntrl->event_lock); @@ -375,12 +483,20 @@ mhi_cntrl->index = ret; + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n"); + goto err_ida_free; + } + /* Allocate the controller device */ mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER); if (IS_ERR(mhi_dev)) { dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); ret = PTR_ERR(mhi_dev); - goto err_ida_free; + goto err_free_irq; } dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index); @@ -397,6 +513,8 @@ err_put_dev: put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: @@ -416,6 +534,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { destroy_workqueue(mhi_cntrl->wq); + free_irq(mhi_cntrl->irq, mhi_cntrl); + kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2880d2aa88b8..137bd3ee2e43 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -70,6 +70,7 @@ struct mhi_ep_db_info { * @list_lock: Lock for protecting state transition and channel doorbell lists * @state_lock: Lock for protecting state transitions * @st_transition_list: List of state transitions + * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @raise_irq: CB function for raising IRQ to the host @@ -85,6 +86,7 @@ struct mhi_ep_db_info { * @chdb_offset: Channel doorbell offset set by the host * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index + * @irq: IRQ used by the endpoint controller */
struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -109,6 +111,7 @@ struct mhi_ep_cntrl { spinlock_t state_lock; struct list_head st_transition_list; + struct list_head ch_db_list; struct workqueue_struct *wq; struct work_struct state_work; @@ -130,6 +133,7 @@ struct mhi_ep_cntrl { u32 chdb_offset; u32 erdb_offset; u32 index; + int irq; }; /** -- cgit v1.2.3 From fb3a26b7e8aff11e44d582604f61c38f63bd507c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:45 +0530 Subject: bus: mhi: ep: Add support for powering up the MHI endpoint stack Add support for MHI endpoint power_up that includes initializing the MMIO and rings, caching the host MHI registers, and setting the MHI state to M0. After registering the MHI EP controller, the stack has to be powered up for usage. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-10-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 205 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 16 ++++ 2 files changed, 221 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 4e82006bd83b..20d579733486 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -17,6 +17,9 @@ #include #include "internal.h" +#define M0_WAIT_DELAY_MS 100 +#define M0_WAIT_COUNT 100 + static DEFINE_IDA(mhi_ep_cntrl_ida); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -106,6 +109,154 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %u, HW Event rings: %u\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate and map memory for caching host channel context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, + &mhi_cntrl->ch_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ch_ctx_cache, + ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ch_ctx_cache\n"); + return ret; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate and map memory for caching host event context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, + &mhi_cntrl->ev_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->ev_ctx_cache, + ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to allocate and map ev_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate and map memory for caching host command context */ + ret = mhi_cntrl->alloc_map(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, + &mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem **) &mhi_cntrl->cmd_ctx_cache, + cmd_ctx_host_size); + if (ret) { + 
dev_err(dev, "Failed to allocate and map cmd_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx; + } + + return ret; + +err_cmd_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + +err_ev_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + +err_ch_ctx: + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); + + return ret; +} + +static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; + + ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) * mhi_cntrl->max_chan; + ev_ctx_host_size = sizeof(struct mhi_event_ctxt) * mhi_cntrl->event_rings; + cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt) * NR_OF_CMD_RINGS; + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_cache_phys, + (void __iomem *) mhi_cntrl->cmd_ctx_cache, cmd_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ev_ctx_cache, ev_ctx_host_size); + + mhi_cntrl->unmap_free(mhi_cntrl, mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_cache_phys, + (void __iomem *) mhi_cntrl->ch_ctx_cache, ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + /* + * Doorbell interrupts are enabled when the corresponding channel gets started. + * Enabling all interrupts here triggers spurious irqs as some of the interrupts + * associated with hw channels always get triggered. + */ + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + bool mhi_reset; + u32 count = 0; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(M0_WAIT_DELAY_MS); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_info(dev, "Detected Host reset while waiting for M0\n"); + } + count++; + } while (state != MHI_STATE_M0 && count < M0_WAIT_COUNT); + + if (state != MHI_STATE_M0) { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -249,6 +400,60 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). 
+ */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return -ENOMEM; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + mhi_cntrl->mhi_state = MHI_STATE_RESET; + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint\n"); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + mhi_cntrl->enabled = true; + + return 0; + +err_free_event: + kfree(mhi_cntrl->mhi_event); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 137bd3ee2e43..3b065f82fbeb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -65,6 +65,9 @@ struct mhi_ep_db_info { * @ch_ctx_host_pa: Physical address of host channel context data structure * @ev_ctx_host_pa: Physical address of host event context data structure * @cmd_ctx_host_pa: Physical address of host command context data structure + * @ch_ctx_cache_phys: Physical address of the host channel context cache + * @ev_ctx_cache_phys: Physical address of the host event context cache + * @cmd_ctx_cache_phys: Physical address of the host command context cache * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings * @list_lock: Lock for protecting state transition and channel doorbell lists @@ -87,6 +90,7 @@ struct mhi_ep_db_info { * @erdb_offset: Event ring doorbell offset set by the host * @index: MHI Endpoint controller index * @irq: IRQ used by the endpoint controller + * @enabled: Check if the endpoint controller is enabled or not */ struct mhi_ep_cntrl { struct device *cntrl_dev; @@ -104,6 +108,9 @@ struct mhi_ep_cntrl { u64 ch_ctx_host_pa; u64 ev_ctx_host_pa; u64 cmd_ctx_host_pa; + phys_addr_t ch_ctx_cache_phys; + phys_addr_t ev_ctx_cache_phys; + phys_addr_t cmd_ctx_cache_phys; struct mhi_ep_db_info chdb[4]; struct mutex event_lock; @@ -134,6 +141,7 @@ struct mhi_ep_cntrl { u32 erdb_offset; u32 index; int irq; + bool enabled; }; /** @@ -228,4 +236,12 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_up - Power up the MHI endpoint stack + * @mhi_cntrl: MHI Endpoint controller + * + * Return: 0 if power up succeeds, a negative error code otherwise. 
+ */ +int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From 5d507ee04894e166f8c5a29f05c6b06ce91d5833 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:46 +0530 Subject: bus: mhi: ep: Add support for powering down the MHI endpoint stack Add support for MHI endpoint power_down that includes stopping all available channels, destroying the channels, resetting the event and transfer rings and freeing the host cache. The stack will be powered down whenever the physical bus link goes down. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-11-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 6 ++++ 2 files changed, 84 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 20d579733486..968025e4d3ac 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,8 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_destroy_device(struct device *dev, void *data); + static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, struct mhi_ring_element *el, bool bei) { @@ -400,6 +402,68 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) return IRQ_HANDLED; } +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + if (!mhi_chan->ring.started) + continue; + + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (!ev_ring->started) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_reset(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->enabled = false; +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -454,6 +518,16 @@ err_free_event: } EXPORT_SYMBOL_GPL(mhi_ep_power_up); +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->enabled) + mhi_ep_abort_transfer(mhi_cntrl); + + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + static void mhi_ep_release_device(struct device *dev) { struct 
mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -733,6 +807,10 @@ err_free_ch: } EXPORT_SYMBOL_GPL(mhi_ep_register_controller); +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) { struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 3b065f82fbeb..9da683e8302c 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -244,4 +244,10 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); */ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_power_down - Power down the MHI endpoint stack + * @mhi_cntrl: MHI controller + */ +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); + #endif -- cgit v1.2.3 From 7a97b6b47353c60dd1b53ada6180741437e377f2 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:47 +0530 Subject: bus: mhi: ep: Add support for handling MHI_RESET Add support for handling MHI_RESET in the MHI endpoint stack. MHI_RESET will be issued by the host during shutdown and during error scenarios so that it can recover the endpoint device without restarting the whole device. MHI_RESET handling involves resetting the internal MHI registers, data structures, state machines, resetting all channels/rings and setting the MHICTRL.RESET bit to 0. Additionally, the device will move to the READY state if the reset was due to SYS_ERR. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-12-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 ++ 2 files changed, 55 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 968025e4d3ac..d36708d43eb6 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -381,6 +381,7 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state state; u32 int_value; + bool mhi_reset; /* Acknowledge the ctrl interrupt */ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS); @@ -389,6 +390,14 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) /* Check for ctrl interrupt */ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) { dev_dbg(dev, "Processing ctrl interrupt\n"); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); } @@ -464,6 +473,49 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) mhi_cntrl->enabled = false; } +static void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR.
The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. + */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -738,6 +790,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, } INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 9da683e8302c..2f31a54c205f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -76,6 +76,7 @@ struct mhi_ep_db_info { * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker + * @reset_work: Worker for MHI Endpoint reset * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -122,6 +123,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; + struct work_struct reset_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From f7d0806bdb1b377d4abe0f2c7798cec5b8a837ce Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:48 +0530 Subject: bus: mhi: ep: Add support for handling SYS_ERR condition Add support for handling the SYS_ERR (System Error) condition in the MHI endpoint stack. The SYS_ERR flag will be asserted by the endpoint device when it detects an internal error. The host will then issue reset and reinitialize MHI to recover from the error state.
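As a rough sketch of what the device-side SYS_ERR handling amounts to (both helpers exist in this series; the wrapper function itself is illustrative only, not part of the patch):

	/* Illustrative condensation of mhi_ep_handle_syserr(): move the local
	 * state machine to SYS_ERR, then notify the host so that it can
	 * recover the device with MHI_RESET. */
	static void example_report_syserr(struct mhi_ep_cntrl *mhi_cntrl)
	{
		if (mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR))
			return;

		mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
	}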
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-13-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/internal.h | 1 + drivers/bus/mhi/ep/main.c | 20 ++++++++++++++++++++ drivers/bus/mhi/ep/sm.c | 11 +++++++++-- 3 files changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index 4f2e26841702..d201d755560c 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -211,5 +211,6 @@ int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_stat int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index d36708d43eb6..706473ea4918 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -516,6 +516,26 @@ static void mhi_ep_reset_worker(struct work_struct *work) } } +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. The host will reset all contexts and issue MHI RESET so that we + * could also recover from error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) { struct device *dev = &mhi_cntrl->mhi_dev->dev; diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index ffc02f5d0a0d..e3865b85399d 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -68,8 +68,10 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); return ret; + } /* Signal host that the device moved to M0 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); @@ -99,8 +101,10 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); return ret; + } /* Signal host that the device moved to M3 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); @@ -132,5 +136,8 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); spin_unlock_bh(&mhi_cntrl->state_lock); + if (ret) + mhi_ep_handle_syserr(mhi_cntrl); + return ret; } -- cgit v1.2.3 From e827569062a804c67b51930ce83a4cb886113cb7 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:49 +0530 Subject: bus: mhi: ep: Add support for processing command rings Add support for processing the command rings. Command ring is used by the host to issue channel specific commands to the ep device. Following commands are supported: 1. Start channel 2. Stop channel 3. 
Reset channel Once the device receives the command doorbell interrupt from host, it executes the command and generates a command completion event to the host in the primary event ring. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-14-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 190 +++++++++++++++++++++++++++++++++++++++++++++- include/linux/mhi_ep.h | 2 + 2 files changed, 191 insertions(+), 1 deletion(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 706473ea4918..32ac567e0f67 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -22,6 +22,7 @@ static DEFINE_IDA(mhi_ep_cntrl_ida); +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); static int mhi_ep_destroy_device(struct device *dev, void *data); static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, @@ -111,6 +112,156 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); } +static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_ring *ch_ring; + u32 tmp, ch_id; + int ret; + + ch_id = MHI_TRE_GET_CMD_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_TRE_GET_CMD_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%u)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (!ch_ring->started) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%u)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", ret); + + goto err_unlock; + } + } + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. 
+ */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%u)\n", ch_id); + mhi_ep_handle_syserr(mhi_cntrl); + return ret; + } + } + + /* Finally, enable DB for the channel */ + mhi_ep_mmio_enable_chdb(mhi_cntrl, ch_id); + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_STOP); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); + if (!ch_ring->started) { + dev_err(dev, "Channel (%u) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_reset(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg); + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp); + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event (%u)\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + default: + dev_err(dev, "Invalid command received: %lu for channel (%u)\n", + MHI_TRE_GET_CMD_TYPE(el), ch_id); + return -EINVAL; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -259,6 +410,40 @@ static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) return 0; } +static void mhi_ep_cmd_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, cmd_ring_work); + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ring_element *el; + int ret; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) + return; + + /* + * Process command ring elements till the write offset. In case of an error, just try to + * process the next element.
+ */ + while (ring->rd_offset != ring->wr_offset) { + el = &ring->ring_cache[ring->rd_offset]; + + ret = mhi_ep_process_cmd_ring(ring, el); + if (ret) + dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset); + + mhi_ep_ring_inc_index(ring); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -402,8 +587,10 @@ static irqreturn_t mhi_ep_irq(int irq, void *data) } /* Check for command doorbell interrupt */ - if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->wq, &mhi_cntrl->cmd_ring_work); + } /* Check for channel interrupts */ mhi_ep_check_channel_interrupt(mhi_cntrl); @@ -811,6 +998,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 2f31a54c205f..8c6406d9c51f 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -77,6 +77,7 @@ struct mhi_ep_db_info { * @wq: Dedicated workqueue for handling rings and state changes * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset + * @cmd_ring_work: Worker for processing command rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -124,6 +125,7 @@ struct mhi_ep_cntrl { struct workqueue_struct *wq; struct work_struct state_work; struct work_struct reset_work; + struct work_struct cmd_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 530125889977365cb6db32d7d0bd84c9f54c8aab Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:50 +0530 Subject: bus: mhi: ep: Add support for reading from the host Data transfer between host and the ep device happens over the transfer ring associated with each bi-directional channel pair. Host defines the transfer ring by allocating memory for it. The read and write pointer addresses of the transfer ring are stored in the channel context. Once host places the elements in the transfer ring, it increments the write pointer and rings the channel doorbell. Device will receive the doorbell interrupt and will process the transfer ring elements. This commit adds support for reading the transfer ring elements from the transfer ring till write pointer, incrementing the read pointer and finally sending the completion event to the host through corresponding event ring. 
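To show where the data ends up, here is a minimal sketch of a client driver's UL transfer callback (the driver and its logging are hypothetical; the mhi_result fields are the ones used by this series):

	/* Hypothetical client callback: invoked by the stack after it has
	 * read one TD from the host into result->buf_addr. */
	static void example_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				       struct mhi_result *result)
	{
		if (result->transaction_status) {
			dev_err(&mhi_dev->dev, "UL transfer failed: %d\n",
				result->transaction_status);
			return;
		}

		/* result->bytes_xferd bytes are valid in result->buf_addr */
		dev_dbg(&mhi_dev->dev, "Received %zu bytes from the host\n",
			result->bytes_xferd);
	}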
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-15-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++ 2 files changed, 130 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 32ac567e0f67..1e24eae4b446 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -262,6 +262,127 @@ err_unlock: return ret; } +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t tr_len, read_offset, write_offset; + struct mhi_ring_element *el; + bool tr_done = false; + void *write_addr; + u64 read_addr; + u32 buf_left; + int ret; + + buf_left = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + return -ENODEV; + } + + el = &ring->ring_cache[ring->rd_offset]; + + /* Check if there is data pending to be read from previous read operation */ + if (mhi_chan->tre_bytes_left) { + dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left); + tr_len = min(buf_left, mhi_chan->tre_bytes_left); + } else { + mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el); + mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + tr_len = min(buf_left, mhi_chan->tre_size); + } + + read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + write_offset = len - buf_left; + read_addr = mhi_chan->tre_loc + read_offset; + write_addr = result->buf_addr + write_offset; + + dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); + return ret; + } + + buf_left -= tr_len; + mhi_chan->tre_bytes_left -= tr_len; + + /* + * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been + * read completely: + * + * 1. Send completion event to the host based on the flags set in TRE. + * 2. Increment the local read offset of the transfer ring. + */ + if (!mhi_chan->tre_bytes_left) { + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + return ret; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + return ret; + } + } + + tr_done = true; + } + + mhi_ep_ring_inc_index(ring); + } + + result->bytes_xferd += tr_len; + } while (buf_left && !tr_done); + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 8c6406d9c51f..fc7d197413eb 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -254,4 +254,13 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); */ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); +/** + * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty + * @mhi_dev: Device associated with the channels + * @dir: DMA direction for the channel + * + * Return: true if the queue is empty, false otherwise. + */ +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); + #endif -- cgit v1.2.3 From 03c0bb8ec983f993a704417d73cc0a3511453d3e Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:51 +0530 Subject: bus: mhi: ep: Add support for processing channel rings Add support for processing the channel rings from host. For the channel ring associated with DL channel, the xfer callback will simply invoked. For the case of UL channel, the ring elements will be read in a buffer till the write pointer and later passed to the client driver using the xfer callback. The client drivers should provide the callbacks for both UL and DL channels during registration. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-16-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 2 + 2 files changed, 110 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 1e24eae4b446..e2ed10b4a9d2 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -383,6 +383,57 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return 0; } +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret; + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. 
+ */ + if (!mhi_chan->xfer_cb) { + dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n"); + return -ENODEV; + } + + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) + return -ENOMEM; + + do { + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); + kfree(result.buf_addr); + return ret; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + result.bytes_xferd = 0; + memset(result.buf_addr, 0, len); + + /* Read until the ring becomes empty */ + } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); + + kfree(result.buf_addr); + } + + return 0; +} + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; @@ -565,6 +616,60 @@ static void mhi_ep_cmd_ring_worker(struct work_struct *work) } } +static void mhi_ep_ch_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_item *itr, *tmp; + struct mhi_ring_element *el; + struct mhi_ep_ring *ring; + struct mhi_ep_chan *chan; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->list_lock, flags); + list_splice_tail_init(&mhi_cntrl->ch_db_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags); + + /* Process each queued channel ring. In case of an error, just process next element. */ + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + ring = itr->ring; + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + kfree(itr); + continue; + } + + /* Sanity check to make sure there are elements in the ring */ + if (ring->rd_offset == ring->wr_offset) { + kfree(itr); + continue; + } + + el = &ring->ring_cache[ring->rd_offset]; + chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + mutex_lock(&chan->lock); + dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); + ret = mhi_ep_process_ch_ring(ring, el); + if (ret) { + dev_err(dev, "Error processing ring for channel (%u): %d\n", + ring->ch_id, ret); + mutex_unlock(&chan->lock); + kfree(itr); + continue; + } + + mutex_unlock(&chan->lock); + kfree(itr); + } +} + static void mhi_ep_state_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); @@ -630,6 +735,8 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon spin_lock(&mhi_cntrl->list_lock); list_splice_tail_init(&head, &mhi_cntrl->ch_db_list); spin_unlock(&mhi_cntrl->list_lock); + + queue_work(mhi_cntrl->wq, &mhi_cntrl->ch_ring_work); } } @@ -1120,6 +1227,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); + INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker); mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index fc7d197413eb..eecc8f35d630 100644 --- a/include/linux/mhi_ep.h +++ 
b/include/linux/mhi_ep.h @@ -78,6 +78,7 @@ struct mhi_ep_db_info { * @state_work: State transition worker * @reset_work: Worker for MHI Endpoint reset * @cmd_ring_work: Worker for processing command rings + * @ch_ring_work: Worker for processing channel rings * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context @@ -126,6 +127,7 @@ struct mhi_ep_cntrl { struct work_struct state_work; struct work_struct reset_work; struct work_struct cmd_ring_work; + struct work_struct ch_ring_work; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, -- cgit v1.2.3 From 2d945a394d9c1c59d88397cb383b11216d018a6b Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:52 +0530 Subject: bus: mhi: ep: Add support for queueing SKBs to the host Add support for queueing SKBs to the host over the transfer ring of the relevant channel. The mhi_ep_queue_skb() API will be used by the client networking drivers to queue the SKBs to the host over MHI bus. The host will add ring elements to the transfer ring periodically for the device and the device will write SKBs to the ring elements. If a single SKB doesn't fit in a ring element (TRE), it will be placed in multiple ring elements and the overflow event will be sent for all ring elements except the last one. For the last ring element, the EOT event will be sent indicating the packet boundary. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-17-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mhi_ep.h | 9 ++++++ 2 files changed, 91 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index e2ed10b4a9d2..660d1e9791d3 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -434,6 +434,88 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem return 0; } +/* TODO: Handle partially formed TDs */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) +{ + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ring_element *el; + u32 buf_left, read_offset; + struct mhi_ep_ring *ring; + enum mhi_ev_ccs code; + void *read_addr; + u64 write_addr; + size_t tr_len; + u32 tre_len; + int ret; + + buf_left = skb->len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel not available\n"); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE)) { + dev_err(dev, "TRE not available!\n"); + ret = -ENOSPC; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_TRE_DATA_GET_LEN(el); + + tr_len = min(buf_left, tre_len); + read_offset = skb->len - buf_left; + read_addr = skb->data + read_offset; + write_addr = MHI_TRE_DATA_GET_PTR(el); + + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret 
= mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); + goto err_exit; + } + + buf_left -= tr_len; + /* + * For all TREs queued by the host for DL channel, only the EOT flag will be set. + * If the packet doesn't fit into a single TRE, send the OVERFLOW event to + * the host so that the host can adjust the packet boundary to next TREs. Else send + * the EOT event to the host indicating the packet boundary. + */ + if (buf_left) + code = MHI_EV_CC_OVERFLOW; + else + code = MHI_EV_CC_EOT; + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (buf_left); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) { size_t cmd_ctx_host_size, ch_ctx_host_size, ev_ctx_host_size; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index eecc8f35d630..478aece17046 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -265,4 +265,13 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); */ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); +/** + * mhi_ep_queue_skb - Send SKBs to host over MHI Endpoint + * @mhi_dev: Device associated with the DL channel + * @skb: SKBs to be queued + * + * Return: 0 if the SKBs has been sent successfully, a negative error code otherwise. + */ +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb); + #endif -- cgit v1.2.3 From e4b7b5f0f30aaa4677126e04220677a02839e1c4 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:53 +0530 Subject: bus: mhi: ep: Add support for suspending and resuming channels Add support for suspending and resuming the channels in MHI endpoint stack. The channels will be moved to the suspended state during M3 state transition and will be resumed during M0 transition. 
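Both directions rely on the same read-modify-write of the cached host channel context, roughly as in the standalone sketch below (the helper name is illustrative; the mask, accessors and the channel-state enum spelling follow the MHI common header used by this series):

	/* Update the channel state field in the cached host channel context */
	static void example_set_ch_state(struct mhi_ep_cntrl *mhi_cntrl,
					 u32 ch_id, enum mhi_ch_state state)
	{
		u32 tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[ch_id].chcfg);

		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, state);
		mhi_cntrl->ch_ctx_cache[ch_id].chcfg = cpu_to_le32(tmp);
	}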
Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-18-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/internal.h | 2 ++ drivers/bus/mhi/ep/main.c | 58 +++++++++++++++++++++++++++++++++++++++++++ drivers/bus/mhi/ep/sm.c | 5 ++++ 3 files changed, 65 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index d201d755560c..a2125fa5fe2f 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -212,5 +212,7 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); #endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 660d1e9791d3..bae5f40ec15e 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -1097,6 +1097,64 @@ void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) } EXPORT_SYMBOL_GPL(mhi_ep_power_down); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); + /* Set channel state to SUSPENDED */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); + /* Set channel state to RUNNING */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); + mutex_unlock(&mhi_chan->lock); + } +} + static void mhi_ep_release_device(struct device *dev) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index e3865b85399d..3655c19e23c7 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -62,8 +62,11 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) enum mhi_state old_state; int ret; + /* If MHI is in M3, resume suspended channels */ spin_lock_bh(&mhi_cntrl->state_lock); old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); spin_unlock_bh(&mhi_cntrl->state_lock); @@ -106,6 +109,8 @@ int 
mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) return ret; } + mhi_ep_suspend_channels(mhi_cntrl); + /* Signal host that the device moved to M3 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); if (ret) { -- cgit v1.2.3 From c268c0a8a33047cd957fecc1349d09a68eb6ad9e Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Tue, 5 Apr 2022 19:27:54 +0530 Subject: bus: mhi: ep: Add uevent support for module autoloading Add uevent support to the MHI endpoint bus so that the client drivers can be autoloaded by udev when the MHI endpoint devices get created. The client drivers are expected to provide MODULE_DEVICE_TABLE with the MHI id_table struct so that the alias can be exported. The MHI endpoint bus reuses the mhi_device_id structure of the MHI bus. Reviewed-by: Alex Elder Signed-off-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220405135754.6622-19-manivannan.sadhasivam@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/ep/main.c | 9 +++++++++ include/linux/mod_devicetable.h | 2 ++ scripts/mod/file2alias.c | 10 ++++++++++ 3 files changed, 21 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index bae5f40ec15e..40109a79017a 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -1536,6 +1536,14 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) } EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + static int mhi_ep_match(struct device *dev, struct device_driver *drv) { struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); @@ -1562,6 +1570,7 @@ struct bus_type mhi_ep_bus_type = { .name = "mhi_ep", .dev_name = "mhi_ep", .match = mhi_ep_match, + .uevent = mhi_ep_uevent, }; static int __init mhi_ep_init(void) diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 5da5d990ff58..549590e9c644 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -835,6 +835,8 @@ struct wmi_device_id { #define MHI_DEVICE_MODALIAS_FMT "mhi:%s" #define MHI_NAME_SIZE 32 +#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s" + /** * struct mhi_device_id - MHI device identification * @chan: MHI channel name diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 5258247d78ac..d9d6a31446ea 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1391,6 +1391,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: mhi_ep:S */ +static int do_mhi_ep_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD_ADDR(symval, mhi_device_id, chan); + sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan); + + return 1; +} + /* Looks like: ishtp:{guid} */ static int do_ishtp_entry(const char *filename, void *symval, char *alias) { @@ -1519,6 +1528,7 @@ static const struct devtable devtable[] = { {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, {"mhi", SIZE_mhi_device_id, do_mhi_entry}, + {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry}, {"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry}, {"ssam", SIZE_ssam_device_id, do_ssam_entry}, {"dfl", SIZE_dfl_device_id, do_dfl_entry}, -- cgit v1.2.3 From 249369e93121306ccf0e1365fc5b2ecc6c8618ab Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Tue, 26
Apr 2022 20:58:59 +0800 Subject: bus: mhi: replace snprintf with sysfs_emit Fix following coccicheck warning: ./drivers/bus/mhi/host/init.c:89:8-16: WARNING: use scnprintf or sprintf Use sysfs_emit and sysfs_emit_at instead of snprintf. Reviewed-by: Kees Cook Reviewed-by: Manivannan Sadhasivam Signed-off-by: Wan Jiabing Link: https://lore.kernel.org/r/20220426125902.681258-1-wanjiabing@vivo.com Signed-off-by: Greg Kroah-Hartman --- drivers/bus/mhi/host/init.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index a665b8e92408..d2f1086ea1b7 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -86,7 +86,7 @@ static ssize_t serial_number_show(struct device *dev, struct mhi_device *mhi_dev = to_mhi_device(dev); struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; - return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n", + return sysfs_emit(buf, "Serial Number: %u\n", mhi_cntrl->serial_number); } static DEVICE_ATTR_RO(serial_number); @@ -100,9 +100,8 @@ static ssize_t oem_pk_hash_show(struct device *dev, int i, cnt = 0; for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) - cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, - "OEMPKHASH[%d]: 0x%x\n", i, - mhi_cntrl->oem_pk_hash[i]); + cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", + i, mhi_cntrl->oem_pk_hash[i]); return cnt; } -- cgit v1.2.3 From a96ef8b504efb2ad445dfb6d54f9488c3ddf23d2 Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Wed, 27 Apr 2022 09:26:48 +0200 Subject: bus: mhi: host: pci_generic: add Telit FN980 v1 hardware revision Add Telit FN980 v1 hardware revision: 01:00.0 Unassigned class [ff00]: Qualcomm Device [17cb:0306] Subsystem: Device [1c5d:2000] Signed-off-by: Daniele Palmas Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220427072648.17635-1-dnlplm@gmail.com [mani: Added "host" to the subject] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/pci_generic.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 476699b8039b..d0a7b5d3c01e 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -454,6 +454,41 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .sideband_wake = false, }; +static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), +}; + +static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), + .ch_cfg = mhi_telit_fn980_hw_v1_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), + .event_cfg = mhi_telit_fn980_hw_v1_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { + .name = "telit-fn980-hwv1", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_telit_fn980_hw_v1_config, 
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + /* Keep the list sorted based on the PID. New VID should be added as the last entry */ static const struct pci_device_id mhi_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), @@ -461,6 +496,9 @@ static const struct pci_device_id mhi_pci_id_table[] = { /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* Telit FN980 hardware revision v1 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), + .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), -- cgit v1.2.3 From 77fc41204734042861210b9d05338c9b8360affb Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Mon, 2 May 2022 13:20:36 +0200 Subject: bus: mhi: host: pci_generic: add Telit FN990 Add Telit FN990: 01:00.0 Unassigned class [ff00]: Qualcomm Device 0308 Subsystem: Device 1c5d:2010 Signed-off-by: Daniele Palmas Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/20220502112036.443618-1-dnlplm@gmail.com [mani: Added "host" to the subject] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/pci_generic.c | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'drivers/bus') diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index d0a7b5d3c01e..24c94c23d78b 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -489,6 +489,44 @@ static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { .sideband_wake = false, }; +static const struct mhi_channel_config mhi_telit_fn990_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_telit_fn990_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_telit_fn990_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), + .ch_cfg = mhi_telit_fn990_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .name = "telit-fn990", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + /* Keep the list sorted based on the PID. 
From 13b9b814da2de5006795ddcff2bbaea46017429d Mon Sep 17 00:00:00 2001
From: Slark Xiao
Date: Tue, 3 May 2022 10:43:49 +0800
Subject: bus: mhi: host: Add support for Foxconn T99W373 and T99W368

The products enumerate the same way as the earlier Foxconn SDX55 parts, so the T99W373 (SDX62) and T99W368 (SDX65) reuse the Foxconn SDX55 config. The fw and edl fields are left out of the new entry.

Signed-off-by: Slark Xiao
Reviewed-by: Manivannan Sadhasivam
Reviewed-by: Loic Poulain
Link: https://lore.kernel.org/r/20220503024349.4486-1-slark_xiao@163.com
Signed-off-by: Manivannan Sadhasivam
---
 drivers/bus/mhi/host/pci_generic.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'drivers/bus')

diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 24c94c23d78b..6fbc5915ea36 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -371,6 +371,15 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
 	.sideband_wake = false,
 };
 
+static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
+	.name = "foxconn-sdx65",
+	.config = &modem_foxconn_sdx55_config,
+	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+	.dma_data_width = 32,
+	.mru_default = 32768,
+	.sideband_wake = false,
+};
+
 static const struct mhi_channel_config mhi_mv3x_channels[] = {
 	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
 	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -560,6 +569,12 @@ static const struct pci_device_id mhi_pci_id_table[] = {
 	/* T99W175 (sdx55), Based on Qualcomm new baseline */
 	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
 		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+	/* T99W368 (sdx65) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
+		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+	/* T99W373 (sdx62) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
+		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
 	/* MV31-W (Cinterion) */
 	{ PCI_DEVICE(0x1269, 0x00b3),
 		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
--
cgit v1.2.3
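Both Foxconn IDs above point at the same mhi_foxconn_sdx65_info, which is how one config serves two products: whatever table entry matches, its driver_data is handed to the probe callback. A simplified sketch of how a pci_generic-style probe recovers it; error handling and the actual MHI controller registration are omitted, so this is not the driver's real probe body:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* The matched mhi_pci_id_table entry carries the per-variant info. */
	const struct mhi_pci_dev_info *info =
			(struct mhi_pci_dev_info *) id->driver_data;

	dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);

	/* info->config, info->bar_num, info->dma_data_width etc. would then
	 * parameterize the MHI controller registration for this variant. */
	return 0;
}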
From c337125b8834f9719dfda0e40b25eaa266f1b8cf Mon Sep 17 00:00:00 2001
From: Tony Lindgren
Date: Thu, 12 May 2022 08:30:21 +0300
Subject: bus: ti-sysc: Fix warnings for unbind for serial

We can get "failed to disable" clock_unprepare warnings on unbind, at least for the serial console device, if the unbind is done before the device has been idled. As some devices use deferred idle, we must check for pending idle work and idle the device ourselves if the work has not run yet.

Fixes: 76f0f772e469 ("bus: ti-sysc: Improve handling for no-reset-on-init and no-idle-on-init")
Cc: Romain Naour
Reviewed-by: Romain Naour
Signed-off-by: Tony Lindgren
Link: https://lore.kernel.org/r/20220512053021.61650-1-tony@atomide.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/bus/ti-sysc.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'drivers/bus')

diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 7a1b1f9e4933..70d00cea9d22 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -3395,7 +3395,9 @@ static int sysc_remove(struct platform_device *pdev)
 	struct sysc *ddata = platform_get_drvdata(pdev);
 	int error;
 
-	cancel_delayed_work_sync(&ddata->idle_work);
+	/* Device can still be enabled, see deferred idle quirk in probe */
+	if (cancel_delayed_work_sync(&ddata->idle_work))
+		ti_sysc_idle(&ddata->idle_work.work);
 
 	error = pm_runtime_resume_and_get(ddata->dev);
 	if (error < 0) {
--
cgit v1.2.3
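The fix hinges on the return value of cancel_delayed_work_sync(): it returns true only when the work item was still queued, meaning the deferred idle handler never ran and the device is still enabled. A generic sketch of the pattern with hypothetical names (the "demo_*" identifiers are not from ti-sysc):

#include <linux/workqueue.h>

struct demo_device {
	struct delayed_work idle_work;	/* queued with schedule_delayed_work() */
};

static void demo_idle(struct work_struct *work)
{
	/* ... idle the module and disable its clocks ... */
}

static void demo_remove(struct demo_device *ddata)
{
	/*
	 * True means the work was cancelled before it could run, so the
	 * deferred idle never happened; perform it synchronously now.
	 */
	if (cancel_delayed_work_sync(&ddata->idle_work))
		demo_idle(&ddata->idle_work.work);
}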