author    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>    2021-12-16 09:12:25 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2021-12-17 17:17:14 +0100
commit    227fee5fc99eeb74d43bf68832f6d59d30ac07d8 (patch)
tree      55a63fc3ca74db001f0c9b6e8b17ed7acc611f81 /drivers/bus/mhi
parent    bus: mhi: core: Fix race while handling SYS_ERR at power up (diff)
bus: mhi: core: Add an API for auto queueing buffers for DL channel
Add a new API, mhi_prepare_for_transfer_autoqueue(), for use by client drivers such as QRTR to request the MHI core to automatically queue buffers for the DL channel while starting both the UL and DL channels. So far, the "auto_queue" flag specified by controller drivers in the channel definition has served this purpose, but it will be removed at some point in the future.

Cc: netdev@vger.kernel.org
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Co-developed-by: Loic Poulain <loic.poulain@linaro.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20211216081227.237749-9-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
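As a rough illustration (not part of the patch), a client driver's probe/remove pair could adopt the new call as sketched below. The example_client_* names and the surrounding driver structure are hypothetical; only mhi_prepare_for_transfer_autoqueue() and mhi_unprepare_from_transfer() are MHI core API touched or already exported by this file.

    #include <linux/mhi.h>

    /* Hypothetical client probe: only the two mhi_* calls are real API. */
    static int example_client_probe(struct mhi_device *mhi_dev,
                                    const struct mhi_device_id *id)
    {
            int ret;

            /*
             * Start the UL and DL channels and ask the MHI core to allocate
             * and queue receive buffers on the DL channel, instead of
             * relying on the controller's "auto_queue" channel flag.
             */
            ret = mhi_prepare_for_transfer_autoqueue(mhi_dev);
            if (ret)
                    return ret;

            /* Downlink data is now delivered via the client's dl_callback. */
            return 0;
    }

    static void example_client_remove(struct mhi_device *mhi_dev)
    {
            /* Stop both channels; core-allocated DL buffers are freed here. */
            mhi_unprepare_from_transfer(mhi_dev);
    }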
Diffstat (limited to 'drivers/bus/mhi')
-rw-r--r--    drivers/bus/mhi/core/internal.h    | 6
-rw-r--r--    drivers/bus/mhi/core/main.c        | 21
2 files changed, 22 insertions, 5 deletions
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 9d72b1d1e986..e2e10474a9d9 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -682,8 +682,12 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan);
+ struct mhi_chan *mhi_chan, unsigned int flags);
+
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 930aba666b67..ffde617f93a3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+ struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
if (ret)
goto error_pm_state;
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1610,8 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
-/* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1622,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}
@@ -1640,8 +1642,19 @@ error_open_chan:
return ret;
}
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;