Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Kconfig              10
-rw-r--r--  drivers/net/ipa/gsi.c               350
-rw-r--r--  drivers/net/ipa/gsi.h                 1
-rw-r--r--  drivers/net/ipa/gsi_reg.h            10
-rw-r--r--  drivers/net/ipa/gsi_trans.h           1
-rw-r--r--  drivers/net/ipa/ipa.h                 4
-rw-r--r--  drivers/net/ipa/ipa_clock.c         194
-rw-r--r--  drivers/net/ipa/ipa_cmd.c            45
-rw-r--r--  drivers/net/ipa/ipa_cmd.h            24
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c    38
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c    38
-rw-r--r--  drivers/net/ipa/ipa_data.h           26
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c      121
-rw-r--r--  drivers/net/ipa/ipa_main.c           39
-rw-r--r--  drivers/net/ipa/ipa_reg.h            22
15 files changed, 519 insertions, 404 deletions
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 9f0d2a93379c..b68f1289b89e 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,9 +1,10 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on ARCH_QCOM && 64BIT && NET
- depends on QCOM_Q6V5_MSS
+ depends on 64BIT && NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_QMI_HELPERS
- select QCOM_MDT_LOADER
help
Choose Y or M here to include support for the Qualcomm
IP Accelerator (IPA), a hardware block present in some
@@ -11,7 +12,8 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 SoC.
+ between the AP and modem, on the Qualcomm SDM845 and SC7180
+ SoCs.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index b77f5fef7aec..440213646188 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -89,9 +89,9 @@
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
-#define GSI_CMD_TIMEOUT 5 /* seconds */
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
-#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
@@ -220,7 +220,59 @@ static void gsi_irq_teardown(struct gsi *gsi)
/* Nothing to do */
}
-static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
@@ -234,11 +286,11 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
gsi_irq_type_enable(gsi, GSI_IEOB);
}
-static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
u32 val;
- gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+ gsi->ieob_enabled_bitmap &= ~event_mask;
/* Disable the interrupt type if this was the last enabled channel */
if (!gsi->ieob_enabled_bitmap)
@@ -248,6 +300,11 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
+static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
+}
+
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
@@ -307,11 +364,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
- return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+ return !!wait_for_completion_timeout(completion, timeout);
}
/* Return the hardware's notion of the current state of an event ring */
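The command timeout above drops from 5 seconds to 50 milliseconds, and the conversion to jiffies now happens at the wait site instead of multiplying by HZ. A minimal sketch of the resulting wait pattern, assuming only the GSI_CMD_TIMEOUT constant and a completion initialized elsewhere (not the driver's exact code):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Sketch only: wait up to GSI_CMD_TIMEOUT milliseconds for a command
 * completion.  wait_for_completion_timeout() takes jiffies, so the
 * millisecond constant is converted with msecs_to_jiffies().
 */
static bool example_command_wait(struct completion *completion)
{
	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);

	/* Returns 0 on timeout, remaining jiffies otherwise */
	return !!wait_for_completion_timeout(completion, timeout);
}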
@@ -326,68 +385,54 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one event ring command at a time, and event
- * control interrupts should only occur when such a command
- * is issued here. Only permit *this* event ring to trigger
- * an interrupt, and only enable the event control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
-
- val = BIT(evt_ring_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ev_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
- opcode, evt_ring_id, evt_ring->state);
+ opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state;
/* Get initial event ring state */
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
- if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return -EINVAL;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
return 0;
dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return -EIO;
}
@@ -395,45 +440,48 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- enum gsi_evt_ring_state state = evt_ring->state;
+ enum gsi_evt_ring_state state;
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state;
- if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
}
/* Fetch the current state of a channel from hardware */
@@ -456,34 +504,19 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one channel command at a time, and channel
- * control interrupts should only occur when such a command is
- * issued here. So we only permit *this* channel to trigger
- * an interrupt and only enable the channel control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
-
- val = BIT(channel_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ch_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
@@ -589,7 +622,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- msleep(1); /* A short delay is required before a RESET command */
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
@@ -695,22 +729,38 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}
-/* Return the last (most recent) transaction completed on a channel. */
+/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
+ const struct list_head *list;
struct gsi_trans *trans;
spin_lock_bh(&trans_info->spinlock);
- if (!list_empty(&trans_info->complete))
- trans = list_last_entry(&trans_info->complete,
- struct gsi_trans, links);
- else if (!list_empty(&trans_info->polled))
- trans = list_last_entry(&trans_info->polled,
- struct gsi_trans, links);
- else
- trans = NULL;
+ /* There is a small chance a TX transaction got allocated just
+ * before we disabled transmits, so check for that.
+ */
+ if (channel->toward_ipa) {
+ list = &trans_info->alloc;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->pending;
+ if (!list_empty(list))
+ goto done;
+ }
+
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ */
+ list = &trans_info->complete;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->polled;
+ if (list_empty(list))
+ list = NULL;
+done:
+ trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
/* Caller will wait for this, so take a reference */
if (trans)
@@ -734,24 +784,6 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
-/* Stop channel activity. Transactions may not be allocated until thawed. */
-static void gsi_channel_freeze(struct gsi_channel *channel)
-{
- gsi_channel_trans_quiesce(channel);
-
- napi_disable(&channel->napi);
-
- gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
-}
-
-/* Allow transactions to be used on the channel again. */
-static void gsi_channel_thaw(struct gsi_channel *channel)
-{
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
-
- napi_enable(&channel->napi);
-}
-
/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
@@ -843,51 +875,92 @@ static void gsi_channel_deprogram(struct gsi_channel *channel)
/* Nothing to do */
}
-/* Start an allocated GSI channel */
-int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
- struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi *gsi = channel->gsi;
int ret;
+ if (!start)
+ return 0;
+
mutex_lock(&gsi->mutex);
ret = gsi_channel_start_command(channel);
mutex_unlock(&gsi->mutex);
- gsi_channel_thaw(channel);
-
return ret;
}
-/* Stop a started channel */
-int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 retries;
int ret;
- gsi_channel_freeze(channel);
+ /* Enable NAPI and the completion interrupt */
+ napi_enable(&channel->napi);
+ gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
- /* RX channels might require a little time to enter STOPPED state */
- retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+ ret = __gsi_channel_start(channel, true);
+ if (ret) {
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+ }
- mutex_lock(&gsi->mutex);
+ return ret;
+}
+
+static int gsi_channel_stop_retry(struct gsi_channel *channel)
+{
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
+ int ret;
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
- msleep(1);
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
+ return ret;
+}
+
+static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
+{
+ struct gsi *gsi = channel->gsi;
+ int ret;
+
+ /* Wait for any underway transactions to complete before stopping. */
+ gsi_channel_trans_quiesce(channel);
+
+ if (!stop)
+ return 0;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_stop_retry(channel);
+
mutex_unlock(&gsi->mutex);
- /* Thaw the channel if we need to retry (or on error) */
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ ret = __gsi_channel_stop(channel, true);
if (ret)
- gsi_channel_thaw(channel);
+ return ret;
- return ret;
+ /* Disable the completion interrupt and NAPI if successful */
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+
+ return 0;
}
/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
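Both the pre-RESET delay and the stop retry loop above switch from msleep(1) to usleep_range(); for delays of a few milliseconds msleep() rounds up to jiffy granularity and can oversleep considerably. A hedged sketch of the retry idiom, with try_stop() as a hypothetical stand-in for gsi_channel_stop_command():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: retry an operation that reports -EAGAIN, sleeping
 * 3-5 ms between attempts.
 */
static int example_stop_retry(void)
{
	u32 retries = GSI_CHANNEL_STOP_RETRIES;
	int ret;

	do {
		ret = try_stop();	/* hypothetical helper */
		if (ret != -EAGAIN)
			break;
		usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
	} while (retries--);

	return ret;
}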
@@ -912,11 +985,14 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
- if (stop)
- return gsi_channel_stop(gsi, channel_id);
+ ret = __gsi_channel_stop(channel, stop);
+ if (ret)
+ return ret;
- gsi_channel_freeze(channel);
+ /* Ensure NAPI polling has finished. */
+ napi_synchronize(&channel->napi);
return 0;
}
@@ -926,12 +1002,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- if (start)
- return gsi_channel_start(gsi, channel_id);
-
- gsi_channel_thaw(channel);
-
- return 0;
+ return __gsi_channel_start(channel, start);
}
/**
@@ -1040,7 +1111,6 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
evt_ring = &gsi->evt_ring[evt_ring_id];
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
complete(&evt_ring->completion);
}
@@ -1178,6 +1248,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
u32 event_mask;
event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_irq_ieob_disable(gsi, event_mask);
iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
while (event_mask) {
@@ -1185,7 +1256,6 @@ static void gsi_isr_ieob(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
- gsi_irq_ieob_disable(gsi, evt_ring_id);
napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
}
}
@@ -1430,7 +1500,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
}
/* Consult hardware, move any newly completed transactions to completed list */
-static void gsi_channel_update(struct gsi_channel *channel)
+static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1449,7 +1519,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return;
+ return NULL;
/* Get the transaction for the latest completed event. Take a
* reference to keep it from completing before we give the events
@@ -1474,6 +1544,8 @@ static void gsi_channel_update(struct gsi_channel *channel)
gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
gsi_trans_free(trans);
+
+ return gsi_channel_trans_complete(channel);
}
/**
@@ -1494,11 +1566,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
/* Get the first transaction from the completed list */
trans = gsi_channel_trans_complete(channel);
- if (!trans) {
- /* List is empty; see if there's more to do */
- gsi_channel_update(channel);
- trans = gsi_channel_trans_complete(channel);
- }
+ if (!trans) /* List is empty; see if there's more to do */
+ trans = gsi_channel_update(channel);
if (trans)
gsi_trans_move_polled(trans);
@@ -1521,23 +1590,20 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
struct gsi_channel *channel;
- int count = 0;
+ int count;
channel = container_of(napi, struct gsi_channel, napi);
- while (count < budget) {
+ for (count = 0; count < budget; count++) {
struct gsi_trans *trans;
- count++;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
gsi_trans_complete(trans);
}
- if (count < budget) {
- napi_complete(&channel->napi);
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
- }
+ if (count < budget && napi_complete(napi))
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
return count;
}
@@ -1627,7 +1693,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
- bool success;
+ bool timeout;
u32 val;
/* The error global interrupt type is always enabled (until we
@@ -1650,12 +1716,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- if (success)
+ if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
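The reworked gsi_channel_poll() above re-arms the IEOB interrupt only when napi_complete() confirms the NAPI context actually left the scheduled state; if polling was rescheduled in the meantime, the interrupt stays masked and NAPI simply runs again. A driver-agnostic sketch of that pattern, where process_one() and irq_enable() are hypothetical placeholders:

#include <linux/netdevice.h>

/* Illustrative poll function following the pattern used in
 * gsi_channel_poll(); not the driver's exact code.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int count;

	for (count = 0; count < budget; count++)
		if (!process_one(napi))		/* no more completed work */
			break;

	/* napi_complete() returns false if NAPI was rescheduled, in which
	 * case the hardware interrupt must remain disabled.
	 */
	if (count < budget && napi_complete(napi))
		irq_enable(napi);

	return count;
}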
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 96c9aed397aa..d674db0ba4eb 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -142,7 +142,6 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
struct completion completion; /* signals event ring state changes */
- enum gsi_evt_ring_state state;
struct gsi_ring ring;
};
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 0e138bbd8205..299456e70f28 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -59,16 +59,6 @@
#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
(0x0000c01c + 0x1000 * (ee))
-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c028 + 0x1000 * (ee))
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c02c + 0x1000 * (ee))
-
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 4d4606b5fa95..3a4ab8a94d82 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -13,6 +13,7 @@
#include "ipa_cmd.h"
+struct page;
struct scatterlist;
struct device;
struct sk_buff;
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 6c2371084c55..802077631371 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -43,7 +43,7 @@ enum ipa_flag {
* @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
- * @modem_rproc: Remoteproc handle for modem subsystem
+ * @completion: Used to signal pipeline clear transfer complete
* @smp2p: SMP2P information
* @clock: IPA clocking information
* @table_addr: DMA address of filter/route table content
@@ -83,7 +83,7 @@ struct ipa {
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
- struct rproc *modem_rproc;
+ struct completion completion;
struct notifier_block nb;
void *notifier;
struct ipa_smp2p *smp2p;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 135c393437f1..354675a643db 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -31,142 +31,154 @@
*/
/**
+ * struct ipa_interconnect - IPA interconnect information
+ * @path: Interconnect path
+ * @average_bandwidth: Average interconnect bandwidth (KB/second)
+ * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
+ */
+struct ipa_interconnect {
+ struct icc_path *path;
+ u32 average_bandwidth;
+ u32 peak_bandwidth;
+};
+
+/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
- * @memory_path: Memory interconnect
- * @imem_path: Internal memory interconnect
- * @config_path: Configuration space interconnect
- * @interconnect_data: Interconnect configuration data
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
- struct icc_path *memory_path;
- struct icc_path *imem_path;
- struct icc_path *config_path;
- const struct ipa_interconnect_data *interconnect_data;
+ u32 interconnect_count;
+ struct ipa_interconnect *interconnect;
};
-static struct icc_path *
-ipa_interconnect_init_one(struct device *dev, const char *name)
+static int ipa_interconnect_init_one(struct device *dev,
+ struct ipa_interconnect *interconnect,
+ const struct ipa_interconnect_data *data)
{
struct icc_path *path;
- path = of_icc_get(dev, name);
- if (IS_ERR(path))
- dev_err(dev, "error %ld getting %s interconnect\n",
- PTR_ERR(path), name);
+ path = of_icc_get(dev, data->name);
+ if (IS_ERR(path)) {
+ int ret = PTR_ERR(path);
+
+ dev_err(dev, "error %d getting %s interconnect\n", ret,
+ data->name);
- return path;
+ return ret;
+ }
+
+ interconnect->path = path;
+ interconnect->average_bandwidth = data->average_bandwidth;
+ interconnect->peak_bandwidth = data->peak_bandwidth;
+
+ return 0;
}
-/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
- struct icc_path *path;
-
- path = ipa_interconnect_init_one(dev, "memory");
- if (IS_ERR(path))
- goto err_return;
- clock->memory_path = path;
+ icc_put(interconnect->path);
+ memset(interconnect, 0, sizeof(*interconnect));
+}
- path = ipa_interconnect_init_one(dev, "imem");
- if (IS_ERR(path))
- goto err_memory_path_put;
- clock->imem_path = path;
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
+ const struct ipa_interconnect_data *data)
+{
+ struct ipa_interconnect *interconnect;
+ u32 count;
+ int ret;
- path = ipa_interconnect_init_one(dev, "config");
- if (IS_ERR(path))
- goto err_imem_path_put;
- clock->config_path = path;
+ count = clock->interconnect_count;
+ interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
+ if (!interconnect)
+ return -ENOMEM;
+ clock->interconnect = interconnect;
+
+ while (count--) {
+ ret = ipa_interconnect_init_one(dev, interconnect, data++);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_put:
- icc_put(clock->imem_path);
-err_memory_path_put:
- icc_put(clock->memory_path);
-err_return:
- return PTR_ERR(path);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
+
+ return ret;
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
- icc_put(clock->config_path);
- icc_put(clock->imem_path);
- icc_put(clock->memory_path);
+ struct ipa_interconnect *interconnect;
+
+ interconnect = clock->interconnect + clock->interconnect_count;
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
int ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- return ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_memory_path_disable;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->config_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_imem_path_disable;
+ u32 i;
+
+ interconnect = clock->interconnect;
+ for (i = 0; i < clock->interconnect_count; i++) {
+ ret = icc_set_bw(interconnect->path,
+ interconnect->average_bandwidth,
+ interconnect->peak_bandwidth);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_disable:
- (void)icc_set_bw(clock->imem_path, 0, 0);
-err_memory_path_disable:
- (void)icc_set_bw(clock->memory_path, 0, 0);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ (void)icc_set_bw(interconnect->path, 0, 0);
return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+static void ipa_interconnect_disable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ int result = 0;
+ u32 count;
int ret;
- ret = icc_set_bw(clock->memory_path, 0, 0);
- if (ret)
- return ret;
-
- ret = icc_set_bw(clock->imem_path, 0, 0);
- if (ret)
- goto err_memory_path_reenable;
-
- ret = icc_set_bw(clock->config_path, 0, 0);
- if (ret)
- goto err_imem_path_reenable;
-
- return 0;
-
-err_imem_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- (void)icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
-err_memory_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- (void)icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
+ count = clock->interconnect_count;
+ interconnect = clock->interconnect + count;
+ while (count--) {
+ interconnect--;
+ ret = icc_set_bw(interconnect->path, 0, 0);
+ if (ret && !result)
+ result = ret;
+ }
- return ret;
+ if (result)
+ dev_err(&ipa->pdev->dev,
+ "error %d disabling IPA interconnects\n", ret);
}
/* Turn on IPA clocks, including interconnects */
@@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- (void)ipa_interconnect_disable(ipa);
+ ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@@ -286,9 +298,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
goto err_clk_put;
}
clock->core = clk;
- clock->interconnect_data = data->interconnect;
+ clock->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(clock, dev);
+ ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
if (ret)
goto err_kfree;
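The interconnects are now kept in a dynamically allocated array, initialized front to back, with the pointer walked back over already-initialized entries when something fails. A small sketch of that unwind idiom, assuming hypothetical init_one()/exit_one() per-entry helpers and an illustrative element type:

#include <linux/types.h>

struct item {
	void *resource;			/* illustrative only */
};

static int example_array_init(struct item *items, u32 count)
{
	struct item *item = items;
	int ret;

	while (count--) {
		ret = init_one(item);	/* hypothetical per-entry setup */
		if (ret)
			goto out_unwind;
		item++;
	}

	return 0;

out_unwind:
	/* Tear down only the entries that were successfully set up */
	while (item-- > items)
		exit_one(item);		/* hypothetical per-entry teardown */

	return ret;
}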
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 002e51448510..97b50fee6008 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -529,7 +529,7 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
direction, opcode);
}
-static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
@@ -543,14 +543,14 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
- payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+ payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
}
/* Issue a small command TX data transfer */
-static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
@@ -558,8 +558,6 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
- /* assert(size <= sizeof(*payload)); */
-
/* Just transfer a zero-filled payload structure */
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
@@ -567,34 +565,53 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
direction, opcode);
}
-void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+/* Add immediate commands to a transaction to clear the hardware pipeline */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ /* This will complete when the transfer is received */
+ reinit_completion(&ipa->completion);
+ /* Issue a no-op register write command (mask 0 means no write) */
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+
+ /* Send a data packet through the IPA pipeline. The packet_init
+ * command says to send the next packet directly to the exception
+ * endpoint without any other IPA processing. The tag_status
+ * command requests that status be generated on completion of
+ * that transfer, and that it will be tagged with a value.
+ * Finally, the transfer command sends a small packet of data
+ * (instead of a command) using the command endpoint.
+ */
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
- ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
- ipa_cmd_transfer_add(trans, 4);
+ ipa_cmd_ip_tag_status_add(trans);
+ ipa_cmd_transfer_add(trans);
}
-/* Returns the number of commands required for the tag process */
-u32 ipa_cmd_tag_process_count(void)
+/* Returns the number of commands required to clear the pipeline */
+u32 ipa_cmd_pipeline_clear_count(void)
{
return 4;
}
-void ipa_cmd_tag_process(struct ipa *ipa)
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
+{
+ wait_for_completion(&ipa->completion);
+}
+
+void ipa_cmd_pipeline_clear(struct ipa *ipa)
{
- u32 count = ipa_cmd_tag_process_count();
+ u32 count = ipa_cmd_pipeline_clear_count();
struct gsi_trans *trans;
trans = ipa_cmd_trans_alloc(ipa, count);
if (trans) {
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
} else {
dev_err(&ipa->pdev->dev,
"error allocating %u entry tag transaction\n", count);
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4ed09c486abc..6dd3d35cf315 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -157,26 +157,30 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
u16 size, dma_addr_t addr, bool toward_ipa);
/**
- * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction
* @trans: GSI transaction
*/
-void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans);
/**
- * ipa_cmd_tag_process_add_count() - Number of commands in a tag process
+ * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline
*
* Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * to hold commands to clear the pipeline
*/
-u32 ipa_cmd_tag_process_count(void);
+u32 ipa_cmd_pipeline_clear_count(void);
/**
- * ipa_cmd_tag_process() - Perform a tag process
- *
- * @Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * ipa_cmd_pipeline_clear_wait() - Wait for pipeline clear to complete
+ * @ipa: - IPA pointer
+ */
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
+
+/**
+ * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
+ * @ipa: - IPA pointer
*/
-void ipa_cmd_tag_process(struct ipa *ipa);
+void ipa_cmd_pipeline_clear(struct ipa *ipa);
/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 5cc0ed77edb9..997b51ceb7d7 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 465000, /* 465 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 68570, /* 68.570 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 30000, /* 30 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SC7180 SoC. */
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index f8fee8d3ca42..88c9c3562ab7 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 600000, /* 600 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 350000, /* 350 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 40000, /* 40 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SDM845 SoC. */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 0ed5ffe2b8da..b476fc373f7f 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -258,32 +258,28 @@ struct ipa_mem_data {
u32 smem_size;
};
-/** enum ipa_interconnect_id - IPA interconnect identifier */
-enum ipa_interconnect_id {
- IPA_INTERCONNECT_MEMORY,
- IPA_INTERCONNECT_IMEM,
- IPA_INTERCONNECT_CONFIG,
- IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
-};
-
/**
- * struct ipa_interconnect_data - description of IPA interconnect rates
- * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
- * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
*/
struct ipa_interconnect_data {
- u32 peak_rate;
- u32 average_rate;
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
};
/**
* struct ipa_clock_data - description of IPA clock and interconnect rates
* @core_clock_rate: Core clock rate (Hz)
- * @interconnect: Array of interconnect bandwidth parameters
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
*/
struct ipa_clock_data {
u32 core_clock_rate;
- struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 612afece303f..7209ee3c3124 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -69,8 +69,11 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
+#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
#ifdef IPA_VALIDATE
@@ -171,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 limit;
- /* Not sure where this constraint come from... */
- BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
@@ -399,7 +399,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
* That won't happen, and we could be more precise, but this is fine
* for now. We need to end the transaction with a "tag process."
*/
- count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -428,11 +428,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
/* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
+
return 0;
}
@@ -1015,31 +1017,34 @@ err_free_pages:
}
/**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
*
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
struct gsi *gsi;
u32 backlog;
if (!endpoint->replenish_enabled) {
- if (count)
- atomic_add(count, &endpoint->replenish_saved);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_saved);
return;
}
-
while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
return;
@@ -1047,8 +1052,8 @@ try_again_later:
/* The last one didn't succeed, so fix the backlog */
backlog = atomic_inc_return(&endpoint->replenish_backlog);
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
@@ -1075,7 +1080,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
/* Start replenishing if hardware currently has no buffers */
max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1094,7 +1099,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1172,11 +1177,45 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
return false; /* Don't skip this packet, process it */
}
+static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ struct ipa_endpoint *command_endpoint;
+ struct ipa *ipa = endpoint->ipa;
+ u32 endpoint_id;
+
+ if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ return false; /* No valid tag */
+
+ /* The status contains a valid tag. We know the packet was sent to
+ * this endpoint (already verified by ipa_endpoint_status_skip()).
+ * If the packet came from the AP->command TX endpoint we know
+ * this packet was sent as part of the pipeline clear process.
+ */
+ endpoint_id = u8_get_bits(status->endp_src_idx,
+ IPA_STATUS_SRC_IDX_FMASK);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (endpoint_id == command_endpoint->endpoint_id) {
+ complete(&ipa->completion);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "unexpected tagged packet from endpoint %u\n",
+ endpoint_id);
+ }
+
+ return true;
+}
+
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_status_drop_packet(const struct ipa_status *status)
+static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
{
u32 val;
+ /* If the status indicates a tagged transfer, we'll drop the packet */
+ if (ipa_endpoint_status_tag(endpoint, status))
+ return true;
+
/* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1213,12 +1252,11 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
continue;
}
- /* Compute the amount of buffer space consumed by the
- * packet, including the status element. If the hardware
- * is configured to pad packet data to an aligned boundary,
- * account for that. And if checksum offload is is enabled
- * a trailer containing computed checksum information will
- * be appended.
+ /* Compute the amount of buffer space consumed by the packet,
+ * including the status element. If the hardware is configured
+ * to pad packet data to an aligned boundary, account for that.
+ * And if checksum offload is enabled a trailer containing
+ * computed checksum information will be appended.
*/
align = endpoint->data->rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
@@ -1226,16 +1264,21 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
if (endpoint->data->checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- /* Charge the new packet with a proportional fraction of
- * the unused space in the original receive buffer.
- * XXX Charge a proportion of the *whole* receive buffer?
- */
- if (!ipa_status_drop_packet(status)) {
- u32 extra = unused * len / total_len;
- void *data2 = data + sizeof(*status);
- u32 len2 = le16_to_cpu(status->pkt_len);
+ if (!ipa_endpoint_status_drop(endpoint, status)) {
+ void *data2;
+ u32 extra;
+ u32 len2;
/* Client receives only packet data (no status) */
+ data2 = data + sizeof(*status);
+ len2 = le16_to_cpu(status->pkt_len);
+
+ /* Have the true size reflect the extra unused space in
+ * the original receive buffer. Distribute the "cost"
+ * proportionately across all aggregated packets in the
+ * buffer.
+ */
+ extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
}
@@ -1257,7 +1300,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
{
struct page *page;
- ipa_endpoint_replenish(endpoint, 1);
+ ipa_endpoint_replenish(endpoint, true);
if (trans->cancelled)
return;
@@ -1378,7 +1421,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
@@ -1399,7 +1442,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
@@ -1564,7 +1607,7 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_tag_process(ipa);
+ ipa_cmd_pipeline_clear(ipa);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
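One small change above is easy to miss: the truesize adjustment charged to each aggregated packet now rounds to the nearest value (DIV_ROUND_CLOSEST) instead of truncating. A worked sketch with made-up numbers showing where the two differ:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST() */
#include <linux/types.h>

static void example_truesize_extra(void)
{
	/* Hypothetical values: 3000 unused bytes in the receive buffer,
	 * and this packet accounts for 750 bytes of a 2048-byte total.
	 */
	u32 unused = 3000, len = 750, total_len = 2048;
	u32 old_extra = unused * len / total_len;		     /* 1098 (truncated) */
	u32 new_extra = DIV_ROUND_CLOSEST(unused * len, total_len); /* 1099 (rounded)   */

	(void)old_extra;
	(void)new_extra;
}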
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 84bb8ae92725..c10e7340b031 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/remoteproc.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -729,19 +728,6 @@ static const struct of_device_id ipa_match[] = {
};
MODULE_DEVICE_TABLE(of, ipa_match);
-static phandle of_property_read_phandle(const struct device_node *np,
- const char *name)
-{
- struct property *prop;
- int len = 0;
-
- prop = of_find_property(np, name, &len);
- if (!prop || len != sizeof(__be32))
- return 0;
-
- return be32_to_cpup(prop->value);
-}
-
/* Check things that can be validated at build time. This just
* groups these things so BUILD_BUG_ON() calls don't clutter the rest
* of the code.
@@ -807,10 +793,8 @@ static int ipa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
- struct rproc *rproc;
bool modem_init;
struct ipa *ipa;
- phandle ph;
int ret;
ipa_validate_build();
@@ -829,25 +813,12 @@ static int ipa_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- /* We rely on remoteproc to tell us about modem state changes */
- ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!ph) {
- dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
- return -EINVAL;
- }
-
- rproc = rproc_get_by_phandle(ph);
- if (!rproc)
- return -EPROBE_DEFER;
-
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
*/
clock = ipa_clock_init(dev, data->clock_data);
- if (IS_ERR(clock)) {
- ret = PTR_ERR(clock);
- goto err_rproc_put;
- }
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -858,9 +829,9 @@ static int ipa_probe(struct platform_device *pdev)
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
- ipa->modem_rproc = rproc;
ipa->clock = clock;
ipa->version = data->version;
+ init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
if (ret)
@@ -935,8 +906,6 @@ err_kfree_ipa:
kfree(ipa);
err_clock_exit:
ipa_clock_exit(clock);
-err_rproc_put:
- rproc_put(rproc);
return ret;
}
@@ -944,7 +913,6 @@ err_rproc_put:
static int ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -970,7 +938,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_reg_exit(ipa);
kfree(ipa);
ipa_clock_exit(clock);
- rproc_put(rproc);
return 0;
}
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e6b0827a244e..732e691e9aa6 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
static inline u32 ipa_header_size_encoded(enum ipa_version version,
u32 header_size)
{
+ u32 size = header_size & field_mask(HDR_LEN_FMASK);
u32 val;
- val = u32_encode_bits(header_size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(size, HDR_LEN_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(header_size == size); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- header_size >>= hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+ size = header_size >> hweight32(HDR_LEN_FMASK);
+ val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
return val;
}
@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
u32 offset)
{
+ u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
u32 val;
- val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(offset == off); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- offset >>= hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+ off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
+ val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
return val;
}
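For IPA v4.5 the header size and metadata offset no longer fit entirely within their original register fields, so each value is split: the low-order bits stay in the original field and the remainder goes into a new MSB field. A worked sketch of the split, assuming for illustration a 6-bit low field at bits 5:0 and a 2-bit MSB field at bits 29:28 (the real positions come from HDR_LEN_FMASK and HDR_LEN_MSB_FMASK):

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Illustrative only: e.g. header_size 0x53 splits into low 0x13, msb 0x1 */
static u32 example_header_size_encoded(u32 header_size)
{
	u32 low = header_size & GENMASK(5, 0);	/* header_size & field_mask(HDR_LEN_FMASK) */
	u32 msb = header_size >> 6;		/* header_size >> hweight32(HDR_LEN_FMASK)  */

	return u32_encode_bits(low, GENMASK(5, 0)) |
	       u32_encode_bits(msb, GENMASK(29, 28));
}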