Diffstat (limited to 'drivers/net/ipa/gsi.c')
-rw-r--r--  drivers/net/ipa/gsi.c  110
1 file changed, 33 insertions(+), 77 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 390d3403386a..e374079603cf 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -198,7 +198,7 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}
-/* Turn off all GSI interrupts initially */
+/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
static void gsi_irq_setup(struct gsi *gsi)
{
/* Disable all interrupt types */
@@ -211,18 +211,12 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
/* The inter-EE registers are in the non-adjusted address range */
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
- iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
-/* Turn off all GSI interrupts when we're all done */
-static void gsi_irq_teardown(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
@@ -351,7 +345,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
- return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
+ return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}
/* Return the ring index of a 32-bit ring offset */
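Note: the GENMASK(31, 0) masking above is replaced with the kernel's lower_32_bits() helper, and the same change is made elsewhere in this patch for the high half with upper_32_bits(). As a reference sketch, the helpers reduce to roughly the following (the example_ names are placeholders for this note; the real upper_32_bits() shifts in two 16-bit steps so it stays warning-free even for 32-bit arguments):

static inline u32 example_lower_32_bits(u64 n)
{
        return (u32)(n & 0xffffffff);   /* low-order 32 bits */
}

static inline u32 example_upper_32_bits(u64 n)
{
        return (u32)((n >> 16) >> 16);  /* high-order 32 bits */
}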
@@ -701,17 +695,16 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
- val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
+ val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = evt_ring->ring.addr & GENMASK(31, 0);
+ val = lower_32_bits(evt_ring->ring.addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
-
- val = evt_ring->ring.addr >> 32;
+ val = upper_32_bits(evt_ring->ring.addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
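ev_r_length_encoded() is one of the version-aware encoding helpers this series introduces, presumably alongside the register definitions (gsi_reg.h): the event ring length field is wider on newer GSI hardware, so the encoding now depends on gsi->version. A minimal sketch of the idea, assuming a 16-bit field that later grows; the exact widths and cut-over version are illustrative, not taken from this diff:

/* Sketch only: version-dependent encoding of an event ring length.
 * The field widths and the IPA_VERSION_4_9 boundary are assumptions
 * made for this example.
 */
static inline u32 example_ev_r_length_encoded(enum ipa_version version, u32 length)
{
        if (version < IPA_VERSION_4_9)
                return u32_encode_bits(length, GENMASK(15, 0));

        return u32_encode_bits(length, GENMASK(19, 0));
}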
@@ -787,7 +780,7 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
-/* Program a channel for use */
+/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
@@ -802,24 +795,23 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
channel->tre_ring.index = 0;
/* We program all channels as GPI type/protocol */
- val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
+ val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
- val = u32_encode_bits(size, R_LENGTH_FMASK);
+ val = r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the channel ring,
* respectively.
*/
- val = channel->tre_ring.addr & GENMASK(31, 0);
+ val = lower_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
-
- val = channel->tre_ring.addr >> 32;
+ val = upper_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
/* Command channel gets low weighted round-robin priority */
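chtype_protocol_encoded() and r_length_encoded() follow the same pattern as the event ring helper above: the channel protocol and ring length fields changed shape across GSI versions, so the raw u32_encode_bits() calls are replaced with helpers that take gsi->version. A hedged sketch of what such a protocol encoder might look like, assuming the protocol value gained an extra high-order bit on newer hardware (the MSB field name and the v4.5 boundary are assumptions):

/* Sketch only: encode a channel protocol whose field grew an extra
 * most-significant bit on newer hardware.  CHTYPE_PROTOCOL_MSB_FMASK
 * and the IPA_VERSION_4_5 boundary are assumptions for illustration.
 */
static inline u32 example_chtype_protocol_encoded(enum ipa_version version, u32 protocol)
{
        u32 val = u32_encode_bits(protocol, CHTYPE_PROTOCOL_FMASK);

        if (version < IPA_VERSION_4_5)
                return val;

        /* Encode whatever does not fit in the original field */
        protocol >>= hweight32(CHTYPE_PROTOCOL_FMASK);

        return val | u32_encode_bits(protocol, CHTYPE_PROTOCOL_MSB_FMASK);
}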
@@ -829,14 +821,14 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* We enable the doorbell engine for IPA v3.5.1 */
- if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
+ /* No need to use the doorbell engine starting at IPA v4.0 */
+ if (gsi->version < IPA_VERSION_4_0 && doorbell)
val |= USE_DB_ENG_FMASK;
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
*/
- if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
+ if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
val |= USE_ESCAPE_BUF_ONLY_FMASK;
@@ -844,6 +836,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
PREFETCH_MODE_FMASK);
}
+ /* All channels set DB_IN_BYTES */
+ if (gsi->version >= IPA_VERSION_4_9)
+ val |= DB_IN_BYTES;
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
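Replacing "== IPA_VERSION_3_5_1" with ordered comparisons such as "< IPA_VERSION_4_0" and ">= IPA_VERSION_4_0" works because enum ipa_version is declared in increasing hardware-version order, so relational operators read as "before/after this generation". Conceptually (an abbreviated, illustrative enumerator list, not the driver's real one):

/* Illustrative ordering assumption; the driver's real enum has more
 * enumerators than shown here.
 */
enum example_ipa_version {
        EXAMPLE_IPA_VERSION_3_5_1,
        EXAMPLE_IPA_VERSION_4_0,
        EXAMPLE_IPA_VERSION_4_2,
        EXAMPLE_IPA_VERSION_4_5,
        EXAMPLE_IPA_VERSION_4_9,
};

Written this way, a check like "version < IPA_VERSION_4_0" keeps meaning "pre-v4.0 hardware" even if further versions are added later, which is the point of dropping the equality tests.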
@@ -873,11 +868,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* All done! */
}
-static void gsi_channel_deprogram(struct gsi_channel *channel)
-{
- /* Nothing to do */
-}
-
static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
struct gsi *gsi = channel->gsi;
@@ -975,7 +965,7 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
+ if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
gsi_channel_program(channel, doorbell);
@@ -1337,10 +1327,9 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
int ret;
ret = platform_get_irq_byname(pdev, "gsi");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+ if (ret <= 0)
return ret ? : -EINVAL;
- }
+
irq = ret;
ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
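The surviving "return ret ? : -EINVAL;" uses the GNU C conditional with an omitted middle operand, which kernel code permits. Since platform_get_irq_byname() returned <= 0 on this path, it passes a negative errno straight through and maps a zero return to -EINVAL; spelled out, it is equivalent to:

/* Equivalent of "return ret ? : -EINVAL;" on the ret <= 0 path */
if (ret < 0)
        return ret;     /* propagate the errno from platform_get_irq_byname() */
return -EINVAL;         /* a zero IRQ is treated as invalid */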
@@ -1366,7 +1355,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
u32 tre_index;
/* Event xfer_ptr records the TRE it's associated with */
- tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
+ tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
return gsi_channel_trans_mapped(channel, tre_index);
@@ -1439,20 +1428,18 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
- size_t size = count * GSI_RING_ELEMENT_SIZE;
+ u32 size = count * GSI_RING_ELEMENT_SIZE;
struct device *dev = gsi->dev;
dma_addr_t addr;
- /* Hardware requires a 2^n ring size, with alignment equal to size */
+ /* Hardware requires a 2^n ring size, with alignment equal to size.
+ * The DMA address returned by dma_alloc_coherent() is guaranteed to
+ * be a power-of-2 number of pages, which satisfies the requirement.
+ */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
- if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, addr);
- dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
- size);
- return -EINVAL; /* Not a good error value, but distinct */
- } else if (!ring->virt) {
+ if (!ring->virt)
return -ENOMEM;
- }
+
ring->addr = addr;
ring->count = count;
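Dropping the runtime alignment check relies on the guarantee described in the new comment: dma_alloc_coherent() hands back a buffer aligned to at least its power-of-2 size here. If one wanted to keep a cheap debug-only assertion of that assumption, a hypothetical variant, not part of this patch, could look like:

ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (!ring->virt)
        return -ENOMEM;

/* Hypothetical extra check (not in the patch): the hardware needs the
 * ring aligned to its size, which the allocator is expected to provide
 * for a power-of-2 size.
 */
if (WARN_ON_ONCE(!IS_ALIGNED(addr, size))) {
        dma_free_coherent(dev, size, ring->virt, addr);
        return -EINVAL;
}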
@@ -1625,18 +1612,6 @@ static u32 gsi_event_bitmap_init(u32 evt_ring_max)
return event_bitmap;
}
-/* Setup function for event rings */
-static void gsi_evt_ring_setup(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
-/* Inverse of gsi_evt_ring_setup() */
-static void gsi_evt_ring_teardown(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
@@ -1686,7 +1661,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
netif_napi_del(&channel->napi);
- gsi_channel_deprogram(channel);
gsi_channel_de_alloc_command(gsi, channel_id);
gsi_evt_ring_reset_command(gsi, evt_ring_id);
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
@@ -1761,7 +1735,6 @@ static int gsi_channel_setup(struct gsi *gsi)
u32 mask;
int ret;
- gsi_evt_ring_setup(gsi);
gsi_irq_enable(gsi);
mutex_lock(&gsi->mutex);
@@ -1821,7 +1794,6 @@ err_unwind:
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
- gsi_evt_ring_teardown(gsi);
return ret;
}
@@ -1850,7 +1822,6 @@ static void gsi_channel_teardown(struct gsi *gsi)
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
- gsi_evt_ring_teardown(gsi);
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
@@ -1858,7 +1829,6 @@ int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
- int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1867,7 +1837,7 @@ int gsi_setup(struct gsi *gsi)
return -EIO;
}
- gsi_irq_setup(gsi);
+ gsi_irq_setup(gsi); /* No matching teardown required */
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
@@ -1901,18 +1871,13 @@ int gsi_setup(struct gsi *gsi)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- ret = gsi_channel_setup(gsi);
- if (ret)
- gsi_irq_teardown(gsi);
-
- return ret;
+ return gsi_channel_setup(gsi);
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
- gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1954,7 +1919,7 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings */
+/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
u32 evt_ring_id = 0;
@@ -1966,12 +1931,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}
-/* Inverse of gsi_evt_ring_init() */
-static void gsi_evt_ring_exit(struct gsi *gsi)
-{
- /* Nothing to do */
-}
-
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
@@ -2116,7 +2075,7 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi);
+ gsi_evt_ring_init(gsi); /* No matching exit required */
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
@@ -2150,7 +2109,6 @@ err_unwind:
}
gsi_channel_exit_one(&gsi->channel[data->channel_id]);
}
- gsi_evt_ring_exit(gsi);
return ret;
}
@@ -2164,8 +2122,6 @@ static void gsi_channel_exit(struct gsi *gsi)
gsi_channel_exit_one(&gsi->channel[channel_id]);
while (channel_id--);
gsi->modem_channel_bitmap = 0;
-
- gsi_evt_ring_exit(gsi);
}
/* Init function for GSI. GSI hardware does not need to be "ready" */