Diffstat (limited to 'drivers/net/ipa/gsi.c')
-rw-r--r-- | drivers/net/ipa/gsi.c | 499 |
1 file changed, 317 insertions(+), 182 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 6bfac1efe037..c4795249719d 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -21,6 +21,7 @@
 #include "gsi_trans.h"
 #include "ipa_gsi.h"
 #include "ipa_data.h"
+#include "ipa_version.h"
 
 /**
  * DOC: The IPA Generic Software Interface
@@ -91,6 +92,7 @@
 #define GSI_CMD_TIMEOUT			5	/* seconds */
 
 #define GSI_CHANNEL_STOP_RX_RETRIES	10
+#define GSI_CHANNEL_MODEM_HALT_RETRIES	10
 
 #define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
 #define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */
@@ -108,62 +110,6 @@ struct gsi_event {
 	u8 chid;
 };
 
-/* Hardware values from the error log register error code field */
-enum gsi_err_code {
-	GSI_INVALID_TRE_ERR			= 0x1,
-	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
-	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
-	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
-	GSI_EVT_RING_EMPTY_ERR			= 0x5,
-	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
-	GSI_HWO_1_ERR				= 0x8,
-};
-
-/* Hardware values from the error log register error type field */
-enum gsi_err_type {
-	GSI_ERR_TYPE_GLOB	= 0x1,
-	GSI_ERR_TYPE_CHAN	= 0x2,
-	GSI_ERR_TYPE_EVT	= 0x3,
-};
-
-/* Hardware values used when programming an event ring */
-enum gsi_evt_chtype {
-	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
-	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
-	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
-	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
-};
-
-/* Hardware values used when programming a channel */
-enum gsi_channel_protocol {
-	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
-	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
-	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
-	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
-};
-
-/* Hardware values representing an event ring immediate command opcode */
-enum gsi_evt_cmd_opcode {
-	GSI_EVT_ALLOCATE	= 0x0,
-	GSI_EVT_RESET		= 0x9,
-	GSI_EVT_DE_ALLOC	= 0xa,
-};
-
-/* Hardware values representing a generic immediate command opcode */
-enum gsi_generic_cmd_opcode {
-	GSI_GENERIC_HALT_CHANNEL	= 0x1,
-	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
-};
-
-/* Hardware values representing a channel immediate command opcode */
-enum gsi_ch_cmd_opcode {
-	GSI_CH_ALLOCATE	= 0x0,
-	GSI_CH_START	= 0x1,
-	GSI_CH_STOP	= 0x2,
-	GSI_CH_RESET	= 0x9,
-	GSI_CH_DE_ALLOC	= 0xa,
-};
-
 /** gsi_channel_scratch_gpi - GPI protocol scratch register
  * @max_outstanding_tre:
  *	Defines the maximum number of TREs allowed in a single transaction
@@ -229,21 +175,76 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
 	return channel - &channel->gsi->channel[0];
 }
 
+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+	gsi->type_enabled_bitmap = val;
+	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+}
+
+/* Turn off all GSI interrupts initially */
+static void gsi_irq_setup(struct gsi *gsi)
+{
+	u32 adjust;
+
+	/* Disable all interrupt types */
+	gsi_irq_type_update(gsi, 0);
+
+	/* Clear all type-specific interrupt masks */
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+	/* Reverse the offset adjustment for inter-EE register offsets */
+	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+	iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+
+	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Turn off all GSI interrupts when we're all done */
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+	/* Nothing to do */
+}
+
 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
 {
+	bool enable_ieob = !gsi->ieob_enabled_bitmap;
 	u32 val;
 
-	gsi->event_enable_bitmap |= BIT(evt_ring_id);
-	val = gsi->event_enable_bitmap;
+	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+	val = gsi->ieob_enabled_bitmap;
 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+	/* Enable the interrupt type if this is the first channel enabled */
+	if (enable_ieob)
+		gsi_irq_type_enable(gsi, GSI_IEOB);
 }
 
 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
 {
 	u32 val;
 
-	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
-	val = gsi->event_enable_bitmap;
+	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+
+	/* Disable the interrupt type if this was the last enabled channel */
+	if (!gsi->ieob_enabled_bitmap)
+		gsi_irq_type_disable(gsi, GSI_IEOB);
+
+	val = gsi->ieob_enabled_bitmap;
 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
 }
 
@@ -252,38 +253,32 @@ static void gsi_irq_enable(struct gsi *gsi)
 {
 	u32 val;
 
-	/* We don't use inter-EE channel or event interrupts */
-	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
-	val &= ~INTER_EE_CH_CTRL_FMASK;
-	val &= ~INTER_EE_EV_CTRL_FMASK;
-	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
-
-	val = GENMASK(gsi->channel_count - 1, 0);
-	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-
-	val = GENMASK(gsi->evt_ring_count - 1, 0);
-	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-
-	/* Each IEOB interrupt is enabled (later) as needed by channels */
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-
-	val = GSI_CNTXT_GLOB_IRQ_ALL;
-	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	/* Global interrupts include hardware error reports.  Enable
+	 * that so we can at least report the error should it occur.
+	 */
+	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
 
-	/* Never enable GSI_BREAK_POINT */
-	val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
+	/* General GSI interrupts are reported to all EEs; if they occur
+	 * they are unrecoverable (without reset).  A breakpoint interrupt
+	 * also exists, but we don't support that.  We want to be notified
+	 * of errors so we can report them, even if they can't be handled.
+	 */
+	val = BIT(BUS_ERROR);
+	val |= BIT(CMD_FIFO_OVRFLOW);
+	val |= BIT(MCS_STACK_OVRFLOW);
 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
 }
 
-/* Disable all GSI_interrupt types */
+/* Disable all GSI interrupt types */
 static void gsi_irq_disable(struct gsi *gsi)
 {
+	gsi_irq_type_update(gsi, 0);
+
+	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
 }
 
 /* Return the virtual address associated with a ring index */
@@ -337,13 +332,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
 	struct completion *completion = &evt_ring->completion;
 	struct device *dev = gsi->dev;
+	bool success;
 	u32 val;
 
+	/* We only perform one event ring command at a time, and event
+	 * control interrupts should only occur when such a command
+	 * is issued here.  Only permit *this* event ring to trigger
+	 * an interrupt, and only enable the event control IRQ type
+	 * when we expect it to occur.
+	 */
+	val = BIT(evt_ring_id);
+	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+
 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
 
-	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+
+	/* Disable the interrupt again */
+	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+	if (success)
+		return 0;
 
 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
 		opcode, evt_ring_id, evt_ring->state);
@@ -360,15 +372,15 @@
 	/* Get initial event ring state */
 	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
 	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
-		dev_err(gsi->dev, "bad event ring state %u before alloc\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
+			evt_ring_id, evt_ring->state);
 		return -EINVAL;
 	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-		dev_err(gsi->dev, "bad event ring state %u after alloc\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+			evt_ring_id, evt_ring->state);
 		ret = -EIO;
 	}
 
@@ -384,15 +396,15 @@
 
 	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
 	    state != GSI_EVT_RING_STATE_ERROR) {
-		dev_err(gsi->dev, "bad event ring state %u before reset\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
+			evt_ring_id, evt_ring->state);
 		return;
 	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
-		dev_err(gsi->dev, "bad event ring state %u after reset\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+			evt_ring_id, evt_ring->state);
 }
 
 /* Issue a hardware de-allocation request for an allocated event ring */
@@ -402,15 +414,15 @@
 	int ret;
 
 	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-		dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
+			evt_ring_id, evt_ring->state);
 		return;
 	}
 
 	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
 	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
-		dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
-			evt_ring->state);
+		dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+			evt_ring_id, evt_ring->state);
 }
 
 /* Fetch the current state of a channel from hardware */
@@ -433,13 +445,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 	u32 channel_id = gsi_channel_id(channel);
 	struct gsi *gsi = channel->gsi;
 	struct device *dev = gsi->dev;
+	bool success;
 	u32 val;
 
+	/* We only perform one channel command at a time, and channel
+	 * control interrupts should only occur when such a command is
+	 * issued here.  So we only permit *this* channel to trigger
+	 * an interrupt and only enable the channel control IRQ type
+	 * when we expect it to occur.
+	 */
+	val = BIT(channel_id);
+	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+
 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
 
-	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	/* Disable the interrupt again */
+	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+	if (success)
+		return 0;
 
 	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
 		opcode, channel_id, gsi_channel_state(channel));
@@ -458,7 +486,8 @@
 	/* Get initial channel state */
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
-		dev_err(dev, "bad channel state %u before alloc\n", state);
+		dev_err(dev, "channel %u bad state %u before alloc\n",
+			channel_id, state);
 		return -EINVAL;
 	}
 
@@ -467,7 +496,8 @@
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
-		dev_err(dev, "bad channel state %u after alloc\n", state);
+		dev_err(dev, "channel %u bad state %u after alloc\n",
+			channel_id, state);
 		ret = -EIO;
 	}
 
@@ -484,7 +514,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED) {
-		dev_err(dev, "bad channel state %u before start\n", state);
+		dev_err(dev, "channel %u bad state %u before start\n",
+			gsi_channel_id(channel), state);
 		return -EINVAL;
 	}
 
@@ -493,7 +524,8 @@
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
-		dev_err(dev, "bad channel state %u after start\n", state);
+		dev_err(dev, "channel %u bad state %u after start\n",
+			gsi_channel_id(channel), state);
 		ret = -EIO;
 	}
 
@@ -517,7 +549,8 @@
 	if (state != GSI_CHANNEL_STATE_STARTED &&
 	    state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
-		dev_err(dev, "bad channel state %u before stop\n", state);
+		dev_err(dev, "channel %u bad state %u before stop\n",
+			gsi_channel_id(channel), state);
 		return -EINVAL;
 	}
 
@@ -532,7 +565,8 @@
 	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
 		return -EAGAIN;
 
-	dev_err(dev, "bad channel state %u after stop\n", state);
+	dev_err(dev, "channel %u bad state %u after stop\n",
+		gsi_channel_id(channel), state);
 
 	return -EIO;
 }
@@ -549,7 +583,10 @@
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_STOPPED &&
 	    state != GSI_CHANNEL_STATE_ERROR) {
-		dev_err(dev, "bad channel state %u before reset\n", state);
+		/* No need to reset a channel already in ALLOCATED state */
+		if (state != GSI_CHANNEL_STATE_ALLOCATED)
+			dev_err(dev, "channel %u bad state %u before reset\n",
+				gsi_channel_id(channel), state);
 		return;
 	}
 
@@ -558,7 +595,8 @@
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
-		dev_err(dev, "bad channel state %u after reset\n", state);
+		dev_err(dev, "channel %u bad state %u after reset\n",
+			gsi_channel_id(channel), state);
 }
 
 /* Deallocate an ALLOCATED GSI channel */
@@ -571,7 +609,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
 
 	state = gsi_channel_state(channel);
 	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
-		dev_err(dev, "bad channel state %u before dealloc\n", state);
+		dev_err(dev, "channel %u bad state %u before dealloc\n",
+			channel_id, state);
 		return;
 	}
 
@@ -580,7 +619,8 @@
 	/* Channel state will normally have been updated */
 	state = gsi_channel_state(channel);
 	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
-		dev_err(dev, "bad channel state %u after dealloc\n", state);
+		dev_err(dev, "channel %u bad state %u after dealloc\n",
+			channel_id, state);
 }
 
 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -607,7 +647,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
 	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
 	u32 val;
 
-	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+	/* We program all event rings as GPI type/protocol */
+	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
 	val |= EV_INTYPE_FMASK;
 	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
 	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
@@ -714,8 +755,8 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
 	/* Arbitrarily pick TRE 0 as the first channel element to use */
 	channel->tre_ring.index = 0;
 
-	/* We program all channels to use GPI protocol */
-	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+	/* We program all channels as GPI type/protocol */
+	val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
 	if (channel->toward_ipa)
 		val |= CHTYPE_DIR_FMASK;
 	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
@@ -742,12 +783,21 @@
 
 	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
 
-	/* Enable the doorbell engine if requested */
-	if (doorbell)
+	/* We enable the doorbell engine for IPA v3.5.1 */
+	if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
 		val |= USE_DB_ENG_FMASK;
 
-	if (!channel->use_prefetch)
-		val |= USE_ESCAPE_BUF_ONLY_FMASK;
+	/* v4.0 introduces an escape buffer for prefetch.  We use it
+	 * on all but the AP command channel.
+	 */
+	if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
+		/* If not otherwise set, prefetch buffers are used */
+		if (gsi->version < IPA_VERSION_4_5)
+			val |= USE_ESCAPE_BUF_ONLY_FMASK;
+		else
+			val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
+					       PREFETCH_MODE_FMASK);
+	}
 
 	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -829,8 +879,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 	return ret;
 }
 
-/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 
@@ -838,10 +888,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
 	gsi_channel_reset_command(channel);
 	/* Due to a hardware quirk we may need to reset RX channels twice. */
-	if (legacy && !channel->toward_ipa)
+	if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
 		gsi_channel_reset_command(channel);
 
-	gsi_channel_program(channel, legacy);
+	gsi_channel_program(channel, doorbell);
 	gsi_channel_trans_cancel_pending(channel);
 
 	mutex_unlock(&gsi->mutex);
 }
@@ -989,7 +1039,7 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
 static void
 gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
 {
-	if (code == GSI_OUT_OF_RESOURCES_ERR) {
+	if (code == GSI_OUT_OF_RESOURCES) {
 		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
 		complete(&gsi->channel[channel_id].completion);
 		return;
@@ -1004,7 +1054,7 @@
 static void
 gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
 {
-	if (code == GSI_OUT_OF_RESOURCES_ERR) {
+	if (code == GSI_OUT_OF_RESOURCES) {
 		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
 		u32 channel_id = gsi_channel_id(evt_ring->channel);
 
@@ -1034,8 +1084,8 @@ static void gsi_isr_glob_err(struct gsi *gsi)
 	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
 
 	ee = u32_get_bits(val, ERR_EE_FMASK);
-	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
 	type = u32_get_bits(val, ERR_TYPE_FMASK);
+	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
 	code = u32_get_bits(val, ERR_CODE_FMASK);
 
 	if (type == GSI_ERR_TYPE_CHAN)
@@ -1052,10 +1102,38 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
 	u32 result;
 	u32 val;
 
+	/* This interrupt is used to handle completions of the two GENERIC
+	 * GSI commands.  We use these to allocate and halt channels on
+	 * the modem's behalf due to a hardware quirk on IPA v4.2.  Once
+	 * allocated, the modem "owns" these channels, and as a result we
+	 * have no way of knowing the channel's state at any given time.
+	 *
+	 * It is recommended that we halt the modem channels we allocated
+	 * when shutting down, but it's possible the channel isn't running
+	 * at the time we issue the HALT command.  We'll get an error in
+	 * that case, but it's harmless (the channel is already halted).
+	 *
+	 * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
+	 * if we receive it.
+	 */
 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
 	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
-	if (result != GENERIC_EE_SUCCESS_FVAL)
+
+	switch (result) {
+	case GENERIC_EE_SUCCESS:
+	case GENERIC_EE_CHANNEL_NOT_RUNNING:
+		gsi->result = 0;
+		break;
+
+	case GENERIC_EE_RETRY:
+		gsi->result = -EAGAIN;
+		break;
+
+	default:
 		dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+		gsi->result = -EIO;
+		break;
+	}
 
 	complete(&gsi->completion);
 }
@@ -1067,15 +1145,15 @@
 
 	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
 
-	if (val & ERROR_INT_FMASK)
+	if (val & BIT(ERROR_INT))
 		gsi_isr_glob_err(gsi);
 
 	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
 
-	val &= ~ERROR_INT_FMASK;
+	val &= ~BIT(ERROR_INT);
 
-	if (val & GP_INT1_FMASK) {
-		val ^= GP_INT1_FMASK;
+	if (val & BIT(GP_INT1)) {
+		val ^= BIT(GP_INT1);
 		gsi_isr_gp_int1(gsi);
 	}
 
@@ -1110,8 +1188,7 @@
 	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
 
-	if (val)
-		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
 }
 
 /**
@@ -1128,6 +1205,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
 	u32 intr_mask;
 	u32 cnt = 0;
 
+	/* enum gsi_irq_type_id defines GSI interrupt types */
 	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
 		/* intr_mask contains bitmask of pending GSI interrupts */
 		do {
@@ -1136,19 +1214,19 @@
 			intr_mask ^= gsi_intr;
 
 			switch (gsi_intr) {
-			case CH_CTRL_FMASK:
+			case BIT(GSI_CH_CTRL):
 				gsi_isr_chan_ctrl(gsi);
 				break;
-			case EV_CTRL_FMASK:
+			case BIT(GSI_EV_CTRL):
 				gsi_isr_evt_ctrl(gsi);
 				break;
-			case GLOB_EE_FMASK:
+			case BIT(GSI_GLOB_EE):
 				gsi_isr_glob_ee(gsi);
 				break;
-			case IEOB_FMASK:
+			case BIT(GSI_IEOB):
 				gsi_isr_ieob(gsi);
 				break;
-			case GENERAL_FMASK:
+			case BIT(GSI_GENERAL):
 				gsi_isr_general(gsi);
 				break;
 			default:
@@ -1168,6 +1246,34 @@
 	return IRQ_HANDLED;
 }
 
+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	unsigned int irq;
+	int ret;
+
+	ret = platform_get_irq_byname(pdev, "gsi");
+	if (ret <= 0) {
+		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+		return ret ? : -EINVAL;
+	}
+	irq = ret;
+
+	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+	if (ret) {
+		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
+		return ret;
+	}
+	gsi->irq = irq;
+
+	return 0;
+}
+
+static void gsi_irq_exit(struct gsi *gsi)
+{
+	free_irq(gsi->irq, gsi);
+}
+
 /* Return the transaction associated with a transfer completion event */
 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
 					 struct gsi_event *event)
@@ -1452,8 +1558,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
 }
 
 /* Setup function for a single channel */
-static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
-				 bool legacy)
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
 {
 	struct gsi_channel *channel = &gsi->channel[channel_id];
 	u32 evt_ring_id = channel->evt_ring_id;
@@ -1472,7 +1577,7 @@
 	if (ret)
 		goto err_evt_ring_de_alloc;
 
-	gsi_channel_program(channel, legacy);
+	gsi_channel_program(channel, true);
 
 	if (channel->toward_ipa)
 		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1511,8 +1616,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
 			       enum gsi_generic_cmd_opcode opcode)
 {
 	struct completion *completion = &gsi->completion;
+	bool success;
 	u32 val;
 
+	/* The error global interrupt type is always enabled (until we
+	 * teardown), so we won't change that.  A generic EE command
+	 * completes with a GSI global interrupt of type GP_INT1.  We
+	 * only perform one generic command at a time (to allocate or
+	 * halt a modem channel) and only from this function.  So we
+	 * enable the GP_INT1 IRQ type here while we're expecting it.
+	 */
+	val = BIT(ERROR_INT) | BIT(GP_INT1);
+	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
 	/* First zero the result code field */
 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
 	val &= ~GENERIC_EE_RESULT_FMASK;
@@ -1523,8 +1639,13 @@
 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
 
-	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+
+	/* Disable the GP_INT1 IRQ type again */
+	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+	if (success)
+		return gsi->result;
 
 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
 		opcode, channel_id);
@@ -1540,16 +1661,21 @@ static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
 
 static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
 {
+	u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
 	int ret;
 
-	ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+	do
+		ret = gsi_generic_command(gsi, channel_id,
+					  GSI_GENERIC_HALT_CHANNEL);
+	while (ret == -EAGAIN && retries--);
+
 	if (ret)
 		dev_err(gsi->dev, "error %d halting modem channel %u\n",
 			ret, channel_id);
 }
 
 /* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool legacy)
+static int gsi_channel_setup(struct gsi *gsi)
 {
 	u32 channel_id = 0;
 	u32 mask;
@@ -1561,7 +1687,7 @@
 	mutex_lock(&gsi->mutex);
 
 	do {
-		ret = gsi_channel_setup_one(gsi, channel_id, legacy);
+		ret = gsi_channel_setup_one(gsi, channel_id);
 		if (ret)
 			goto err_unwind;
 	} while (++channel_id < gsi->channel_count);
@@ -1647,10 +1773,11 @@ static void gsi_channel_teardown(struct gsi *gsi)
 }
 
 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool legacy)
+int gsi_setup(struct gsi *gsi)
 {
 	struct device *dev = gsi->dev;
 	u32 val;
+	int ret;
 
 	/* Here is where we first touch the GSI hardware */
 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1659,6 +1786,8 @@
 		return -EIO;
 	}
 
+	gsi_irq_setup(gsi);
+
 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
 
 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
@@ -1691,13 +1820,18 @@
 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
 
-	return gsi_channel_setup(gsi, legacy);
+	ret = gsi_channel_setup(gsi);
+	if (ret)
+		gsi_irq_teardown(gsi);
+
+	return ret;
 }
 
 /* Inverse of gsi_setup() */
 void gsi_teardown(struct gsi *gsi)
 {
 	gsi_channel_teardown(gsi);
+	gsi_irq_teardown(gsi);
 }
 
 /* Initialize a channel's event ring */
@@ -1745,7 +1879,7 @@ static void gsi_evt_ring_init(struct gsi *gsi)
 	u32 evt_ring_id = 0;
 
 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
-	gsi->event_enable_bitmap = 0;
+	gsi->ieob_enabled_bitmap = 0;
 	do
 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
@@ -1814,7 +1948,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
 /* Init function for a single channel */
 static int gsi_channel_init_one(struct gsi *gsi,
 				const struct ipa_gsi_endpoint_data *data,
-				bool command, bool prefetch)
+				bool command)
 {
 	struct gsi_channel *channel;
 	u32 tre_count;
@@ -1838,7 +1972,6 @@
 	channel->gsi = gsi;
 	channel->toward_ipa = data->toward_ipa;
 	channel->command = command;
-	channel->use_prefetch = command && prefetch;
 	channel->tlv_count = data->channel.tlv_count;
 	channel->tre_count = tre_count;
 	channel->event_count = data->channel.event_count;
@@ -1892,13 +2025,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel)
 }
 
 /* Init function for channels */
-static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
-			    const struct ipa_gsi_endpoint_data *data,
-			    bool modem_alloc)
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+			    const struct ipa_gsi_endpoint_data *data)
 {
+	bool modem_alloc;
 	int ret = 0;
 	u32 i;
 
+	/* IPA v4.2 requires the AP to allocate channels for the modem */
+	modem_alloc = gsi->version == IPA_VERSION_4_2;
+
 	gsi_evt_ring_init(gsi);
 
 	/* The endpoint data array is indexed by endpoint name */
@@ -1916,7 +2052,7 @@
 			continue;
 		}
 
-		ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+		ret = gsi_channel_init_one(gsi, &data[i], command);
 		if (ret)
 			goto err_unwind;
 	}
@@ -1952,19 +2088,20 @@ static void gsi_channel_exit(struct gsi *gsi)
 }
 
 /* Init function for GSI.  GSI hardware does not need to be "ready" */
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
-	     u32 count, const struct ipa_gsi_endpoint_data *data,
-	     bool modem_alloc)
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+	     enum ipa_version version, u32 count,
+	     const struct ipa_gsi_endpoint_data *data)
 {
 	struct device *dev = &pdev->dev;
 	struct resource *res;
 	resource_size_t size;
-	unsigned int irq;
+	u32 adjust;
 	int ret;
 
 	gsi_validate_build();
 
 	gsi->dev = dev;
+	gsi->version = version;
 
 	/* The GSI layer performs NAPI on all endpoints.  NAPI requires a
 	 * network device structure, but the GSI layer does not have one,
@@ -1972,55 +2109,53 @@
 	 */
 	init_dummy_netdev(&gsi->dummy_dev);
 
-	ret = platform_get_irq_byname(pdev, "gsi");
-	if (ret <= 0) {
-		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
-		return ret ? : -EINVAL;
-	}
-	irq = ret;
-
-	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
-	if (ret) {
-		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
-		return ret;
-	}
-	gsi->irq = irq;
-
 	/* Get GSI memory range and map it */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
 	if (!res) {
 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
-		ret = -ENODEV;
-		goto err_free_irq;
+		return -ENODEV;
 	}
 
 	size = resource_size(res);
 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
-		ret = -EINVAL;
-		goto err_free_irq;
+		return -EINVAL;
+	}
+
+	/* Make sure we can make our pointer adjustment if necessary */
+	adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+	if (res->start < adjust) {
+		dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
+			adjust);
+		return -EINVAL;
 	}
 
 	gsi->virt = ioremap(res->start, size);
 	if (!gsi->virt) {
 		dev_err(dev, "unable to remap \"gsi\" memory\n");
-		ret = -ENOMEM;
-		goto err_free_irq;
+		return -ENOMEM;
 	}
+	/* Adjust register range pointer downward for newer IPA versions */
+	gsi->virt -= adjust;
 
-	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+	init_completion(&gsi->completion);
+
+	ret = gsi_irq_init(gsi, pdev);
 	if (ret)
 		goto err_iounmap;
 
+	ret = gsi_channel_init(gsi, count, data);
+	if (ret)
+		goto err_irq_exit;
+
 	mutex_init(&gsi->mutex);
-	init_completion(&gsi->completion);
 
 	return 0;
 
+err_irq_exit:
+	gsi_irq_exit(gsi);
 err_iounmap:
 	iounmap(gsi->virt);
-err_free_irq:
-	free_irq(gsi->irq, gsi);
 
 	return ret;
 }
@@ -2030,7 +2165,7 @@
 void gsi_exit(struct gsi *gsi)
 {
 	mutex_destroy(&gsi->mutex);
 	gsi_channel_exit(gsi);
-	free_irq(gsi->irq, gsi);
+	gsi_irq_exit(gsi);
 	iounmap(gsi->virt);
 }
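Caller-side note: this patch changes the public GSI entry points, so existing callers must be updated. gsi_init() now takes an enum ipa_version in place of the old "prefetch" and "modem_alloc" booleans, and gsi_setup()/gsi_channel_reset() lose their "legacy" flag; the GSI layer derives that behavior from the version itself. A minimal sketch of the updated call sequence follows (the surrounding probe structure and the ipa/data variable names are hypothetical, for illustration only; only the gsi_* functions and their signatures come from this patch):

	/* Hypothetical probe-time usage of the new interface */
	ret = gsi_init(&ipa->gsi, pdev, version, data->endpoint_count,
		       data->endpoint_data);
	if (ret)
		return ret;

	/* Later, once GSI firmware is loaded and initialized */
	ret = gsi_setup(&ipa->gsi);	/* was gsi_setup(gsi, legacy) */

	/* Teardown in the inverse order */
	gsi_teardown(&ipa->gsi);
	gsi_exit(&ipa->gsi);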