Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--   drivers/dma/ti/edma.c           | 11
-rw-r--r--   drivers/dma/ti/k3-udma-glue.c   | 15
-rw-r--r--   drivers/dma/ti/k3-udma.c        | 49
3 files changed, 65 insertions, 10 deletions
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 4ece125b2ae7..3ed406f08c44 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/string_choices.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 #include <linux/of_irq.h>
@@ -2047,7 +2048,7 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
 	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
 	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
 	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
-	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
+	dev_dbg(dev, "chmap_exist: %s\n", str_yes_no(ecc->chmap_exist));
 
 	/* Nothing need to be done if queue priority is provided */
 	if (pdata->queue_priority_mapping)
@@ -2258,8 +2259,12 @@ static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 
 out:
-	/* The channel is going to be used as HW synchronized */
-	echan->hw_triggered = true;
+	/*
+	 * The channel is going to be HW synchronized, unless it was
+	 * reserved as a memcpy channel
+	 */
+	echan->hw_triggered =
+		!edma_is_memcpy_channel(i, ecc->info->memcpy_channels);
 	return dma_get_slave_channel(chan);
 }
 #else
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 7c224c3ab7a0..f87d244cc2d6 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -84,6 +84,7 @@ struct k3_udma_glue_rx_channel {
 	struct k3_udma_glue_rx_flow *flows;
 	u32 flow_num;
 	u32 flows_ready;
+	bool single_fdq;	/* one FDQ for all flows */
 };
 
 static void k3_udma_chan_dev_release(struct device *dev)
@@ -970,10 +971,13 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
 
 	ep_cfg = rx_chn->common.ep_config;
 
-	if (xudma_is_pktdma(rx_chn->common.udmax))
+	if (xudma_is_pktdma(rx_chn->common.udmax)) {
 		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
-	else
+		rx_chn->single_fdq = false;
+	} else {
 		rx_chn->udma_rchan_id = -1;
+		rx_chn->single_fdq = true;
+	}
 
 	/* request and cfg UDMAP RX channel */
 	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
@@ -1103,6 +1107,9 @@ k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn
 		rx_chn->common.chan_dev.dma_coherent = true;
 		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
 					     DMA_BIT_MASK(48));
+		rx_chn->single_fdq = false;
+	} else {
+		rx_chn->single_fdq = true;
 	}
 
 	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
@@ -1453,7 +1460,7 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);
 
 void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
 		u32 flow_num, void *data,
-		void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq)
+		void (*cleanup)(void *data, dma_addr_t desc_dma))
 {
 	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
 	struct device *dev = rx_chn->common.dev;
@@ -1465,7 +1472,7 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
 	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
 
 	/* Skip RX FDQ in case one FDQ is used for the set of flows */
-	if (skip_fdq)
+	if (rx_chn->single_fdq && flow_num)
 		goto do_reset;
 
 	/*
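Note: callers of k3_udma_glue_reset_rx_chn() lose the skip_fdq argument with this change; the glue layer now derives the decision from the channel's own single_fdq state. A minimal sketch of a hypothetical client call site follows (my_rx_cleanup, my_reset_rx_flows, priv and num_flows are illustrative names, not part of this patch):

    /* Hypothetical client callback: release one RX descriptor. */
    static void my_rx_cleanup(void *data, dma_addr_t desc_dma)
    {
            /* return the descriptor backing desc_dma to its pool */
    }

    static void my_reset_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
                                  u32 num_flows, void *priv)
    {
            u32 i;

            /*
             * No skip_fdq argument any more: for a single-FDQ channel
             * the glue layer itself skips the shared FDQ for every flow
             * but flow 0 (the "rx_chn->single_fdq && flow_num" test above).
             */
            for (i = 0; i < num_flows; i++)
                    k3_udma_glue_reset_rx_chn(rx_chn, i, priv, my_rx_cleanup);
    }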
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 7ed1956b4642..aa2dc762140f 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1091,8 +1091,11 @@ static void udma_check_tx_completion(struct work_struct *work)
 	u32 residue_diff;
 	ktime_t time_diff;
 	unsigned long delay;
+	unsigned long flags;
 
 	while (1) {
+		spin_lock_irqsave(&uc->vc.lock, flags);
+
 		if (uc->desc) {
 			/* Get previous residue and time stamp */
 			residue_diff = uc->tx_drain.residue;
@@ -1127,6 +1130,8 @@ static void udma_check_tx_completion(struct work_struct *work)
 				break;
 			}
 
+			spin_unlock_irqrestore(&uc->vc.lock, flags);
+
 			usleep_range(ktime_to_us(delay),
 				     ktime_to_us(delay) + 10);
 			continue;
@@ -1143,6 +1148,8 @@ static void udma_check_tx_completion(struct work_struct *work)
 
 		break;
 	}
+
+	spin_unlock_irqrestore(&uc->vc.lock, flags);
 }
 
 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
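Note: the three hunks above move udma_check_tx_completion() under uc->vc.lock. The resulting loop follows the usual drop-the-lock-before-sleeping pattern; stripped to its shape (a simplified sketch, not the full function body; still_draining is an illustrative condition):

    while (1) {
            spin_lock_irqsave(&uc->vc.lock, flags);

            /* ... inspect uc->desc and the drain residue under the lock ... */

            if (still_draining) {
                    /* must not sleep with a spinlock held */
                    spin_unlock_irqrestore(&uc->vc.lock, flags);
                    usleep_range(ktime_to_us(delay), ktime_to_us(delay) + 10);
                    continue;       /* re-take the lock and re-check */
            }

            /* ... complete or fail the descriptor ... */
            break;
    }
    spin_unlock_irqrestore(&uc->vc.lock, flags);

usleep_range() may sleep, so every path that leaves the loop body releases the lock exactly once, either before sleeping or after the final break.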
@@ -4246,7 +4253,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
 			struct of_dma *ofdma)
 {
 	struct udma_dev *ud = ofdma->of_dma_data;
-	dma_cap_mask_t mask = ud->ddev.cap_mask;
 	struct udma_filter_param filter_param;
 	struct dma_chan *chan;
 
@@ -4278,7 +4284,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
 		}
 	}
 
-	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
+	chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param,
 				     ofdma->of_node);
 	if (!chan) {
 		dev_err(ud->dev, "get channel fail in %s.\n", __func__);
@@ -4886,6 +4892,12 @@ static int bcdma_setup_resources(struct udma_dev *ud)
 				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
 				irq_res.desc[i].num = rm_res->desc[i].num;
+
+				if (rm_res->desc[i].num_sec) {
+					irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+								    oes->bcdma_bchan_ring;
+					irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+				}
 			}
 		}
 	} else {
@@ -4909,6 +4921,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+				if (rm_res->desc[j].num_sec) {
+					irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+								    oes->bcdma_tchan_data;
+					irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+					irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+									oes->bcdma_tchan_ring;
+					irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+				}
 			}
 		}
 	}
@@ -4929,6 +4950,15 @@ static int bcdma_setup_resources(struct udma_dev *ud)
 				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
 				irq_res.desc[i + 1].num = rm_res->desc[j].num;
+
+				if (rm_res->desc[j].num_sec) {
+					irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+								    oes->bcdma_rchan_data;
+					irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+					irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec +
+									oes->bcdma_rchan_ring;
+					irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec;
+				}
 			}
 		}
 	}
@@ -5063,6 +5093,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
 			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
 			irq_res.desc[i].num = rm_res->desc[i].num;
+
+			if (rm_res->desc[i].num_sec) {
+				irq_res.desc[i].start_sec = rm_res->desc[i].start_sec +
+							    oes->pktdma_tchan_flow;
+				irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
+			}
 		}
 	}
 	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
@@ -5074,6 +5110,12 @@ static int pktdma_setup_resources(struct udma_dev *ud)
 			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
 			irq_res.desc[i].num = rm_res->desc[j].num;
+
+			if (rm_res->desc[j].num_sec) {
+				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+							    oes->pktdma_rchan_flow;
+				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+			}
 		}
 	}
 	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -5582,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
 		uc->config.dir = DMA_MEM_TO_MEM;
 		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
 					  dev_name(dev), i);
-
+		if (!uc->name)
+			return -ENOMEM;
 		vchan_init(&uc->vc, &ud->ddev);
 		/* Use custom vchan completion handling */
 		tasklet_setup(&uc->vc.task, udma_vchan_complete);
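Note: str_yes_no() used in the edma.c hunk comes from the <linux/string_choices.h> header added there, and is equivalent to the open-coded ternary it replaces, roughly:

    /* Paraphrase of the helper in include/linux/string_choices.h. */
    static inline const char *str_yes_no(bool v)
    {
            return v ? "yes" : "no";
    }

Likewise, the final udma_probe() hunk only adds the standard NULL check: devm_kasprintf() returns NULL on allocation failure, so the probe path now bails out with -ENOMEM instead of registering a channel with a NULL name.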