Diffstat (limited to 'drivers/dma/ti/k3-udma.c')
-rw-r--r--  drivers/dma/ti/k3-udma.c  64
1 file changed, 39 insertions(+), 25 deletions(-)
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index d86dba0fd8e6..82cf6c77f5c9 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
@@ -91,6 +92,9 @@ struct udma_match_data {
bool enable_memcpy_support;
u32 flags;
u32 statictr_z_mask;
+};
+
+struct udma_soc_data {
u32 rchan_oes_offset;
};
@@ -117,6 +121,7 @@ struct udma_dev {
struct device *dev;
void __iomem *mmrs[MMR_LAST];
const struct udma_match_data *match_data;
+ const struct udma_soc_data *soc_data;
u8 tpl_levels;
u32 tpl_start_idx[3];
@@ -1679,7 +1684,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
struct udma_dev *ud = to_udma_dev(chan->device);
- const struct udma_match_data *match_data = ud->match_data;
+ const struct udma_soc_data *soc_data = ud->soc_data;
struct k3_ring *irq_ring;
u32 irq_udma_idx;
int ret;
@@ -1779,7 +1784,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -2024,11 +2029,6 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
int num_tr = 0;
int tr_idx = 0;
- if (!is_slave_direction(dir)) {
- dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
- return NULL;
- }
-
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
if (sg_dma_len(sgent) < SZ_64K)
@@ -2400,11 +2400,6 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
unsigned int i;
int num_tr;
- if (!is_slave_direction(dir)) {
- dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
- return NULL;
- }
-
num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
&tr0_cnt1, &tr1_cnt0);
if (num_tr < 0) {
@@ -2914,9 +2909,9 @@ static void udma_desc_pre_callback(struct virt_dma_chan *vc,
* This tasklet handles the completion of a DMA descriptor by
* calling its callback and freeing it.
*/
-static void udma_vchan_complete(unsigned long arg)
+static void udma_vchan_complete(struct tasklet_struct *t)
{
- struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
+ struct virt_dma_chan *vc = from_tasklet(vc, t, task);
struct virt_dma_desc *vd, *_vd;
struct dmaengine_desc_callback cb;
LIST_HEAD(head);
@@ -3101,14 +3096,12 @@ static struct udma_match_data am654_main_data = {
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x200,
};
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x200,
};
static struct udma_match_data j721e_main_data = {
@@ -3116,7 +3109,6 @@ static struct udma_match_data j721e_main_data = {
.enable_memcpy_support = true,
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
.statictr_z_mask = GENMASK(23, 0),
- .rchan_oes_offset = 0x400,
};
static struct udma_match_data j721e_mcu_data = {
@@ -3124,7 +3116,6 @@ static struct udma_match_data j721e_mcu_data = {
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
.flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
.statictr_z_mask = GENMASK(23, 0),
- .rchan_oes_offset = 0x400,
};
static const struct of_device_id udma_of_match[] = {
@@ -3145,15 +3136,31 @@ static const struct of_device_id udma_of_match[] = {
{ /* Sentinel */ },
};
+static struct udma_soc_data am654_soc_data = {
+ .rchan_oes_offset = 0x200,
+};
+
+static struct udma_soc_data j721e_soc_data = {
+ .rchan_oes_offset = 0x400,
+};
+
+static struct udma_soc_data j7200_soc_data = {
+ .rchan_oes_offset = 0x80,
+};
+
+static const struct soc_device_attribute k3_soc_devices[] = {
+ { .family = "AM65X", .data = &am654_soc_data },
+ { .family = "J721E", .data = &j721e_soc_data },
+ { .family = "J7200", .data = &j7200_soc_data },
+ { /* sentinel */ }
+};
+
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
- struct resource *res;
int i;
for (i = 0; i < MMR_LAST; i++) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- mmr_names[i]);
- ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res);
+ ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
if (IS_ERR(ud->mmrs[i]))
return PTR_ERR(ud->mmrs[i]);
}
@@ -3287,7 +3294,7 @@ static int udma_setup_resources(struct udma_dev *ud)
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
irq_res.desc[i].start = rm_res->desc[j].start +
- ud->match_data->rchan_oes_offset;
+ ud->soc_data->rchan_oes_offset;
irq_res.desc[i].num = rm_res->desc[j].num;
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
@@ -3497,6 +3504,7 @@ static void udma_dbg_summary_show(struct seq_file *s,
static int udma_probe(struct platform_device *pdev)
{
struct device_node *navss_node = pdev->dev.parent->of_node;
+ const struct soc_device_attribute *soc;
struct device *dev = &pdev->dev;
struct udma_dev *ud;
const struct of_device_id *match;
@@ -3561,6 +3569,13 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
+
dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
@@ -3649,8 +3664,7 @@ static int udma_probe(struct platform_device *pdev)
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
- tasklet_init(&uc->vc.task, udma_vchan_complete,
- (unsigned long)&uc->vc);
+ tasklet_setup(&uc->vc.task, udma_vchan_complete);
init_completion(&uc->teardown_completed);
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
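
For context, the per-SoC lookup added by this patch follows the generic soc_device_match() pattern from <linux/sys_soc.h>: a driver declares a table of soc_device_attribute entries, each carrying a pointer to per-SoC data, and picks the matching entry at probe time. The sketch below illustrates that pattern only; the struct, field, and function names other than soc_device_attribute and soc_device_match() are hypothetical and not taken from k3-udma.c.

#include <linux/platform_device.h>
#include <linux/sys_soc.h>

/* Illustrative per-SoC tuning data, not the driver's real structure. */
struct my_soc_data {
	u32 oes_offset;
};

static const struct my_soc_data am65x_data = { .oes_offset = 0x200 };
static const struct my_soc_data j721e_data = { .oes_offset = 0x400 };

static const struct soc_device_attribute my_soc_devices[] = {
	{ .family = "AM65X", .data = &am65x_data },
	{ .family = "J721E", .data = &j721e_data },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	const struct my_soc_data *soc_data;

	/* Returns the table entry whose .family matches the running SoC. */
	soc = soc_device_match(my_soc_devices);
	if (!soc)
		return -ENODEV;	/* SoC not listed in the table */
	soc_data = soc->data;

	dev_info(&pdev->dev, "OES offset 0x%x\n", soc_data->oes_offset);
	return 0;
}

The benefit of this split, as in the patch above, is that data which varies per SoC family (here rchan_oes_offset) no longer has to be duplicated in every of_device_id match entry and can differ even when the compatible string is shared.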