// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2021 Advanced Micro Devices, Inc.
//
// Authors: Ajit Kumar Pandey <AjitKumar.Pandey@amd.com>
/*
 * Hardware interface for ACP DSP Firmware binaries loader
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "../ops.h"
#include "acp-dsp-offset.h"
#include "acp.h"

#define FW_BIN 0
#define FW_DATA_BIN 1
#define FW_BIN_PTE_OFFSET 0x00
#define FW_DATA_BIN_PTE_OFFSET 0x08

#define ACP_DSP_RUN 0x00
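/*
 * Read back a firmware block for the SOF core. Only the SRAM block type is
 * handled here; the data is copied out of the ACP scratch memory window.
 */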
int acp_dsp_block_read(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
		       u32 offset, void *dest, size_t size)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);

	switch (blk_type) {
	case SOF_FW_BLK_TYPE_SRAM:
		offset = offset - desc->sram_pte_offset;
		memcpy_from_scratch(sdev, offset, dest, size);
		break;
	default:
		dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_read, SND_SOC_SOF_AMD_COMMON);
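/*
 * Stage a firmware block for loading. IRAM text blocks are collected in a DMA
 * buffer that later feeds the SHA DMA engine, DRAM data blocks go into a
 * second DMA buffer, and SRAM blocks are written straight to scratch memory.
 */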
int acp_dsp_block_write(struct snd_sof_dev *sdev, enum snd_sof_fw_blk_type blk_type,
			u32 offset, void *src, size_t size)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata;
	void *dest;
	u32 dma_size, page_count;
	unsigned int size_fw;

	adata = sdev->pdata->hw_pdata;

	switch (blk_type) {
	case SOF_FW_BLK_TYPE_IRAM:
		if (!adata->bin_buf) {
			size_fw = sdev->basefw.fw->size;
			page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
			dma_size = page_count * ACP_PAGE_SIZE;
			adata->bin_buf = dma_alloc_coherent(&pci->dev, dma_size,
							    &adata->sha_dma_addr,
							    GFP_ATOMIC);
			if (!adata->bin_buf)
				return -ENOMEM;
		}
		adata->fw_bin_size = size + offset;
		dest = adata->bin_buf + offset;
		break;
	case SOF_FW_BLK_TYPE_DRAM:
		if (!adata->data_buf) {
			adata->data_buf = dma_alloc_coherent(&pci->dev,
							     ACP_DEFAULT_DRAM_LENGTH,
							     &adata->dma_addr,
							     GFP_ATOMIC);
			if (!adata->data_buf)
				return -ENOMEM;
		}
		dest = adata->data_buf + offset;
		adata->fw_data_bin_size = size + offset;
		break;
	case SOF_FW_BLK_TYPE_SRAM:
		offset = offset - desc->sram_pte_offset;
		memcpy_to_scratch(sdev, offset, src, size);
		return 0;
	default:
		dev_err(sdev->dev, "bad blk type 0x%x\n", blk_type);
		return -EINVAL;
	}

	memcpy(dest, src, size);
	return 0;
}
EXPORT_SYMBOL_NS(acp_dsp_block_write, SND_SOC_SOF_AMD_COMMON);
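/* SOF memory region types map directly onto the ACP BAR index */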
int acp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	return type;
}
EXPORT_SYMBOL_NS(acp_get_bar_index, SND_SOC_SOF_AMD_COMMON);
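/*
 * Program page table entries in the ACP scratch area so the ATU can translate
 * DSP-side accesses to the host DMA buffers. Entries for the firmware image
 * start at offset 0; entries for the data image follow the image entries.
 */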
static void configure_pte_for_fw_loading(int type, int num_pages, struct acp_dev_data *adata)
{
	struct snd_sof_dev *sdev = adata->dev;
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	unsigned int low, high;
	dma_addr_t addr;
	u16 page_idx;
	u32 offset;

	switch (type) {
	case FW_BIN:
		offset = FW_BIN_PTE_OFFSET;
		addr = adata->sha_dma_addr;
		break;
	case FW_DATA_BIN:
		offset = adata->fw_bin_page_count * 8;
		addr = adata->dma_addr;
		break;
	default:
		dev_err(sdev->dev, "Invalid data type %x\n", type);
		return;
	}

	/* Group Enable */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_BASE_ADDR_GRP_1,
			  desc->sram_pte_offset | BIT(31));
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_PAGE_SIZE_GRP_1,
			  PAGE_SIZE_4K_ENABLE);
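	/* Program one 8-byte PTE per page: the low 32 address bits, then the upper bits with BIT(31) set */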
	for (page_idx = 0; page_idx < num_pages; page_idx++) {
		low = lower_32_bits(addr);
		high = upper_32_bits(addr);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset, low);
		high |= BIT(31);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_SCRATCH_REG_0 + offset + 4, high);
		offset += 8;
		addr += PAGE_SIZE;
	}

	/* Flush ATU Cache after PTE Update */
	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACPAXI2AXI_ATU_CTRL, ACP_ATU_CACHE_INVALID);
}
/*
 * Pre-FW-run operations: map the staged firmware and data images through the
 * ATU and DMA them into the DSP's IRAM and data RAM before the cores are started.
 */
int acp_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	struct pci_dev *pci = to_pci_dev(sdev->dev);
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	struct acp_dev_data *adata;
	unsigned int src_addr, size_fw;
	u32 page_count, dma_size;
	int ret;

	adata = sdev->pdata->hw_pdata;
	size_fw = adata->fw_bin_size;

	page_count = PAGE_ALIGN(size_fw) >> PAGE_SHIFT;
	adata->fw_bin_page_count = page_count;

	configure_pte_for_fw_loading(FW_BIN, page_count, adata);
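	/* Stream the staged firmware image through the SHA DMA engine into IRAM */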
	ret = configure_and_run_sha_dma(adata, adata->bin_buf, ACP_SYSTEM_MEMORY_WINDOW,
					ACP_IRAM_BASE_ADDRESS, size_fw);
	if (ret < 0) {
		dev_err(sdev->dev, "SHA DMA transfer failed status: %d\n", ret);
		return ret;
	}
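	/* Map the data image behind the firmware pages and DMA it into DSP data RAM */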
	configure_pte_for_fw_loading(FW_DATA_BIN, ACP_DRAM_PAGE_COUNT, adata);

	src_addr = ACP_SYSTEM_MEMORY_WINDOW + page_count * ACP_PAGE_SIZE;
	ret = configure_and_run_dma(adata, src_addr, ACP_DATA_RAM_BASE_ADDRESS,
				    adata->fw_data_bin_size);
	if (ret < 0) {
		dev_err(sdev->dev, "acp dma configuration failed: %d\n", ret);
		return ret;
	}
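	/* Confirm the data DMA on channel 0 has completed */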
	ret = acp_dma_status(adata, 0);
	if (ret < 0)
		dev_err(sdev->dev, "acp dma transfer status: %d\n", ret);

	if (desc->rev > 3) {
		/* Cache Window enable */
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_OFFSET0, desc->sram_pte_offset);
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_CACHE_SIZE0, SRAM1_SIZE | BIT(31));
	}

	/* Free memory once DMA is complete */
	dma_size = (PAGE_ALIGN(sdev->basefw.fw->size) >> PAGE_SHIFT) * ACP_PAGE_SIZE;
	dma_free_coherent(&pci->dev, dma_size, adata->bin_buf, adata->sha_dma_addr);
	dma_free_coherent(&pci->dev, ACP_DEFAULT_DRAM_LENGTH, adata->data_buf, adata->dma_addr);
	adata->bin_buf = NULL;
	adata->data_buf = NULL;

	return ret;
}
EXPORT_SYMBOL_NS(acp_dsp_pre_fw_run, SND_SOC_SOF_AMD_COMMON);
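/*
 * Start the DSP: clearing RUNSTALL releases the main core, and platforms that
 * provide a fusion DSP runstall offset release that core as well.
 */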
int acp_sof_dsp_run(struct snd_sof_dev *sdev)
{
	const struct sof_amd_acp_desc *desc = get_chip_info(sdev->pdata);
	int val;

	snd_sof_dsp_write(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL, ACP_DSP_RUN);
	val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, ACP_DSP0_RUNSTALL);
	dev_dbg(sdev->dev, "ACP_DSP0_RUNSTALL : 0x%0x\n", val);

	/* Platforms without a fusion DSP keep this offset at zero */
	if (desc->fusion_dsp_offset) {
		snd_sof_dsp_write(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset, ACP_DSP_RUN);
		val = snd_sof_dsp_read(sdev, ACP_DSP_BAR, desc->fusion_dsp_offset);
		dev_dbg(sdev->dev, "ACP_DSP0_FUSION_RUNSTALL : 0x%0x\n", val);
	}

	return 0;
}
EXPORT_SYMBOL_NS(acp_sof_dsp_run, SND_SOC_SOF_AMD_COMMON);