// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>
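
/*
 * Set up the DMA atomic pool at boot so coherent allocations can be
 * satisfied from non-blocking (atomic) context, using an uncached
 * page protection.
 */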
static int __init atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);
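
/*
 * Zero a freshly allocated coherent region and write back + invalidate
 * it from the data cache, so no dirty lines remain that could later be
 * evicted on top of the uncached mapping handed to the device.
 */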
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	if (PageHighMem(page)) {
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		do {
			void *ptr = kmap_atomic(page);
			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;

			memset(ptr, 0, _size);
			dma_wbinv_range((unsigned long)ptr,
					(unsigned long)ptr + _size);

			kunmap_atomic(ptr);

			page++;
			size -= PAGE_SIZE;
			count--;
		} while (count);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		dma_wbinv_range((unsigned long)ptr, (unsigned long)ptr + size);
	}
}
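
/*
 * Apply a cache maintenance routine to the physical range
 * [paddr, paddr + size), walking it page by page so that highmem pages
 * can be mapped with kmap_atomic() while lowmem pages go through the
 * kernel linear mapping.
 */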
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		/* Clamp each step to the end of the current page. */
		size_t len = left;

		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			/* Highmem pages need a temporary kernel mapping. */
			void *addr = kmap_atomic(page);

			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			/* Lowmem pages are in the kernel linear mapping. */
			start = (unsigned long)page_address(page) + offset;
			fn(start, start + len);
		}

		offset = 0;
		page++;
		left -= len;
	} while (left);
}
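
/*
 * Before handing a streaming buffer to the device: write back dirty
 * lines for DMA_TO_DEVICE; write back + invalidate when the device may
 * also write the buffer (DMA_FROM_DEVICE / DMA_BIDIRECTIONAL).
 */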
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}
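
/*
 * After the device is done with the buffer: write back + invalidate for
 * DMA_FROM_DEVICE / DMA_BIDIRECTIONAL drops any stale lines before the
 * CPU reads the data the device wrote.
 */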
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}