| author | Vladimir Murzin <vladimir.murzin@arm.com> | 2018-11-23 14:25:21 +0300 |
|---|---|---|
| committer | Russell King <rmk+kernel@armlinux.org.uk> | 2018-12-05 01:38:33 +0300 |
| commit | 3d0358d0ba048c5afb1385787aaec8fa5ad78fcc (patch) | |
| tree | 705264c7e2cbe848b867bbf2a0eb0cb7034ecacb /arch | |
| parent | a1208f6a822ac29933e772ef1f637c5d67838da9 (diff) | |
| download | linux-3d0358d0ba048c5afb1385787aaec8fa5ad78fcc.tar.xz | |
ARM: 8815/1: V7M: align v7m_dma_inv_range() with v7 counterpart
Chris has discovered and reported that v7_dma_inv_range() may corrupt
memory if the address range is not aligned to the cache line size.
Since the whole of cache-v7m.S was lifted from cache-v7.S, the same
observation applies to v7m_dma_inv_range(). So the fix simply mirrors
what has been done for v7, with minor M-class specifics.
Cc: Chris Cole <chris@sageembedded.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
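
For readers unfamiliar with the referenced v7 issue, the following minimal C sketch (not kernel code) illustrates the corruption the commit message describes; the name dma_inv_range_buggy() and the helpers in it are hypothetical:

```c
/*
 * Illustration only, not kernel code: why a plain invalidate over an
 * unaligned range can corrupt memory.  CACHE_LINE and invalidate_line()
 * are hypothetical stand-ins for the real line size and the DCIMVAC op.
 */
#include <stdint.h>

#define CACHE_LINE 32u

static void invalidate_line(uintptr_t addr)
{
	(void)addr;	/* would discard the whole cache line containing addr */
}

static void dma_inv_range_buggy(uintptr_t start, uintptr_t end)
{
	/*
	 * Rounding start down to a line boundary pulls in bytes that belong
	 * to neighbouring data; the same happens at the tail if end is not
	 * aligned.  Invalidating those partial lines throws away any dirty
	 * data they hold instead of writing it back first.
	 */
	for (uintptr_t p = start & ~(uintptr_t)(CACHE_LINE - 1); p < end;
	     p += CACHE_LINE)
		invalidate_line(p);
}
```

Any dirty data that shares the first or last cache line with the buffer is silently discarded, which is the corruption reported for v7_dma_inv_range() and inherited by v7m_dma_inv_range().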
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/mm/cache-v7m.S | 14 |
1 file changed, 9 insertions, 5 deletions
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index 788486e830d3..32aa2a2aa260 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -73,9 +73,11 @@
 /*
  * dcimvac: Invalidate data cache line by MVA to PoC
  */
-.macro dcimvac, rt, tmp
-	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
 .endm
+.endr
 
 /*
  * dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
 	tst	r0, r3
 	bic	r0, r0, r3
 	dccimvacne r0, r3
+	addne	r0, r0, r2
 	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
 	tst	r1, r3
 	bic	r1, r1, r3
 	dccimvacne r1, r3
-1:
-	dcimvac r0, r3
-	add	r0, r0, r2
 	cmp	r0, r1
+1:
+	dcimvaclo r0, r3
+	addlo	r0, r0, r2
+	cmplo	r0, r1
 	blo	1b
 	dsb	st
 	ret	lr
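
Two things happen in the patch: the first hunk uses .irp to generate condition-suffixed variants of the dcimvac macro (dcimvaceq, dcimvacne, ..., dcimvaclo) so the invalidate can be predicated, and the second hunk restructures the loop so that the partially covered head and tail lines are cleaned and invalidated (dccimvac) while only fully covered lines receive a plain invalidate. A rough C equivalent of the resulting logic is sketched below; it is an illustration only, and dma_inv_range_fixed(), clean_and_invalidate_line() and invalidate_line() are hypothetical names:

```c
/*
 * Rough C equivalent of the patched loop, for illustration only.
 * clean_and_invalidate_line() and invalidate_line() are hypothetical
 * stand-ins for the DCCIMVAC and DCIMVAC operations; CACHE_LINE is an
 * assumed line size for the sketch.
 */
#include <stdint.h>

#define CACHE_LINE 32u

static void invalidate_line(uintptr_t addr)           { (void)addr; }
static void clean_and_invalidate_line(uintptr_t addr) { (void)addr; }

static void dma_inv_range_fixed(uintptr_t start, uintptr_t end)
{
	uintptr_t mask = CACHE_LINE - 1;

	if (start & mask) {			/* partial line at the head */
		start &= ~mask;
		clean_and_invalidate_line(start);	/* write back neighbours' data */
		start += CACHE_LINE;		/* mirrors the new "addne r0, r0, r2" */
	}
	if (end & mask) {			/* partial line at the tail */
		end &= ~mask;
		clean_and_invalidate_line(end);
	}
	while (start < end) {			/* full lines only: plain invalidate */
		invalidate_line(start);		/* mirrors dcimvaclo/addlo/cmplo */
		start += CACHE_LINE;
	}
}
```

The head and tail lines are cleaned before invalidation so that data belonging to neighbouring buffers is written back rather than lost, and because the comparison now happens before the first invalidate, a range that collapses to nothing after the edge handling performs no invalidation at all.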