author		Mike Frysinger <vapier@gentoo.org>	2009-06-29 22:20:10 +0400
committer	Mike Frysinger <vapier@gentoo.org>	2009-07-16 09:52:24 +0400
commit		fb4b5d3a379824d94fd71fc1aa78e9dbcb15b948 (patch)
tree		104b640b09ebbc58f4eb3b67fd190bf7bf9a3912 /arch/blackfin
parent		8399a74f61c69c7d233924de3dd314ca0effa16a (diff)
Blackfin: handle BF561 Core B memory regions better when SMP=n
Rather than assume Core B is always run with caches turned on, let people
load into any of the on-chip memory regions.  It is their business how the
SRAM/Cache regions are utilized, so don't prevent them from being able to
load into them.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
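For context, the region helpers touched in process.c look roughly like the sketch below. Only in_mem_const() and its corrected call are taken from the patch itself; the in_mem() helper and the other function bodies are reconstructions from the call sites, not verbatim kernel code.

/* Sketch only: assumed shape of the helpers in arch/blackfin/kernel/process.c.
 * Before this patch, in_mem_const() called in_mem_const_off(addr, 0, size, ...),
 * passing 0 as the access size and the real size as the region offset, so the
 * bounds check was performed against the wrong window.
 */
static inline
int in_mem(unsigned long addr, unsigned long size,
	   unsigned long start, unsigned long end)
{
	return addr >= start && (addr + size) <= end;
}

static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
		     unsigned long const_addr, unsigned long const_size)
{
	/* Accept the access only if the region exists and [addr, addr + size)
	 * stays inside [const_addr + off, const_addr + const_size). */
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}

static inline
int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
{
	/* Corrected argument order: size is the access size, 0 the offset. */
	return in_mem_const_off(addr, size, 0, const_addr, const_size);
}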
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/kernel/process.c				14
-rw-r--r--	arch/blackfin/mach-bf561/include/mach/mem_map.h		23
2 files changed, 29 insertions, 8 deletions
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 79cad0ac5892..9da36bab7ccb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -361,7 +361,7 @@ static inline
 int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
 {
-	return in_mem_const_off(addr, 0, size, const_addr, const_size);
+	return in_mem_const_off(addr, size, 0, const_addr, const_size);
 }
 #define IN_ASYNC(bnum, bctlnum) \
 ({ \
@@ -390,13 +390,13 @@ int bfin_mem_access_type(unsigned long addr, unsigned long size)
 	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
 #ifdef COREB_L1_CODE_START
-	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
 		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
 	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
-	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
 		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
-	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
 		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
 #endif
 	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
@@ -472,13 +472,13 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return 1;
 #ifdef COREB_L1_CODE_START
-	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
 		return 1;
 	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 		return 1;
-	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
 		return 1;
-	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
 		return 1;
 #endif
 	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index a63e15c86d90..5b96ea549a04 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -37,7 +37,6 @@
 
 /* Memory Map for ADSP-BF561 processors */
 
-#ifdef CONFIG_BF561
 #define COREA_L1_CODE_START       0xFFA00000
 #define COREA_L1_DATA_A_START     0xFF800000
 #define COREA_L1_DATA_B_START     0xFF900000
@@ -74,6 +73,28 @@
 #define BFIN_DCACHESIZE	(0*1024)
 #define BFIN_DSUPBANKS	0
 #endif /*CONFIG_BFIN_DCACHE*/
+
+/*
+ * If we are in SMP mode, then the cache settings of Core B will match
+ * the settings of Core A. If we aren't, then we assume Core B is not
+ * using any cache. This allows the rest of the kernel to work with
+ * the core in either mode as we are only loading user code into it and
+ * it is the user's problem to make sure they aren't doing something
+ * stupid there.
+ *
+ * Note that we treat the L1 code region as a contiguous blob to make
+ * the rest of the kernel simpler. Easier to check one region than a
+ * bunch of small ones. Again, possible misbehavior here is the fault
+ * of the user -- don't try to use memory that doesn't exist.
+ */
+#ifdef CONFIG_SMP
+# define COREB_L1_CODE_LENGTH L1_CODE_LENGTH
+# define COREB_L1_DATA_A_LENGTH L1_DATA_A_LENGTH
+# define COREB_L1_DATA_B_LENGTH L1_DATA_B_LENGTH
+#else
+# define COREB_L1_CODE_LENGTH 0x14000
+# define COREB_L1_DATA_A_LENGTH 0x8000
+# define COREB_L1_DATA_B_LENGTH 0x8000
 #endif
 
 /* Level 2 Memory */
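As a rough illustration of how the new per-core lengths are consumed: with SMP=n, COREB_L1_CODE_LENGTH covers 0x14000 bytes (80 KiB, the whole Core B code window treated as one contiguous blob per the comment in the hunk above) and each data bank covers 0x8000 bytes (32 KiB). The sketch below is hypothetical glue, not kernel code; the real consumers are bfin_mem_access_type() and _access_ok() in the process.c hunks, and it assumes the in_mem_const() helper sketched earlier and the mem_map.h macros are in scope.

/* Hypothetical wrapper for illustration only: accepts any access that falls
 * entirely inside one of Core B's on-chip regions, mirroring the checks that
 * arch/blackfin/kernel/process.c now performs with the COREB_* lengths. */
static int coreb_l1_region_ok(unsigned long addr, unsigned long size)
{
	return in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH) ||
	       in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH) ||
	       in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH);
}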