Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--  arch/mips/cavium-octeon/Kconfig                       85
-rw-r--r--  arch/mips/cavium-octeon/Makefile                      16
-rw-r--r--  arch/mips/cavium-octeon/csrc-octeon.c                 58
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c                  32
-rw-r--r--  arch/mips/cavium-octeon/executive/Makefile            13
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-bootmem.c     586
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c         734
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-sysinfo.c     116
-rw-r--r--  arch/mips/cavium-octeon/executive/octeon-model.c     358
-rw-r--r--  arch/mips/cavium-octeon/flash_setup.c                 84
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c                 497
-rw-r--r--  arch/mips/cavium-octeon/octeon-memcpy.S              521
-rw-r--r--  arch/mips/cavium-octeon/serial.c                     136
-rw-r--r--  arch/mips/cavium-octeon/setup.c                      929
-rw-r--r--  arch/mips/cavium-octeon/smp.c                        211
15 files changed, 4376 insertions, 0 deletions
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 000000000000..094c17e38e16
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,85 @@
+config CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ bool "Enable Octeon specific options"
+ depends on CPU_CAVIUM_OCTEON
+ default "y"
+
+config CAVIUM_OCTEON_2ND_KERNEL
+ bool "Build the kernel to be used as a 2nd kernel on the same chip"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "n"
+ help
+ This option configures this kernel to be linked at a different
+ address and use the 2nd uart for output. This allows a kernel built
+ with this option to be run at the same time as one built without this
+ option.
+
+config CAVIUM_OCTEON_HW_FIX_UNALIGNED
+ bool "Enable hardware fixups of unaligned loads and stores"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "y"
+ help
+ Configure the Octeon hardware to automatically fix unaligned loads
+ and stores. Normally unaligned accesses are fixed using a kernel
+ exception handler. This option enables the hardware automatic fixups,
+ which requires only an extra 3 cycles. Disable this option if you
+ are running code that relies on address exceptions on unaligned
+ accesses.
+
+config CAVIUM_OCTEON_CVMSEG_SIZE
+ int "Number of L1 cache lines reserved for CVMSEG memory"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ range 0 54
+ default 1
+ help
+ CVMSEG LM is a segment that accesses portions of the dcache as a
+ local memory; the larger CVMSEG is, the smaller the cache is.
+ This selects the size of CVMSEG LM, which is in cache blocks. The
+ legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is
+ between zero and 6912 bytes).
+
+config CAVIUM_OCTEON_LOCK_L2
+ bool "Lock often used kernel code in the L2"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "y"
+ help
+ Enable locking parts of the kernel into the L2 cache.
+
+config CAVIUM_OCTEON_LOCK_L2_TLB
+ bool "Lock the TLB handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level TLB fast path into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ bool "Lock the exception handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level exception handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ bool "Lock the interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level interrupt handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ bool "Lock the 2nd level interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the 2nd level interrupt handler in L2.
+
+config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ bool "Lock memcpy() in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the kernel's implementation of memcpy() into L2.
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ select SPARSEMEM_STATIC
+ depends on CPU_CAVIUM_OCTEON
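The CVMSEG size above is specified in L1 dcache lines, which are 128 bytes on Octeon. A minimal sketch of the lines-to-bytes mapping (illustrative only, not part of this patch):

    /* Illustrative sketch, not part of the patch: Octeon L1 dcache lines
     * are 128 bytes, so CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE maps to bytes as
     * lines * 128; the maximum of 54 lines gives 6912 bytes. */
    #define OCTEON_CACHE_LINE_BYTES 128

    static inline unsigned int cvmseg_bytes(unsigned int lines)
    {
            return lines * OCTEON_CACHE_LINE_BYTES;
    }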
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 000000000000..1c2a7faf5881
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o
+obj-y += dma-octeon.o flash_setup.o
+obj-y += octeon-memcpy.o
+
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 000000000000..70fd92c31657
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/*
+ * Set the current core's cvmcount counter to the value of the
+ * IPD_CLK_COUNT. We do this on all cores as they are brought
+ * on-line. This allows for a read from a local cpu register to
+ * access a synchronized counter.
+ *
+ */
+void octeon_init_cvmcount(void)
+{
+ unsigned long flags;
+ unsigned loops = 2;
+
+ /* Clobber loops so GCC will not unroll the following while loop. */
+ asm("" : "+r" (loops));
+
+ local_irq_save(flags);
+ /*
+ * Loop several times so we are executing from the cache,
+ * which should give more deterministic timing.
+ */
+ while (loops--)
+ write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
+ local_irq_restore(flags);
+}
+
+static cycle_t octeon_cvmcount_read(void)
+{
+ return read_c0_cvmcount();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "OCTEON_CVMCOUNT",
+ .read = octeon_cvmcount_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init plat_time_init(void)
+{
+ clocksource_mips.rating = 300;
+ clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+ clocksource_register(&clocksource_mips);
+}
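clocksource_set_clock() above derives a mult/shift pair from mips_hpt_frequency; the timekeeping core then scales raw cvmcount values into nanoseconds. A hedged sketch of that conversion (the helper name is ours, not a kernel API; 64-bit overflow on very large deltas is ignored for brevity):

    /* Sketch: ns = (cycles * mult) >> shift, with mult chosen so that
     * mult = (10^9 << shift) / mips_hpt_frequency. */
    static inline unsigned long long cvmcount_to_ns(unsigned long long cycles,
                                                    unsigned int mult,
                                                    unsigned int shift)
    {
            return (cycles * mult) >> shift;
    }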
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 000000000000..01b1ef94b361
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ * IP32 changes by Ilya.
+ * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
+ * the kernel's original.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <dma-coherence.h>
+
+dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+{
+ /* Without PCI/PCIe this function can be called for Octeon internal
+ * devices such as USB. These devices all support 64-bit addressing. */
+ mb();
+ return virt_to_phys(ptr);
+}
+
+void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+{
+ /* Without PCI/PCIe this function can be called for Octeon internal
+ * devices such as USB. These devices all support 64-bit addressing. */
+ return;
+}
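Because all Octeon on-chip devices can address the full physical space, the mapping above reduces to virt_to_phys() plus a write barrier. A hypothetical caller (driver context assumed, not from this patch):

    /* Hypothetical usage sketch, not part of the patch. */
    static void example_dma_transfer(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = octeon_map_dma_mem(dev, buf, len);

            /* ... program the device with 'handle' and run the transfer ... */

            octeon_unmap_dma_mem(dev, handle);
    }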
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 000000000000..80d6cb26766b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 000000000000..4f5a08b37ccd
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,586 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate only memory allocator. Used to allocate memory at
+ * application start time.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-bootmem.h>
+
+/*#define DEBUG */
+
+
+static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
+
+/* See header file for descriptions of functions */
+
+/*
+ * Wrapper functions are provided for reading/writing the size and
+ * next block values, as these may not be directly addressable (in 32
+ * bit applications, for instance). Offsets of data elements in the
+ * bootmem list must match cvmx_bootmem_block_header_t.
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
+
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr)
+{
+ int64_t address;
+ address =
+ cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+ uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, address,
+ address + size);
+}
+
+void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+
+int cvmx_bootmem_free_named(char *name)
+{
+ return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+ return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+
+void cvmx_bootmem_lock(void)
+{
+ cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+void cvmx_bootmem_unlock(void)
+{
+ cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+ /* Here we set the global pointer to the bootmem descriptor
+ * block. This pointer will be used directly, so we will set
+ * it up to be directly usable by the application. It is set
+ * up as follows for the various runtime/ABI combinations:
+ *
+ * Linux 64 bit: Set XKPHYS bit
+ * Linux 32 bit: use mmap to create mapping, use virtual address
+ * CVMX 64 bit: use physical address directly
+ * CVMX 32 bit: use physical address directly
+ *
+ * Note that the CVMX environment assumes the use of 1-1 TLB
+ * mappings so that the physical addresses can be used
+ * directly
+ */
+ if (!cvmx_bootmem_desc) {
+#if defined(CVMX_ABI_64)
+ /* Set XKPHYS bit */
+ cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+ cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above. These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features. Most
+ * applications should not need to use these.
+ */
+
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+ uint64_t address_max, uint64_t alignment,
+ uint32_t flags)
+{
+
+ uint64_t head_addr;
+ uint64_t ent_addr;
+ /* Points to previous list entry; zero when current entry is head of list. */
+ uint64_t prev_addr = 0;
+ uint64_t new_ent_addr = 0;
+ uint64_t desired_min_addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
+ "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ (unsigned long long)req_size,
+ (unsigned long long)address_min,
+ (unsigned long long)address_max,
+ (unsigned long long)alignment);
+#endif
+
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ goto error_out;
+ }
+
+ /*
+ * Do a variety of checks to validate the arguments. The
+ * allocator code will later assume that these checks have
+ * been made. We validate that the requested constraints are
+ * not self-contradictory before we look through the list of
+ * available memory.
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ goto error_out;
+
+ /* Round req_size up to mult of minimum alignment bytes */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ /*
+ * Convert !0 address_min and 0 address_max to special case of
+ * range that specifies an exact memory block to allocate. Do
+ * this before other checks and adjustments so that this
+ * transformation will be validated.
+ */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max limits */
+
+
+ /*
+ * Enforce minimum alignment (this also keeps the minimum free block
+ * size the same as the alignment size).
+ */
+ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+ alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+
+ /*
+ * Adjust address minimum based on requested alignment (round
+ * up to meet alignment). Do this here so we can reject
+ * impossible requests up front. (NOP for address_min == 0)
+ */
+ if (alignment)
+ address_min = __ALIGN_MASK(address_min, (alignment - 1));
+
+ /*
+ * Reject inconsistent args. We have adjusted these, so this
+ * may fail due to our internal changes even if this check
+ * would pass for the values the user supplied.
+ */
+ if (req_size > address_max - address_min)
+ goto error_out;
+
+ /* Walk through the list entries - first fit found is returned */
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ head_addr = cvmx_bootmem_desc->head_addr;
+ ent_addr = head_addr;
+ for (; ent_addr;
+ prev_addr = ent_addr,
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+ uint64_t usable_base, usable_max;
+ uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+ if (cvmx_bootmem_phy_get_next(ent_addr)
+ && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
+ cvmx_dprintf("Internal bootmem_alloc() error: ent: "
+ "0x%llx, next: 0x%llx\n",
+ (unsigned long long)ent_addr,
+ (unsigned long long)
+ cvmx_bootmem_phy_get_next(ent_addr));
+ goto error_out;
+ }
+
+ /*
+ * Determine if this is an entry that can satisfy the
+ * request; check to make sure the entry is large enough
+ * to satisfy the request.
+ */
+ usable_base =
+ __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
+ usable_max = min(address_max, ent_addr + ent_size);
+ /*
+ * We should be able to allocate block at address
+ * usable_base.
+ */
+
+ desired_min_addr = usable_base;
+ /*
+ * Determine if request can be satisfied from the
+ * current entry.
+ */
+ if (!((ent_addr + ent_size) > usable_base
+ && ent_addr < address_max
+ && req_size <= usable_max - usable_base))
+ continue;
+ /*
+ * We have found an entry that has room to satisfy the
+ * request, so allocate it from this entry. If
+ * CVMX_BOOTMEM_FLAG_END_ALLOC is set, allocate from
+ * the end of this block rather than the beginning.
+ */
+ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
+ desired_min_addr = usable_max - req_size;
+ /*
+ * Align desired address down to required
+ * alignment.
+ */
+ desired_min_addr &= ~(alignment - 1);
+ }
+
+ /* Match at start of entry */
+ if (desired_min_addr == ent_addr) {
+ if (req_size < ent_size) {
+ /*
+ * big enough to create a new block
+ * from top portion of block.
+ */
+ new_ent_addr = ent_addr + req_size;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ ent_size -
+ req_size);
+
+ /*
+ * Adjust next pointer as following
+ * code uses this.
+ */
+ cvmx_bootmem_phy_set_next(ent_addr,
+ new_ent_addr);
+ }
+
+ /*
+ * adjust prev ptr or head to remove this
+ * entry from list.
+ */
+ if (prev_addr)
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ else
+ /*
+ * head of list being returned, so
+ * update head ptr.
+ */
+ cvmx_bootmem_desc->head_addr =
+ cvmx_bootmem_phy_get_next(ent_addr);
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return desired_min_addr;
+ }
+ /*
+ * The block returned doesn't start at the beginning
+ * of the entry, so we know that we will be splitting
+ * a block off the front of this one. Create a new
+ * block from the beginning, add it to the list, and
+ * go to the top of the loop again.
+ *
+ * The new block is created from the high portion of
+ * the entry, so that the top block starts at the
+ * desired address.
+ */
+ new_ent_addr = desired_min_addr;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next
+ (ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ cvmx_bootmem_phy_get_size
+ (ent_addr) -
+ (desired_min_addr -
+ ent_addr));
+ cvmx_bootmem_phy_set_size(ent_addr,
+ desired_min_addr - ent_addr);
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ /* Loop again to handle actual alloc from new block */
+ }
+error_out:
+ /* We didn't find anything, so return error */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return -1;
+}
+
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+ uint64_t cur_addr;
+ uint64_t prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
+ (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+
+ /* 0 is not a valid size for this allocator */
+ if (!size)
+ return 0;
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ cur_addr = cvmx_bootmem_desc->head_addr;
+ if (cur_addr == 0 || phy_addr < cur_addr) {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr) {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+
+ } else {
+ /* New block before first block. OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr) {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr) {
+ /*
+ * We have reached the end of the list, add on to end,
+ * checking to see if we need to combine with last
+ * block
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ } else {
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else {
+ /*
+ * insert between prev and cur nodes, checking for
+ * merge with either/both.
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ if (phy_addr + size == cur_addr) {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) +
+ cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else if (phy_addr + size == cur_addr) {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+ }
+ retval = 1;
+
+bootmem_free_done:
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return retval;
+
+}
+
+struct cvmx_bootmem_named_block_desc *
+ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+ unsigned int i;
+ struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+ /*
+ * Lock the structure to make sure that it is not being
+ * changed while we are examining it.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+
+ /* Use XKPHYS for 64 bit linux */
+ named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
+ cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+
+#ifdef DEBUG
+ cvmx_dprintf
+ ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
+ named_block_array_ptr);
+#endif
+ if (cvmx_bootmem_desc->major_version == 3) {
+ for (i = 0;
+ i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
+ if ((name && named_block_array_ptr[i].size
+ && !strncmp(name, named_block_array_ptr[i].name,
+ cvmx_bootmem_desc->named_block_name_len
+ - 1))
+ || (!name && !named_block_array_ptr[i].size)) {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return &(named_block_array_ptr[i]);
+ }
+ }
+ } else {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ }
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return NULL;
+}
+
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+ struct cvmx_bootmem_named_block_desc *named_block_ptr;
+
+ if (cvmx_bootmem_desc->major_version != 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+ "%d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+ /*
+ * Take lock here, as name lookup/block free/name free need to
+ * be atomic.
+ */
+ cvmx_bootmem_lock();
+
+ named_block_ptr =
+ cvmx_bootmem_phy_named_block_find(name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_ptr) {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
+ "%s, base: 0x%llx, size: 0x%llx\n",
+ name,
+ (unsigned long long)named_block_ptr->base_addr,
+ (unsigned long long)named_block_ptr->size);
+#endif
+ __cvmx_bootmem_phy_free(named_block_ptr->base_addr,
+ named_block_ptr->size,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ named_block_ptr->size = 0;
+ /* Set size to zero to indicate block not used. */
+ }
+
+ cvmx_bootmem_unlock();
+ return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+}
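A minimal usage sketch of the first-fit allocator above (sizes and ranges are arbitrary examples; cvmx_ptr_to_phys() is assumed from the cvmx headers):

    /* Usage sketch, not part of the patch; values are arbitrary. */
    static void example_bootmem_use(void)
    {
            /* 1 MB block, 128-byte aligned, anywhere in the first 256 MB. */
            void *buf = cvmx_bootmem_alloc_range(1ull << 20, 128,
                                                 0, 256ull << 20);
            if (buf) {
                    /* ... use buf, then return it to the free list ... */
                    __cvmx_bootmem_phy_free(cvmx_ptr_to_phys(buf),
                                            1ull << 20, 0);
            }
    }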
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 000000000000..6abe56f1e097
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,734 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Implementation of the Level 2 Cache (L2C) control, measurement, and
+ * debugging facilities.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+static cvmx_spinlock_t cvmx_l2c_spinlock;
+
+static inline int l2_size_half(void)
+{
+ uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
+ return !!(val & (1ull << 34));
+}
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+ uint32_t field;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ /*
+ * Use the lower two bits of the core number to determine the
+ * bit offset of the UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Return the UMSK[] field from the appropriate L2C_SPAR
+ * register based on the core number.
+ */
+
+ switch (core & 0xC) {
+ case 0x0:
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
+ field;
+ case 0x4:
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
+ field;
+ case 0x8:
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
+ field;
+ case 0xC:
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
+ field;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+ uint32_t field;
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error. */
+ if (mask == valid_mask)
+ return -1;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ /* Check to make sure current mask & new mask don't block all ways */
+ if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
+ valid_mask)
+ return -1;
+
+ /* Use the lower two bits of core to determine the bit offset of the
+ * UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /* Assign the new mask setting to the UMSK[] field in the appropriate
+ * L2C_SPAR register based on the core_num.
+ *
+ */
+ switch (core & 0xC) {
+ case 0x0:
+ cvmx_write_csr(CVMX_L2C_SPAR0,
+ (cvmx_read_csr(CVMX_L2C_SPAR0) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0x4:
+ cvmx_write_csr(CVMX_L2C_SPAR1,
+ (cvmx_read_csr(CVMX_L2C_SPAR1) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0x8:
+ cvmx_write_csr(CVMX_L2C_SPAR2,
+ (cvmx_read_csr(CVMX_L2C_SPAR2) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0xC:
+ cvmx_write_csr(CVMX_L2C_SPAR3,
+ (cvmx_read_csr(CVMX_L2C_SPAR3) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+ uint32_t valid_mask;
+
+ valid_mask = 0xff;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ if (l2_size_half())
+ valid_mask = 0xf;
+ } else if (l2_size_half())
+ valid_mask = 0x3;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error. */
+ if (mask == valid_mask)
+ return -1;
+ /* Check to make sure current mask & new mask don't block all ways */
+ if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
+ valid_mask)
+ return -1;
+
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read)
+{
+ union cvmx_l2c_pfctl pfctl;
+
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt0rdclr = clear_on_read;
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt1rdclr = clear_on_read;
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt2rdclr = clear_on_read;
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt3rdclr = clear_on_read;
+ break;
+ }
+
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+ switch (counter) {
+ case 0:
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ case 1:
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ case 2:
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ case 3:
+ default:
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking
+ *
+ * @addr: Address of base of memory region to read into L2 cache
+ * @len: Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+ volatile char *ptr;
+ volatile char dummy;
+ /*
+ * Adjust addr and length so we get all cache lines even for
+ * small ranges spanning two cache lines
+ */
+ len += addr & CVMX_CACHE_LINE_MASK;
+ addr &= ~CVMX_CACHE_LINE_MASK;
+ ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+ /*
+ * Invalidate L1 cache to make sure all loads result in data
+ * being in L2.
+ */
+ CVMX_DCACHE_INVALIDATE;
+ while (len > 0) {
+ dummy += *ptr;
+ len -= CVMX_CACHE_LINE_SIZE;
+ ptr += CVMX_CACHE_LINE_SIZE;
+ }
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ addr &= ~CVMX_CACHE_LINE_MASK;
+
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift =
+ CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp =
+ addr ^ (addr & ((1 << alias_shift) - 1)) >>
+ CVMX_L2_SET_BITS;
+ lckbase.s.lck_base = addr_tmp >> 7;
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
+
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+
+ return retval;
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+ int retval = 0;
+
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+ while (len) {
+ retval += cvmx_l2c_lock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return retval;
+}
+
+void cvmx_l2c_flush(void)
+{
+ uint64_t assoc, set;
+ uint64_t n_assoc, n_set;
+ union cvmx_l2c_dbg l2cdbg;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+ n_set = CVMX_L2_SETS;
+ n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ l2cdbg.s.set = assoc;
+ /* Enter debug mode, and make sure all other
+ * writes complete before we enter debug
+ * mode. */
+ CVMX_SYNCW;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+ (CVMX_MIPS_SPACE_XKPHYS,
+ set * CVMX_CACHE_LINE_SIZE), 0);
+ CVMX_SYNCW; /* Push STF out to L2 */
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ }
+ }
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+}
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+ int assoc;
+ union cvmx_l2c_tag tag;
+ union cvmx_l2c_dbg l2cdbg;
+ uint32_t tag_addr;
+
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /* Compute portion of address that is stored in tag */
+ tag_addr =
+ ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
+ ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_get_l2c_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ l2cdbg.u64 = 0;
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.set = assoc;
+ l2cdbg.s.finv = 1;
+
+ CVMX_SYNC;
+ /* Enter debug mode */
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+ (CVMX_MIPS_SPACE_XKPHYS,
+ address), 0);
+ CVMX_SYNC;
+ /* Exit debug mode */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return tag.s.L;
+ }
+ }
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+ int num_unlocked = 0;
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+ while (len > 0) {
+ num_unlocked += cvmx_l2c_unlock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return num_unlocked;
+}
+
+/*
+ * Internal l2c tag types. These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tag_cn50xx {
+ uint64_t reserved:40;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:20; /* Phys mem addr (33..14) */
+ } cn50xx;
+ struct cvmx_l2c_tag_cn30xx {
+ uint64_t reserved:41;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:19; /* Phys mem addr (33..15) */
+ } cn30xx;
+ struct cvmx_l2c_tag_cn31xx {
+ uint64_t reserved:42;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:18; /* Phys mem addr (33..16) */
+ } cn31xx;
+ struct cvmx_l2c_tag_cn38xx {
+ uint64_t reserved:43;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:17; /* Phys mem addr (33..17) */
+ } cn38xx;
+ struct cvmx_l2c_tag_cn58xx {
+ uint64_t reserved:44;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:16; /* Phys mem addr (33..18) */
+ } cn58xx;
+ struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
+ struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
+};
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag. This code makes the current core
+ * the 'debug core' for the L2. This code must only be executed by
+ * one core at a time.
+ *
+ * @assoc: Association (way) of the tag to dump
+ * @index: Index of the cacheline
+ *
+ * Returns The Octeon model specific tag structure. This is
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+
+ uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+ uint64_t core = cvmx_get_core_num();
+ union __cvmx_l2c_tag tag_val;
+ uint64_t dbg_addr = CVMX_L2C_DBG;
+ unsigned long flags;
+
+ union cvmx_l2c_dbg debug_val;
+ debug_val.u64 = 0;
+ /*
+ * For low core count parts, the core number is always small enough
+ * to stay in the correct field and not set any reserved bits.
+ */
+ debug_val.s.ppnum = core;
+ debug_val.s.l2t = 1;
+ debug_val.s.set = assoc;
+ /*
+ * Make sure core is quiet (no prefetches, etc.) before
+ * entering debug mode.
+ */
+ CVMX_SYNC;
+ /* Flush L1 to make sure debug load misses L1 */
+ CVMX_DCACHE_INVALIDATE;
+
+ local_irq_save(flags);
+
+ /*
+ * The following must be done in assembly as when in debug
+ * mode all data loads from L2 return special debug data, not
+ * normal memory contents. Also, interrupts must be
+ * disabled, since if an interrupt occurs while in debug mode
+ * the ISR will get debug data from all its memory reads
+ * instead of the contents of memory
+ */
+
+ asm volatile (".set push \n"
+ " .set mips64 \n"
+ " .set noreorder \n"
+ /* Enter debug mode, wait for store */
+ " sd %[dbg_val], 0(%[dbg_addr]) \n"
+ " ld $0, 0(%[dbg_addr]) \n"
+ /* Read L2C tag data */
+ " ld %[tag_val], 0(%[tag_addr]) \n"
+ /* Exit debug mode, wait for store */
+ " sd $0, 0(%[dbg_addr]) \n"
+ " ld $0, 0(%[dbg_addr]) \n"
+ /* Invalidate dcache to discard debug data */
+ " cache 9, 0($0) \n"
+ " .set pop" :
+ [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
+ [dbg_val] "r"(debug_val.u64),
+ [tag_addr] "r"(debug_tag_addr) : "memory");
+
+ local_irq_restore(flags);
+ return tag_val;
+
+}
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+ union __cvmx_l2c_tag tmp_tag;
+ union cvmx_l2c_tag tag;
+ tag.u64 = 0;
+
+ if ((int)association >= cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf
+ ("ERROR: cvmx_get_l2c_tag association out of range\n");
+ return tag;
+ }
+ if ((int)index >= cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
+ "index out of range (arg: %d, max: %d)\n",
+ index, cvmx_l2c_get_num_sets());
+ return tag;
+ }
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version, as it
+ * can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
+
+ return tag;
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+ uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+ union cvmx_l2c_cfg l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+
+ if (l2c_cfg.s.idxalias) {
+ idx ^=
+ ((addr & CVMX_L2C_ALIAS_MASK) >>
+ CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
+ idx &= CVMX_L2C_IDX_MASK;
+ return idx;
+}
+
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+ return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+ CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ * Returns the number of set index bits.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+ int l2_set_bits;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ l2_set_bits = 11; /* 2048 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_set_bits = 10; /* 1024 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ l2_set_bits = 9; /* 512 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_set_bits = 8; /* 256 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ l2_set_bits = 7; /* 128 sets */
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_set_bits = 11; /* 2048 sets */
+ }
+ return l2_set_bits;
+
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+ return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+ int l2_assoc;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_assoc = 4;
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_assoc = 8;
+ }
+
+ /* Check to see if part of the cache is disabled */
+ if (cvmx_fuse_read(265))
+ l2_assoc = l2_assoc >> 2;
+ else if (cvmx_fuse_read(264))
+ l2_assoc = l2_assoc >> 1;
+
+ return l2_assoc;
+}
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc: Association (or way) to flush
+ * @index: Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ /*
+ * Enter debug mode, and make sure all other writes complete
+ * before we enter debug mode.
+ */
+ asm volatile ("sync" : : : "memory");
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
+ /* Exit debug mode */
+ asm volatile ("sync" : : : "memory");
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+}
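Putting the locking primitives together: a hedged sketch of pinning a hot buffer into L2 and later releasing it (the buffer is hypothetical; cvmx_ptr_to_phys() is assumed from the cvmx headers). Both region helpers round to cache-line boundaries internally; the return values count lines that failed to lock and lines actually unlocked, respectively.

    /* Hypothetical sketch, not part of the patch. */
    static char hot_table[4096];

    static void example_l2_pinning(void)
    {
            uint64_t phys = cvmx_ptr_to_phys(hot_table);
            int failed, unlocked;

            failed = cvmx_l2c_lock_mem_region(phys, sizeof(hot_table));

            /* ... latency-critical work touching hot_table ... */

            unlocked = cvmx_l2c_unlock_mem_region(phys, sizeof(hot_table));
            (void)failed;
            (void)unlocked;
    }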
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 000000000000..4812370706a1
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,116 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board/application information obtained
+ * by the bootloader.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+/**
+ * This structure defines the private state maintained by the sysinfo module.
+ *
+ */
+static struct {
+ struct cvmx_sysinfo sysinfo; /* system information */
+ cvmx_spinlock_t lock; /* mutex spinlock */
+
+} state = {
+ .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
+};
+
+
+/*
+ * Global variables that define the min/max of the memory region set
+ * up for 32 bit userspace access.
+ */
+uint64_t linux_mem32_min;
+uint64_t linux_mem32_max;
+uint64_t linux_mem32_wired;
+uint64_t linux_mem32_offset;
+
+/**
+ * This function returns the application information as obtained
+ * by the bootloader. This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ *
+ * Returns Pointer to the boot information structure
+ *
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+ return &(state.sysinfo);
+}
+
+/**
+ * This function is used in non-simple executive environments (such as
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * are required to use simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @phy_mem_desc_ptr:
+ *	Pointer to global physical memory descriptor
+ *	(bootmem descriptor)
+ * @board_type:
+ *	Octeon board type enumeration
+ *
+ * @board_rev_major:
+ * Board major revision
+ * @board_rev_minor:
+ * Board minor revision
+ * @cpu_clock_hz:
+ * CPU clock frequency in hertz
+ *
+ * Returns 0: Failure
+ * 1: success
+ */
+int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
+ uint16_t board_type,
+ uint8_t board_rev_major,
+ uint8_t board_rev_minor,
+ uint32_t cpu_clock_hz)
+{
+
+ /* The sysinfo structure was already initialized */
+ if (state.sysinfo.board_type)
+ return 0;
+
+ memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
+ state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
+ state.sysinfo.board_type = board_type;
+ state.sysinfo.board_rev_major = board_rev_major;
+ state.sysinfo.board_rev_minor = board_rev_minor;
+ state.sysinfo.cpu_clock_hz = cpu_clock_hz;
+
+ return 1;
+}
+
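A hedged sketch of the early-boot call into the function above; every argument value here is an assumption chosen for illustration, not taken from this patch.

    /* Hypothetical early-boot sketch; argument values are assumptions. */
    static void example_sysinfo_setup(void *bootmem_desc)
    {
            /* Returns 1 on success, 0 if sysinfo was already set up. */
            if (!cvmx_sysinfo_minimal_initialize(bootmem_desc,
                                                 0x0100,      /* board type */
                                                 1, 0,        /* rev 1.0 */
                                                 500000000))  /* 500 MHz */
                    pr_info("sysinfo was already initialized\n");
    }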
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 000000000000..9afc3794ed1b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,358 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining functions for working with different Octeon
+ * models.
+ */
+#include <asm/octeon/octeon.h>
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *octeon_model_get_string(uint32_t chip_id)
+{
+ static char buffer[32];
+ return octeon_model_get_string_buffer(chip_id, buffer);
+}
+
+/*
+ * Version of octeon_model_get_string() that takes the buffer as an
+ * argument; when running early in u-boot, static/global variables
+ * don't work when executing from flash.
+ */
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
+{
+ const char *family;
+ const char *core_model;
+ char pass[4];
+ int clock_mhz;
+ const char *suffix;
+ union cvmx_l2d_fus3 fus3;
+ int num_cores;
+ union cvmx_mio_fus_dat2 fus_dat2;
+ union cvmx_mio_fus_dat3 fus_dat3;
+ char fuse_model[10];
+ uint32_t fuse_data = 0;
+
+ fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Make sure the non-existent devices look disabled */
+ switch ((chip_id >> 8) & 0xff) {
+ case 6: /* CN50XX */
+ case 2: /* CN30XX */
+ fus_dat3.s.nodfa_dte = 1;
+ fus_dat3.s.nozip = 1;
+ break;
+ case 4: /* CN57XX or CN56XX */
+ fus_dat3.s.nodfa_dte = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* Make a guess at the suffix */
+ /* NSP = everything */
+ /* EXP = No crypto */
+ /* SCP = No DFA, No zip */
+ /* CP = No DFA, No crypto, No zip */
+ if (fus_dat3.s.nodfa_dte) {
+ if (fus_dat2.s.nocrypto)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ } else if (fus_dat2.s.nocrypto)
+ suffix = "EXP";
+ else
+ suffix = "NSP";
+
+ /*
+ * Assume pass number is encoded using <5:3><2:0>. Exceptions
+ * will be fixed later.
+ */
+ sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
+
+ /*
+ * Use the number of cores to determine the last 2 digits of
+ * the model number. There are some exceptions that are fixed
+ * later.
+ */
+ switch (num_cores) {
+ case 16:
+ core_model = "60";
+ break;
+ case 15:
+ core_model = "58";
+ break;
+ case 14:
+ core_model = "55";
+ break;
+ case 13:
+ core_model = "52";
+ break;
+ case 12:
+ core_model = "50";
+ break;
+ case 11:
+ core_model = "48";
+ break;
+ case 10:
+ core_model = "45";
+ break;
+ case 9:
+ core_model = "42";
+ break;
+ case 8:
+ core_model = "40";
+ break;
+ case 7:
+ core_model = "38";
+ break;
+ case 6:
+ core_model = "34";
+ break;
+ case 5:
+ core_model = "32";
+ break;
+ case 4:
+ core_model = "30";
+ break;
+ case 3:
+ core_model = "25";
+ break;
+ case 2:
+ core_model = "20";
+ break;
+ case 1:
+ core_model = "10";
+ break;
+ default:
+ core_model = "XX";
+ break;
+ }
+
+ /* Now figure out the family, the first two digits */
+ switch ((chip_id >> 8) & 0xff) {
+ case 0: /* CN38XX, CN37XX or CN36XX */
+ if (fus3.cn38xx.crip_512k) {
+ /*
+ * For some unknown reason, the 16 core one is
+ * called 37 instead of 36.
+ */
+ if (num_cores >= 16)
+ family = "37";
+ else
+ family = "36";
+ } else
+ family = "38";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.X");
+ break;
+ case 1:
+ strcpy(pass, "2.X");
+ break;
+ case 3:
+ strcpy(pass, "3.X");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 1: /* CN31XX or CN3020 */
+ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+ family = "30";
+ else
+ family = "31";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 2: /* CN3010 or CN3005 */
+ family = "30";
+ /* A chip with half cache is an 05 */
+ if (fus3.cn30xx.crip_64k)
+ core_model = "05";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 3: /* CN58XX */
+ family = "58";
+ /* Special case. 4 core, no crypto */
+ if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
+ core_model = "29";
+
+ /* Pass 1 uses different encodings for pass numbers */
+ if ((chip_id & 0xFF) < 0x8) {
+ switch (chip_id & 0x3) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 1:
+ strcpy(pass, "1.1");
+ break;
+ case 3:
+ strcpy(pass, "1.2");
+ break;
+ default:
+ strcpy(pass, "1.X");
+ break;
+ }
+ }
+ break;
+ case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
+ if (fus_dat2.cn56xx.raid_en) {
+ if (fus3.cn56xx.crip_1024k)
+ family = "55";
+ else
+ family = "57";
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "SP";
+ else
+ suffix = "SSP";
+ } else {
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "CP";
+ else {
+ suffix = "NSP";
+ if (fus_dat3.s.nozip)
+ suffix = "SCP";
+ }
+ if (fus3.cn56xx.crip_1024k)
+ family = "54";
+ else
+ family = "56";
+ }
+ break;
+ case 6: /* CN50XX */
+ family = "50";
+ break;
+ case 7: /* CN52XX */
+ if (fus3.cn52xx.crip_256k)
+ family = "51";
+ else
+ family = "52";
+ break;
+ default:
+ family = "XX";
+ core_model = "XX";
+ strcpy(pass, "X.X");
+ suffix = "XXX";
+ break;
+ }
+
+ clock_mhz = octeon_get_clock_rate() / 1000000;
+
+ if (family[0] != '3') {
+ /* Check for model in fuses, overrides normal decode */
+ /* This is _not_ valid for Octeon CN3XXX models */
+ fuse_data |= cvmx_fuse_read_byte(51);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(50);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(49);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(48);
+ if (fuse_data & 0x7ffff) {
+ int model = fuse_data & 0x3fff;
+ int suffix = (fuse_data >> 14) & 0x1f;
+ if (suffix && model) {
+ /*
+ * Have both the number and suffix in
+ * fuses, so use both.
+ */
+ sprintf(fuse_model, "%d%c",
+ model, 'A' + suffix - 1);
+ core_model = "";
+ family = fuse_model;
+ } else if (suffix && !model) {
+ /*
+ * Only have suffix, so add suffix to
+ * 'normal' model number.
+ */
+ sprintf(fuse_model, "%s%c", core_model,
+ 'A' + suffix - 1);
+ core_model = fuse_model;
+ } else {
+ /*
+ * Don't have suffix, so just use
+ * model from fuses.
+ */
+ sprintf(fuse_model, "%d", model);
+ core_model = "";
+ family = fuse_model;
+ }
+ }
+ }
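+	/*
+	 * Illustration of the resulting string (hypothetical values):
+	 * family "56", core_model "50", pass "2.1", 800 MHz and suffix
+	 * "NSP" would format below as "CN5650p2.1-800-NSP".
+	 */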
+ sprintf(buffer, "CN%s%sp%s-%d-%s",
+ family, core_model, pass, clock_mhz, suffix);
+ return buffer;
+}
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 000000000000..553d36cbcc42
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,84 @@
+/*
+ * Octeon Bootbus flash setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/octeon/octeon.h>
+
+static struct map_info flash_map;
+static struct mtd_info *mymtd;
+#ifdef CONFIG_MTD_PARTITIONS
+static int nr_parts;
+static struct mtd_partition *parts;
+static const char *part_probe_types[] = {
+ "cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ "RedBoot",
+#endif
+ NULL
+};
+#endif
+
+/**
+ * Module/driver initialization.
+ *
+ * Returns zero on success.
+ */
+static int __init flash_init(void)
+{
+ /*
+ * Read the bootbus region 0 setup to determine the base
+ * address of the flash.
+ */
+ union cvmx_mio_boot_reg_cfgx region_cfg;
+ region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
+ if (region_cfg.s.en) {
+ /*
+ * The bootloader always takes the flash and sets its
+ * address so the entire flash fits below
+ * 0x1fc00000. This way the flash aliases to
+ * 0x1fc00000 for booting. Software can access the
+ * full flash at the true address, while core boot can
+ * access 4MB.
+ */
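+		/*
+		 * Illustration (hypothetical base): a region configured
+		 * at 0x1f400000 gives a map size of 0x1fc00000 -
+		 * 0x1f400000 = 8MB.
+		 */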
+ /* Use this name so old part lines work */
+ flash_map.name = "phys_mapped_flash";
+ flash_map.phys = region_cfg.s.base << 16;
+ flash_map.size = 0x1fc00000 - flash_map.phys;
+ flash_map.bankwidth = 1;
+ flash_map.virt = ioremap(flash_map.phys, flash_map.size);
+ pr_notice("Bootbus flash: Setting flash for %luMB flash at "
+ "0x%08lx\n", flash_map.size >> 20, flash_map.phys);
+ simple_map_init(&flash_map);
+ mymtd = do_map_probe("cfi_probe", &flash_map);
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(mymtd,
+ part_probe_types,
+ &parts, 0);
+ if (nr_parts > 0)
+ add_mtd_partitions(mymtd, parts, nr_parts);
+ else
+ add_mtd_device(mymtd);
+#else
+ add_mtd_device(mymtd);
+#endif
+ } else {
+ pr_err("Failed to register MTD device for flash\n");
+ }
+ }
+ return 0;
+}
+
+late_initcall(flash_init);
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 000000000000..fc72984a5dae
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,497 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include <asm/octeon/octeon.h>
+
+DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
+DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
+DEFINE_SPINLOCK(octeon_irq_msi_lock);
+
+static void octeon_irq_core_ack(unsigned int irq)
+{
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ clear_c0_status(0x100 << bit);
+ /* The two user interrupts must be cleared manually. */
+ if (bit < 2)
+ clear_c0_cause(0x100 << bit);
+}
+
+static void octeon_irq_core_eoi(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * If an IRQ is being processed while we are disabling it the
+ * handler will attempt to unmask the interrupt after it has
+ * been disabled.
+ */
+ if (desc->status & IRQ_DISABLED)
+ return;
+
+ /* There is a race here. We should fix it. */
+
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ set_c0_status(0x100 << bit);
+}
+
+static void octeon_irq_core_enable(unsigned int irq)
+{
+ unsigned long flags;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+
+ /*
+ * We need to disable interrupts to make sure our updates are
+ * atomic.
+ */
+ local_irq_save(flags);
+ set_c0_status(0x100 << bit);
+ local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable_local(unsigned int irq)
+{
+ unsigned long flags;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * We need to disable interrupts to make sure our updates are
+ * atomic.
+ */
+ local_irq_save(flags);
+ clear_c0_status(0x100 << bit);
+ local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+ on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
+ (void *) (long) irq, 1);
+#else
+ octeon_irq_core_disable_local(irq);
+#endif
+}
+
+static struct irq_chip octeon_irq_chip_core = {
+ .name = "Core",
+ .enable = octeon_irq_core_enable,
+ .disable = octeon_irq_core_disable,
+ .ack = octeon_irq_core_ack,
+ .eoi = octeon_irq_core_eoi,
+};
+
+
+static void octeon_irq_ciu0_ack(unsigned int irq)
+{
+ /*
+ * In order to avoid any locking accessing the CIU, we
+ * acknowledge CIU interrupts by disabling all of them. This
+ * way we can use a per core register and avoid any out of
+ * core locking requirements. This has the side effect that
+ * CIU interrupts can't be processed recursively.
+ *
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ clear_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_eoi(unsigned int irq)
+{
+ /*
+ * Enable all CIU interrupts again. We don't need to disable
+ * IRQs to make these atomic since they are already disabled
+ * earlier in the low level interrupt code.
+ */
+ set_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_enable(unsigned int irq)
+{
+ int coreid = cvmx_get_core_num();
+ unsigned long flags;
+ uint64_t en0;
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+
+ /*
+ * A read lock is used here to make sure only one core is ever
+ * updating the CIU enable bits at a time. During an enable
+ * the cores don't interfere with each other. During a disable
+ * the write lock stops any enables that might cause a
+ * problem.
+ */
+ read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 |= 1ull << bit;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+}
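+/*
+ * A note on the "coreid * 2" indexing above: judging from
+ * plat_irq_dispatch() below, each core appears to own a pair of CIU
+ * enable registers, with the even index used for the IP2 summary and
+ * the odd index for the IP3 summary.
+ */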
+
+static void octeon_irq_ciu0_disable(unsigned int irq)
+{
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+ unsigned long flags;
+ uint64_t en0;
+#ifdef CONFIG_SMP
+ int cpu;
+ write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+ write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+#else
+ int coreid = cvmx_get_core_num();
+ local_irq_save(flags);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ int cpu;
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+
+ write_lock(&octeon_irq_ciu0_rwlock);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ uint64_t en0 =
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ if (cpumask_test_cpu(cpu, dest))
+ en0 |= 1ull << bit;
+ else
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+ write_unlock(&octeon_irq_ciu0_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu0 = {
+ .name = "CIU0",
+ .enable = octeon_irq_ciu0_enable,
+ .disable = octeon_irq_ciu0_disable,
+ .ack = octeon_irq_ciu0_ack,
+ .eoi = octeon_irq_ciu0_eoi,
+#ifdef CONFIG_SMP
+ .set_affinity = octeon_irq_ciu0_set_affinity,
+#endif
+};
+
+
+static void octeon_irq_ciu1_ack(unsigned int irq)
+{
+ /*
+ * In order to avoid any locking accessing the CIU, we
+ * acknowledge CIU interrupts by disabling all of them. This
+ * way we can use a per core register and avoid any out of
+ * core locking requirements. This has the side effect that
+ * CIU interrupts can't be processed recursively. We don't
+ * need to disable IRQs to make these atomic since they are
+ * already disabled earlier in the low level interrupt code.
+ */
+ clear_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_eoi(unsigned int irq)
+{
+ /*
+ * Enable all CIU interrupts again. We don't need to disable
+ * IRQs to make these atomic since they are already disabled
+ * earlier in the low level interrupt code.
+ */
+ set_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_enable(unsigned int irq)
+{
+ int coreid = cvmx_get_core_num();
+ unsigned long flags;
+ uint64_t en1;
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+
+ /*
+ * A read lock is used here to make sure only one core is ever
+ * updating the CIU enable bits at a time. During an enable
+ * the cores don't interfere with each other. During a disable
+ * the write lock stops any enables that might cause a
+ * problem.
+ */
+ read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 |= 1ull << bit;
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+}
+
+static void octeon_irq_ciu1_disable(unsigned int irq)
+{
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+ unsigned long flags;
+ uint64_t en1;
+#ifdef CONFIG_SMP
+ int cpu;
+ write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+ write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+#else
+ int coreid = cvmx_get_core_num();
+ local_irq_save(flags);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ int cpu;
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+
+ write_lock(&octeon_irq_ciu1_rwlock);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ uint64_t en1 =
+ cvmx_read_csr(CVMX_CIU_INTX_EN1
+ (coreid * 2 + 1));
+ if (cpumask_test_cpu(cpu, dest))
+ en1 |= 1ull << bit;
+ else
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+ write_unlock(&octeon_irq_ciu1_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu1 = {
+ .name = "CIU1",
+ .enable = octeon_irq_ciu1_enable,
+ .disable = octeon_irq_ciu1_disable,
+ .ack = octeon_irq_ciu1_ack,
+ .eoi = octeon_irq_ciu1_eoi,
+#ifdef CONFIG_SMP
+ .set_affinity = octeon_irq_ciu1_set_affinity,
+#endif
+};
+
+#ifdef CONFIG_PCI_MSI
+
+static void octeon_irq_msi_ack(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /* These chips have PCI */
+ cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
+ 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ } else {
+ /*
+ * These chips have PCIe. Thankfully the ACK doesn't
+ * need any locking.
+ */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
+ 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ }
+}
+
+static void octeon_irq_msi_eoi(unsigned int irq)
+{
+ /* Nothing needed */
+}
+
+static void octeon_irq_msi_enable(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /*
+ * Octeon PCI doesn't have the ability to mask/unmask
+ * MSI interrupts individually. Instead of
+ * masking/unmasking them in groups of 16, we simply
+ * assume MSI devices are well behaved. MSI
+ * interrupts are always enabled and the ACK is assumed
+ * to be enough.
+ */
+ } else {
+ /* These chips have PCIe. Note that we only support
+ * the first 64 MSI interrupts. Unfortunately all the
+ * MSI enables are in the same register. We use
+ * MSI0's lock to control access to them all.
+ */
+ uint64_t en;
+ unsigned long flags;
+ spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+ cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+ }
+}
+
+static void octeon_irq_msi_disable(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /* See comment in enable */
+ } else {
+ /*
+ * These chips have PCIe. Note that we only support
+ * the first 64 MSI interrupts. Unfortunately all the
+ * MSI enables are in the same register. We use
+ * MSI0's lock to control access to them all.
+ */
+ uint64_t en;
+ unsigned long flags;
+ spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+ cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+ }
+}
+
+static struct irq_chip octeon_irq_chip_msi = {
+ .name = "MSI",
+ .enable = octeon_irq_msi_enable,
+ .disable = octeon_irq_msi_disable,
+ .ack = octeon_irq_msi_ack,
+ .eoi = octeon_irq_msi_eoi,
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ int irq;
+
+#ifdef CONFIG_SMP
+ /* Set the default affinity to the boot cpu. */
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+#endif
+
+ if (NR_IRQS < OCTEON_IRQ_LAST)
+ pr_err("octeon_irq_init: NR_IRQS is set too low\n");
+
+ /* 0 - 15 reserved for i8259 master and slave controller. */
+
+ /* 17 - 23 Mips internal */
+ for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
+ }
+
+ /* 24 - 87 CIU_INT_SUM0 */
+ for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
+ handle_percpu_irq);
+ }
+
+ /* 88 - 151 CIU_INT_SUM1 */
+ for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
+ handle_percpu_irq);
+ }
+
+#ifdef CONFIG_PCI_MSI
+ /* 152 - 215 PCI/PCIe MSI interrupts */
+ for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
+ handle_percpu_irq);
+ }
+#endif
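+	/*
+	 * 0x300 << 2 == 0xc00, i.e. the status IM bits for IP2 and IP3:
+	 * unmask the two lines the CIU0 and CIU1 summaries are routed to.
+	 */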
+ set_c0_status(0x300 << 2);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ const unsigned long core_id = cvmx_get_core_num();
+ const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
+ const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
+ const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
+ const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
+ unsigned long cop0_cause;
+ unsigned long cop0_status;
+ uint64_t ciu_en;
+ uint64_t ciu_sum;
+
+ while (1) {
+ cop0_cause = read_c0_cause();
+ cop0_status = read_c0_status();
+ cop0_cause &= cop0_status;
+ cop0_cause &= ST0_IM;
+
+ if (unlikely(cop0_cause & STATUSF_IP2)) {
+ ciu_sum = cvmx_read_csr(ciu_sum0_address);
+ ciu_en = cvmx_read_csr(ciu_en0_address);
+ ciu_sum &= ciu_en;
+ if (likely(ciu_sum))
+ do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
+ else
+ spurious_interrupt();
+ } else if (unlikely(cop0_cause & STATUSF_IP3)) {
+ ciu_sum = cvmx_read_csr(ciu_sum1_address);
+ ciu_en = cvmx_read_csr(ciu_en1_address);
+ ciu_sum &= ciu_en;
+ if (likely(ciu_sum))
+ do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
+ else
+ spurious_interrupt();
+ } else if (likely(cop0_cause)) {
+ do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
+ } else {
+ break;
+ }
+ }
+}
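+/*
+ * Worked example of the mapping above: if only bit 5 of the masked
+ * ciu_sum is set, fls64() returns 6, so the dispatched handler is
+ * do_IRQ(OCTEON_IRQ_WORKQ0 + 5); when several bits are pending, the
+ * highest-numbered one wins.
+ */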
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 000000000000..88e0cddca205
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,521 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ * memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ * - src and dst don't overlap
+ * - src is readable
+ * - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ * copy_to_user
+ * - src is readable (no exceptions when reading src)
+ * copy_from_user
+ * - dst is writable (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * arch/mips/include/asm/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ * 1- AT contains the address of the byte just past the end of the source
+ *    of the copy,
+ * 2- src_entry <= src < AT, and
+ * 3- (dst - src) == (dst_entry - src_entry).
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+#define EXC(inst_reg,addr,handler) \
+9: inst_reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
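+
+/*
+ * Expansion sketch: EXC(LOAD t0, UNIT(0)(src), l_exc) emits
+ * "9: LOAD t0, UNIT(0)(src)" and records the pair (9b, l_exc) in the
+ * __ex_table section, so a fault on that load is redirected to l_exc.
+ */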
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_64BIT
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD ld
+#define LOADL ldl
+#define LOADR ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE sd
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SRA dsra
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we share a code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the registers from the
+ * n64 ABI naming to the o32 ABI naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#else
+
+#define LOAD lw
+#define LOADL lwl
+#define LOADR lwr
+#define STOREL swl
+#define STORER swr
+#define STORE sw
+#define ADD addu
+#define SUB subu
+#define SRL srl
+#define SLL sll
+#define SRA sra
+#define SLLV sllv
+#define SRLV srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+#define UNIT(unit) FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
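+
+/*
+ * For example, with NBYTES == 8, FIRST(1) is offset 8 and REST(1) is
+ * offset 15: an LDFIRST/LDREST pair together covers bytes 8..15 of
+ * the unaligned source word.
+ */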
+
+ .text
+ .set noreorder
+ .set noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, dst /* return value */
+__memcpy:
+FEXPORT(__copy_user)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ #
+ # Octeon doesn't care if the destination is unaligned. The hardware
+ # can fix it faster than we can special case the assembly.
+ #
+ pref 0, 0(src)
+ sltu t0, len, NBYTES # Check if < 1 word
+ bnez t0, copy_bytes_checklen
+ and t0, src, ADDRMASK # Check if src unaligned
+ bnez t0, src_unaligned
+ sltu t0, len, 4*NBYTES # Check if < 4 words
+ bnez t0, less_than_4units
+ sltu t0, len, 8*NBYTES # Check if < 8 words
+ bnez t0, less_than_8units
+ sltu t0, len, 16*NBYTES # Check if < 16 words
+ bnez t0, cleanup_both_aligned
+ sltu t0, len, 128+1 # Check if len < 129
+ bnez t0, 1f # Skip prefetch if len is too short
+ sltu t0, len, 256+1 # Check if len < 257
+ bnez t0, 1f # Skip prefetch if len is too short
+ pref 0, 128(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if there is more than 128 bytes left
+2: pref 0, 256(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if we can't prefetch anymore
+1:
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 16*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
+ ADD src, src, 16*NBYTES
+EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
+ ADD dst, dst, 16*NBYTES
+EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
+EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
+EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
+ sltu t0, len, 256+1 # See if we can prefetch more
+ beqz t0, 2b
+ sltu t0, len, 128 # See if we can loop one more time
+ beqz t0, 1b
+ nop
+ #
+ # Jump here if there are less than 16*NBYTES left.
+ #
+cleanup_both_aligned:
+ beqz len, done
+ sltu t0, len, 8*NBYTES
+ bnez t0, less_than_8units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
+ ADD src, src, 8*NBYTES
+ beqz len, done
+ ADD dst, dst, 8*NBYTES
+ #
+ # Jump here if there are less than 8*NBYTES left.
+ #
+less_than_8units:
+ sltu t0, len, 4*NBYTES
+ bnez t0, less_than_4units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ ADD src, src, 4*NBYTES
+ beqz len, done
+ ADD dst, dst, 4*NBYTES
+ #
+ # Jump here if there are less than 4*NBYTES left. This means
+ # we may need to copy up to 3 NBYTES words.
+ #
+less_than_4units:
+ sltu t0, len, 1*NBYTES
+ bnez t0, copy_bytes_checklen
+ nop
+ #
+ # 1) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 2) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 3) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ ADD src, src, NBYTES
+ ADD dst, dst, NBYTES
+ b copy_bytes_checklen
+EXC( STORE t0, -8(dst), s_exc_p1u)
+
+src_unaligned:
+#define rem t8
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ beqz t0, cleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+EXC( LDREST t1, REST(1)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDREST t2, REST(2)(src), l_exc_copy)
+EXC( LDREST t3, REST(3)(src), l_exc_copy)
+ ADD src, src, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ bne len, rem, 1b
+ ADD dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+ beqz len, done
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, copy_bytes
+ nop
+1:
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bne len, rem, 1b
+ ADD dst, dst, NBYTES
+
+copy_bytes_checklen:
+ beqz len, done
+ nop
+copy_bytes:
+ /* 0 < len < NBYTES */
+#define COPY_BYTE(N) \
+EXC( lb t0, N(src), l_exc); \
+ SUB len, len, 1; \
+ beqz len, done; \
+EXC( sb t0, N(dst), s_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lb t0, NBYTES-2(src), l_exc)
+ SUB len, len, 1
+ jr ra
+EXC( sb t0, NBYTES-2(dst), s_exc_p1)
+done:
+ jr ra
+ nop
+ END(memcpy)
+
+l_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0)
+1:
+EXC( lb t1, 0(src), l_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ bne src, t0, 1b
+ ADD dst, dst, 1
+l_exc:
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ nop
+ SUB len, AT, t0 # len = number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ beqz len, done
+ SUB src, len, 1
+1: sb zero, 0(dst)
+ ADD dst, dst, 1
+ bnez src, 1b
+ SUB src, src, 1
+ jr ra
+ nop
+
+
+#define SEXC(n) \
+s_exc_p ## n ## u: \
+ jr ra; \
+ ADD len, len, n*NBYTES
+
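+/*
+ * Each SEXC(n) below expands to a store-fault handler named
+ * s_exc_p<n>u that returns after adding n*NBYTES back onto len,
+ * i.e. it reports the n units not yet stored as uncopied bytes.
+ */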
+SEXC(16)
+SEXC(15)
+SEXC(14)
+SEXC(13)
+SEXC(12)
+SEXC(11)
+SEXC(10)
+SEXC(9)
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+ jr ra
+ ADD len, len, 1
+s_exc:
+ jr ra
+ nop
+
+ .align 5
+LEAF(memmove)
+ ADD t0, a0, a2
+ ADD t1, a1, a2
+ sltu t0, a1, t0 # dst + len <= src -> memcpy
+ sltu t1, a0, t1 # dst >= src + len -> memcpy
+ and t0, t1
+ beqz t0, __memcpy
+ move v0, a0 /* return value */
+ beqz a2, r_out
+ END(memmove)
+
+ /* fall through to __rmemcpy */
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ sltu t0, a1, a0
+ beqz t0, r_end_bytes_up # src >= dst
+ nop
+ ADD a0, a2 # dst = dst + len
+ ADD a1, a2 # src = src + len
+
+r_end_bytes:
+ lb t0, -1(a1)
+ SUB a2, a2, 0x1
+ sb t0, -1(a0)
+ SUB a1, a1, 0x1
+ bnez a2, r_end_bytes
+ SUB a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+r_end_bytes_up:
+ lb t0, (a1)
+ SUB a2, a2, 0x1
+ sb t0, (a0)
+ ADD a1, a1, 0x1
+ bnez a2, r_end_bytes_up
+ ADD a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
new file mode 100644
index 000000000000..8240728d485a
--- /dev/null
+++ b/arch/mips/cavium-octeon/serial.c
@@ -0,0 +1,136 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ */
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#include <linux/tty.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_GDB_CONSOLE
+#define DEBUG_UART 0
+#else
+#define DEBUG_UART 1
+#endif
+
+unsigned int octeon_serial_in(struct uart_port *up, int offset)
+{
+ int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+ if (offset == UART_IIR && (rv & 0xf) == 7) {
+ /* Busy interrupt, read the USR (39) and try again. */
+ cvmx_read_csr((uint64_t)(up->membase + (39 << 3)));
+ rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+ }
+ return rv;
+}
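+/*
+ * The "offset << 3" above matches the regshift of 3 set below:
+ * Octeon maps each 8250 register on its own 8-byte boundary.
+ */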
+
+void octeon_serial_out(struct uart_port *up, int offset, int value)
+{
+ /*
+ * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits
+ * working.
+ */
+ if (offset == UART_LCR)
+ value &= 0x9f;
+ cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
+}
+
+/*
+ * Allocated in .bss, so it is all zeroed.
+ */
+#define OCTEON_MAX_UARTS 3
+static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
+static struct platform_device octeon_uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = octeon_uart8250_data,
+ },
+};
+
+static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
+{
+ p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+ p->type = PORT_OCTEON;
+ p->iotype = UPIO_MEM;
+ p->regshift = 3; /* I/O addresses are every 8 bytes */
+ p->uartclk = mips_hpt_frequency;
+ p->serial_in = octeon_serial_in;
+ p->serial_out = octeon_serial_out;
+}
+
+static int __init octeon_serial_init(void)
+{
+ int enable_uart0;
+ int enable_uart1;
+ int enable_uart2;
+ struct plat_serial8250_port *p;
+
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ /*
+ * If we are configured to run as the second of two kernels,
+ * disable uart0 and enable uart1. Uart0 is owned by the first
+ * kernel.
+ */
+ enable_uart0 = 0;
+ enable_uart1 = 1;
+#else
+ /*
+ * We are configured for the first kernel. We'll enable uart0
+ * if the bootloader told us to use 0, otherwise we'll enable
+ * uart1.
+ */
+ enable_uart0 = (octeon_get_boot_uart() == 0);
+ enable_uart1 = (octeon_get_boot_uart() == 1);
+#ifdef CONFIG_KGDB
+ enable_uart1 = 1;
+#endif
+#endif
+
+ /* Right now CN52XX is the only chip with a third uart */
+ enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+ p = octeon_uart8250_data;
+ if (enable_uart0) {
+ /* Add a ttyS device for hardware uart 0 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
+ p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART0;
+ p++;
+ }
+
+ if (enable_uart1) {
+ /* Add a ttyS device for hardware uart 1 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
+ p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART1;
+ p++;
+ }
+ if (enable_uart2) {
+ /* Add a ttyS device for hardware uart 2 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UART2_RBR;
+ p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART2;
+ p++;
+ }
+
+ BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
+
+ return platform_device_register(&octeon_uart8250_device);
+}
+
+device_initcall(octeon_serial_init);
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 000000000000..e085feddb4a4
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,929 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ * Copyright (C) 2008 Wind River Systems
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/serial.h>
+#include <linux/types.h>
+#include <linux/string.h> /* for memset */
+#include <linux/tty.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+
+#include <asm/processor.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/system.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+extern void cvmx_interrupt_rsl_decode(void);
+extern int __cvmx_interrupt_ecc_report_single_bit_errors;
+extern void cvmx_interrupt_rsl_enable(void);
+#endif
+
+extern struct plat_smp_ops octeon_smp_ops;
+
+#ifdef CONFIG_PCI
+extern void pci_console_init(const char *arg);
+#endif
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+extern uint64_t octeon_reserve32_memory;
+#endif
+static unsigned long long MAX_MEMORY = 512ull << 20;
+
+struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+
+struct cvmx_bootinfo *octeon_bootinfo;
+EXPORT_SYMBOL(octeon_bootinfo);
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
+static int octeon_uart;
+
+extern asmlinkage void handle_int(void);
+extern asmlinkage void plat_irq_dispatch(void);
+
+/**
+ * Return non-zero if we are currently running in the Octeon simulator.
+ *
+ * Returns non-zero under simulation, zero on real hardware.
+ */
+int octeon_is_simulation(void)
+{
+ return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
+}
+EXPORT_SYMBOL(octeon_is_simulation);
+
+/**
+ * Return true if Octeon is in PCI Host mode. This means
+ * Linux can control the PCI bus.
+ *
+ * Returns non-zero if Octeon is in host mode.
+ */
+int octeon_is_pci_host(void)
+{
+#ifdef CONFIG_PCI
+ return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Get the clock rate of Octeon
+ *
+ * Returns the clock rate in Hz.
+ */
+uint64_t octeon_get_clock_rate(void)
+{
+ if (octeon_is_simulation())
+ octeon_bootinfo->eclock_hz = 6000000;
+ return octeon_bootinfo->eclock_hz;
+}
+EXPORT_SYMBOL(octeon_get_clock_rate);
+
+/**
+ * Write to the LCD display connected to the bootbus. This display
+ * exists on most Cavium evaluation boards. If it doesn't exist, then
+ * this function doesn't do anything.
+ *
+ * @s: String to write
+ */
+void octeon_write_lcd(const char *s)
+{
+ if (octeon_bootinfo->led_display_base_addr) {
+ void __iomem *lcd_address =
+ ioremap_nocache(octeon_bootinfo->led_display_base_addr,
+ 8);
+ int i;
+ for (i = 0; i < 8; i++, s++) {
+ if (*s)
+ iowrite8(*s, lcd_address + i);
+ else
+ iowrite8(' ', lcd_address + i);
+ }
+ iounmap(lcd_address);
+ }
+}
+
+/**
+ * Return the console uart passed by the bootloader.
+ *
+ * Returns the uart number (0 or 1).
+ */
+int octeon_get_boot_uart(void)
+{
+ int uart;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ uart = 1;
+#else
+ uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+ 1 : 0;
+#endif
+ return uart;
+}
+
+/**
+ * Get the coremask Linux was booted on.
+ *
+ * Returns the core mask.
+ */
+int octeon_get_boot_coremask(void)
+{
+ return octeon_boot_desc_ptr->core_mask;
+}
+
+/**
+ * Check the hardware BIST results for a CPU
+ */
+void octeon_check_cpu_bist(void)
+{
+ const int coreid = cvmx_get_core_num();
+ unsigned long long mask;
+ unsigned long long bist_val;
+
+ /* Check BIST results for COP0 registers */
+ mask = 0x1f00000000ull;
+ bist_val = read_octeon_c0_icacheerr();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
+ coreid, bist_val);
+
+ bist_val = read_octeon_c0_dcacheerr();
+ if (bist_val & 1)
+ pr_err("Core%d L1 Dcache parity error: "
+ "CacheErr(dcache) = 0x%llx\n",
+ coreid, bist_val);
+
+ mask = 0xfc00000000000000ull;
+ bist_val = read_c0_cvmmemctl();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
+ coreid, bist_val);
+
+ write_octeon_c0_dcacheerr(0);
+}
+
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+/**
+ * Called on every core to set up the wired TLB entry needed
+ * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
+ *
+ */
+static void octeon_hal_setup_per_cpu_reserved32(void *unused)
+{
+ /*
+ * The config has selected to wire the reserve32 memory for all
+ * userspace applications. We need to put a wired TLB entry in for each
+ * 512MB of reserve32 memory. We only handle double 256MB pages here,
+ * so reserve32 must be a multiple of 512MB.
+ */
+ uint32_t size = CONFIG_CAVIUM_RESERVE32;
+ uint32_t entrylo0 =
+ 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
+ uint32_t entrylo1 = entrylo0 + (256 << 14);
+ uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
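+	/*
+	 * Arithmetic sketch: EntryLo holds the physical address shifted
+	 * right by 6, so "256 << 14" is 256MB >> 6 (the second 256MB
+	 * half of the pair) and each pass of the loop below advances
+	 * the wired pair by 512MB.
+	 */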
+ while (size >= 512) {
+#if 0
+ pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
+ smp_processor_id(), entryhi);
+#endif
+ add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
+ entrylo0 += 512 << 14;
+ entrylo1 += 512 << 14;
+ entryhi += 512 << 20;
+ size -= 512;
+ }
+}
+#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
+
+/**
+ * Called to release the named block which was used to make sure
+ * that nobody used the memory for something else during
+ * init. Now we'll free it so userspace apps can use this
+ * memory region with bootmem_alloc.
+ *
+ * This function is called only once from prom_free_prom_memory().
+ */
+void octeon_hal_setup_reserved32(void)
+{
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+ on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 0, 1);
+#endif
+}
+
+/**
+ * Reboot Octeon
+ *
+ * @command: Command to pass to the bootloader. Currently ignored.
+ */
+static void octeon_restart(char *command)
+{
+ /* Disable all watchdogs before soft reset. They don't get cleared */
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+
+ mb();
+ while (1)
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+}
+
+
+/**
+ * Permanently stop a core.
+ *
+ * @arg: Ignored.
+ */
+static void octeon_kill_core(void *arg)
+{
+ mb();
+ if (octeon_is_simulation()) {
+ /* The simulator needs the watchdog to stop for dead cores */
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ /* A break instruction causes the simulator to stop a core */
+ asm volatile ("sync\nbreak");
+ }
+}
+
+
+/**
+ * Halt the system
+ */
+static void octeon_halt(void)
+{
+ smp_call_function(octeon_kill_core, NULL, 0);
+
+ switch (octeon_bootinfo->board_type) {
+ case CVMX_BOARD_TYPE_NAO38:
+ /* Driving a 1 to GPIO 12 shuts off this board */
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
+ cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
+ break;
+ default:
+ octeon_write_lcd("PowerOff");
+ break;
+ }
+
+ octeon_kill_core(NULL);
+}
+
+#if 0
+/**
+ * Platform time init specifics.
+ * Returns
+ */
+void __init plat_time_init(void)
+{
+ /* Nothing special here, but we are required to have one */
+}
+
+#endif
+
+/**
+ * Handle all the error condition interrupts that might occur.
+ *
+ */
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
+{
+ cvmx_interrupt_rsl_decode();
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * Return a string representing the system type.
+ */
+const char *octeon_board_type_string(void)
+{
+ static char name[80];
+ sprintf(name, "%s (%s)",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type),
+ octeon_model_get_string(read_c0_prid()));
+ return name;
+}
+
+const char *get_system_type(void)
+ __attribute__ ((alias("octeon_board_type_string")));
+
+void octeon_user_io_init(void)
+{
+ union octeon_cvmemctl cvmmemctl;
+ union cvmx_iob_fau_timeout fau_timeout;
+ union cvmx_pow_nw_tim nm_tim;
+ uint64_t cvmctl;
+
+ /* Get the current settings for CP0_CVMMEMCTL_REG */
+ cvmmemctl.u64 = read_c0_cvmmemctl();
+ /* R/W If set, marked write-buffer entries time out the same
+ * as other entries; if clear, marked write-buffer entries
+ * use the maximum timeout. */
+ cvmmemctl.s.dismarkwblongto = 1;
+ /* R/W If set, a merged store does not clear the write-buffer
+ * entry timeout state. */
+ cvmmemctl.s.dismrgclrwbto = 0;
+ /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
+ * word location for an IOBDMA. The other 8 bits come from the
+ * SCRADDR field of the IOBDMA. */
+ cvmmemctl.s.iobdmascrmsb = 0;
+ /* R/W If set, SYNCWS and SYNCS only order marked stores; if
+ * clear, SYNCWS and SYNCS only order unmarked
+ * stores. SYNCWSMARKED has no effect when DISSYNCWS is
+ * set. */
+ cvmmemctl.s.syncwsmarked = 0;
+ /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
+ cvmmemctl.s.dissyncws = 0;
+ /* R/W If set, no stall happens on write buffer full. */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ cvmmemctl.s.diswbfst = 1;
+ else
+ cvmmemctl.s.diswbfst = 0;
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with <48>==0 */
+ cvmmemctl.s.xkmemenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==0 */
+ cvmmemctl.s.xkmemenau = 0;
+
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenau = 0;
+
+ /* R/W If set, all stores act as SYNCW (NOMERGE must be set
+ * when this is set) RW, reset to 0. */
+ cvmmemctl.s.allsyncw = 0;
+
+ /* R/W If set, no stores merge, and all stores reach the
+ * coherent bus in order. */
+ cvmmemctl.s.nomerge = 0;
+ /* R/W Selects the bit in the counter used for DID time-outs:
+ * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
+ * between 1x and 2x this interval. For example, with
+ * DIDTTO=3, expiration interval is between 16K and 32K. */
+ cvmmemctl.s.didtto = 0;
+ /* R/W If set, the (mem) CSR clock never turns off. */
+ cvmmemctl.s.csrckalwys = 0;
+ /* R/W If set, mclk never turns off. */
+ cvmmemctl.s.mclkalwys = 0;
+ /* R/W Selects the bit in the counter used for write buffer
+ * flush time-outs: (WBFLT + 11) is the bit position in an
+ * internal counter used to determine expiration. The write
+ * buffer expires between 1x and 2x this interval. For
+ * example, with WBFLT = 0, a write buffer expires between 2K
+ * and 4K cycles after the write buffer entry is allocated. */
+ cvmmemctl.s.wbfltime = 0;
+ /* R/W If set, do not put Istream in the L2 cache. */
+ cvmmemctl.s.istrnol2 = 0;
+ /* R/W The write buffer threshold. */
+ cvmmemctl.s.wbthresh = 10;
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * kernel/debug mode. */
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ cvmmemctl.s.cvmsegenak = 1;
+#else
+ cvmmemctl.s.cvmsegenak = 0;
+#endif
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * supervisor mode. */
+ cvmmemctl.s.cvmsegenas = 0;
+ /* R/W If set, CVMSEG is available for loads/stores in user
+ * mode. */
+ cvmmemctl.s.cvmsegenau = 0;
+ /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
+ * is max legal value. */
+ cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
+
+
+ if (smp_processor_id() == 0)
+ pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
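+	/* The "* 128" reflects Octeon's 128-byte L1 cache line: 54
+	 * lines is the 6912-byte maximum quoted above. */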
+
+ write_c0_cvmmemctl(cvmmemctl.u64);
+
+ /* Move the performance counter interrupts to IRQ 6 */
+ cvmctl = read_c0_cvmctl();
+ cvmctl &= ~(7 << 7);
+ cvmctl |= 6 << 7;
+ write_c0_cvmctl(cvmctl);
+
+ /* Set a default for the hardware timeouts */
+ fau_timeout.u64 = 0;
+ fau_timeout.s.tout_val = 0xfff;
+ /* Disable tagwait FAU timeout */
+ fau_timeout.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+
+ nm_tim.u64 = 0;
+ /* 4096 cycles */
+ nm_tim.s.nw_tim = 3;
+ cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+
+ write_octeon_c0_icacheerr(0);
+ write_c0_derraddr1(0);
+}
+
+/**
+ * Early entry point for arch setup
+ */
+void __init prom_init(void)
+{
+ struct cvmx_sysinfo *sysinfo;
+ const int coreid = cvmx_get_core_num();
+ int i;
+ int argc;
+ struct uart_port octeon_port;
+#ifdef CONFIG_CAVIUM_RESERVE32
+ int64_t addr = -1;
+#endif
+ /*
+ * The bootloader passes a pointer to the boot descriptor in
+ * $a3; this is available as fw_arg3.
+ */
+ octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+ octeon_bootinfo =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+ cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+
+ /*
+ * Only enable the LED controller if we're running on a CN38XX, CN58XX,
+ * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
+ */
+ if (!octeon_is_simulation() &&
+ octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
+ cvmx_write_csr(CVMX_LED_EN, 0);
+ cvmx_write_csr(CVMX_LED_PRT, 0);
+ cvmx_write_csr(CVMX_LED_DBG, 0);
+ cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
+ cvmx_write_csr(CVMX_LED_EN, 1);
+ }
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * We need to temporarily allocate all memory in the reserve32
+ * region. This makes sure the kernel doesn't allocate this
+ * memory when it is getting memory from the
+ * bootloader. Later, after the memory allocations are
+ * complete, the reserve32 will be freed.
+ */
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+ if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
+ pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
+ "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
+ "is set\n");
+ else
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 512 << 20,
+ "CAVIUM_RESERVE32", 0);
+#else
+ /*
+ * Allocate memory for RESERVED32 aligned on 2MB boundary. This
+ * is in case we later use hugetlb entries with it.
+ */
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 2 << 20,
+ "CAVIUM_RESERVE32", 0);
+#endif
+ if (addr < 0)
+ pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+ else
+ octeon_reserve32_memory = addr;
+#endif
+
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
+ if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
+ pr_info("Skipping L2 locking due to reduced L2 cache size\n");
+ } else {
+ uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
+ /* TLB refill */
+ cvmx_l2c_lock_mem_region(ebase, 0x100);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ /* General exception */
+ cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ /* Interrupt handler */
+ cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
+ cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
+#endif
+ }
+#endif
+
+ sysinfo = cvmx_sysinfo_get();
+ memset(sysinfo, 0, sizeof(*sysinfo));
+ sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+ sysinfo->phy_mem_desc_ptr =
+ cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
+ sysinfo->core_mask = octeon_bootinfo->core_mask;
+ sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+ sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+ sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+ sysinfo->board_type = octeon_bootinfo->board_type;
+ sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+ sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+ memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+ sizeof(sysinfo->mac_addr_base));
+ sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+ memcpy(sysinfo->board_serial_number,
+ octeon_bootinfo->board_serial_number,
+ sizeof(sysinfo->board_serial_number));
+ sysinfo->compact_flash_common_base_addr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ sysinfo->compact_flash_attribute_base_addr =
+ octeon_bootinfo->compact_flash_attribute_base_addr;
+ sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+ sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+ sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+
+ octeon_check_cpu_bist();
+
+ octeon_uart = octeon_get_boot_uart();
+
+ /*
+ * Disable all CIU interrupts. The ones we need will be
+ * enabled later. Read the SUM register so we know the write
+ * completed.
+ */
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
+
+#ifdef CONFIG_SMP
+ octeon_write_lcd("LinuxSMP");
+#else
+ octeon_write_lcd("Linux");
+#endif
+
+#ifdef CONFIG_CAVIUM_GDB
+ /*
+ * When debugging the linux kernel, force the cores to enter
+ * the debug exception handler to break in.
+ */
+ if (octeon_get_boot_debug_flag()) {
+ cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
+ cvmx_read_csr(CVMX_CIU_DINT);
+ }
+#endif
+
+ /*
+ * BIST should always be enabled when doing a soft reset. L2
+ * Cache locking for instance is not cleared unless BIST is
+ * enabled. Unfortunately, due to chip erratum G-200 for
+ * CN38XX and CN31XX, BIST must be disabled on these parts.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+ OCTEON_IS_MODEL(OCTEON_CN31XX))
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
+
+ /* Default to 64MB in the simulator to speed things up */
+ if (octeon_is_simulation())
+ MAX_MEMORY = 64ull << 20;
+
+ arcs_cmdline[0] = 0;
+ argc = octeon_boot_desc_ptr->argc;
+ for (i = 0; i < argc; i++) {
+ const char *arg =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+ if ((strncmp(arg, "MEM=", 4) == 0) ||
+ (strncmp(arg, "mem=", 4) == 0)) {
+ sscanf(arg + 4, "%llu", &MAX_MEMORY);
+ MAX_MEMORY <<= 20;
+ if (MAX_MEMORY == 0)
+ MAX_MEMORY = 32ull << 30;
+ } else if (strcmp(arg, "ecc_verbose") == 0) {
+#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
+ __cvmx_interrupt_ecc_report_single_bit_errors = 1;
+ pr_notice("Reporting of single bit ECC errors is "
+ "turned on\n");
+#endif
+ } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+ sizeof(arcs_cmdline) - 1) {
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ }
+ }
+
+ if (strstr(arcs_cmdline, "console=") == NULL) {
+#ifdef CONFIG_GDB_CONSOLE
+ strcat(arcs_cmdline, " console=gdb");
+#else
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+#else
+ if (octeon_uart == 1)
+ strcat(arcs_cmdline, " console=ttyS1,115200");
+ else
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+#endif
+#endif
+ }
+
+ if (octeon_is_simulation()) {
+ /*
+ * The simulator uses an mtdram device pre-filled with
+ * the filesystem. Also specify the calibration delay
+ * to avoid calculating it every time.
+ */
+ strcat(arcs_cmdline, " rw root=1f00"
+ " lpj=60176 slram=root,0x40000000,+1073741824");
+ }
+
+ mips_hpt_frequency = octeon_get_clock_rate();
+
+ octeon_init_cvmcount();
+
+ _machine_restart = octeon_restart;
+ _machine_halt = octeon_halt;
+
+ memset(&octeon_port, 0, sizeof(octeon_port));
+ /*
+ * For early_serial_setup we don't set the port type or
+ * UPF_FIXED_TYPE.
+ */
+ octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
+ octeon_port.iotype = UPIO_MEM;
+ /* I/O addresses are every 8 bytes */
+ octeon_port.regshift = 3;
+ /* Clock rate of the chip */
+ octeon_port.uartclk = mips_hpt_frequency;
+ octeon_port.fifosize = 64;
+ octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
+ octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
+ octeon_port.serial_in = octeon_serial_in;
+ octeon_port.serial_out = octeon_serial_out;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ octeon_port.line = 0;
+#else
+ octeon_port.line = octeon_uart;
+#endif
+ octeon_port.irq = 42 + octeon_uart;
+ early_serial_setup(&octeon_port);
+
+ octeon_user_io_init();
+ register_smp_ops(&octeon_smp_ops);
+}
+
+void __init plat_mem_setup(void)
+{
+ uint64_t mem_alloc_size;
+ uint64_t total;
+ int64_t memory;
+
+ total = 0;
+
+ /* First add the init memory we will be returning. */
+ memory = __pa_symbol(&__init_begin) & PAGE_MASK;
+ mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
+ if (mem_alloc_size > 0) {
+ add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+ total += mem_alloc_size;
+ }
+
+ /*
+ * The Mips memory init uses the first memory location for
+ * some memory vectors. When SPARSEMEM is in use, it doesn't
+ * verify that the size is big enough for the final
+ * vectors. Making the smallest chunk 4MB seems to be enough
+ * to work consistently.
+ */
+ mem_alloc_size = 4 << 20;
+ if (mem_alloc_size > MAX_MEMORY)
+ mem_alloc_size = MAX_MEMORY;
+
+ /*
+ * When allocating memory, we want incrementing addresses from
+ * bootmem_alloc so the code in add_memory_region can merge
+ * regions next to each other.
+ */
+ cvmx_bootmem_lock();
+ while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
+ && (total < MAX_MEMORY)) {
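+ /*
+ * Allocation bounds: 64-bit kernels may take memory anywhere
+ * above __init_end; HIGHMEM kernels must stay below 2GB; all
+ * others stay below 512MB.
+ */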
+#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
+ __pa_symbol(&__init_end), -1,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#elif defined(CONFIG_HIGHMEM)
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#else
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#endif
+ if (memory >= 0) {
+ /*
+ * This function automatically merges address
+ * regions next to each other if they are
+ * received in incrementing order.
+ */
+ add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+ total += mem_alloc_size;
+ } else {
+ break;
+ }
+ }
+ cvmx_bootmem_unlock();
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * Now that we've allocated the kernel memory it is safe to
+ * free the reserved region. We free it here so that builtin
+ * drivers can use the memory.
+ */
+ if (octeon_reserve32_memory)
+ cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
+ if (total == 0)
+ panic("Unable to allocate memory from "
+ "cvmx_bootmem_phy_alloc\n");
+}
+
+int prom_putchar(char c)
+{
+ uint64_t lsrval;
+
+ /* Spin until there is room */
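+ /* LSR bit 5 (0x20) is THRE: the transmit holding register is empty. */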
+ do {
+ lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
+ } while ((lsrval & 0x20) == 0);
+
+ /* Write the byte */
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
+ return 1;
+}
+
+void prom_free_prom_memory(void)
+{
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+ cvmx_interrupt_rsl_enable();
+
+ /* Add an interrupt handler for general failures. */
+ if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
+ "RML/RSL", octeon_rlm_interrupt)) {
+ panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
+ }
+#endif
+
+ /*
+ * This call is here so that it is performed after any TLB
+ * initializations. It must come after them in case the
+ * CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set.
+ */
+ octeon_hal_setup_reserved32();
+}
+
+static struct octeon_cf_data octeon_cf_data;
+
+static int __init octeon_cf_device_init(void)
+{
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ struct platform_device *pd;
+ struct resource cf_resources[3];
+ unsigned int num_resources;
+ int i;
+ int ret = 0;
+
+ /* Setup octeon-cf platform device if present. */
+ base_ptr = 0;
+ if (octeon_bootinfo->major_version == 1
+ && octeon_bootinfo->minor_version >= 1) {
+ if (octeon_bootinfo->compact_flash_common_base_addr)
+ base_ptr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ } else {
+ base_ptr = 0x1d000800;
+ }
+
+ if (!base_ptr)
+ return ret;
+
+ /* Find CS0 region. */
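+ /* Boot-bus region base and size fields are in units of 64KB. */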
+ for (i = 0; i < 8; i++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size)
+ break;
+ }
+ if (i >= 7) {
+ /* i and i + 1 are CS0 and CS1, both must be less than 8. */
+ goto out;
+ }
+ octeon_cf_data.base_region = i;
+ octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
+ octeon_cf_data.base_region_bias = base_ptr - region_base;
+ memset(cf_resources, 0, sizeof(cf_resources));
+ num_resources = 0;
+ cf_resources[num_resources].flags = IORESOURCE_MEM;
+ cf_resources[num_resources].start = region_base;
+ cf_resources[num_resources].end = region_base + region_size - 1;
+ num_resources++;
+
+ if (!(base_ptr & 0xfffful)) {
+ /*
+ * Boot loader signals availability of DMA (true_ide
+ * mode) by setting low order bits of base_ptr to
+ * zero.
+ */
+
+ /* Assume that CS1 immediately follows. */
+ mio_boot_reg_cfg.u64 =
+ cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (!mio_boot_reg_cfg.s.en)
+ goto out;
+
+ cf_resources[num_resources].flags = IORESOURCE_MEM;
+ cf_resources[num_resources].start = region_base;
+ cf_resources[num_resources].end = region_base + region_size - 1;
+ num_resources++;
+
+ octeon_cf_data.dma_engine = 0;
+ cf_resources[num_resources].flags = IORESOURCE_IRQ;
+ cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
+ cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
+ num_resources++;
+ } else {
+ octeon_cf_data.dma_engine = -1;
+ }
+
+ pd = platform_device_alloc("pata_octeon_cf", -1);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pd->dev.platform_data = &octeon_cf_data;
+
+ ret = platform_device_add_resources(pd, cf_resources, num_resources);
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_cf_device_init);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 000000000000..24e0ad63980a
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,211 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
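+/*
+ * Boot handshake for secondary cores: octeon_boot_secondary() publishes
+ * the new core's stack pointer and gp (thread_info) here, then writes
+ * the target core number to octeon_processor_boot (0xff means none).
+ * The low-level startup code (not shown here) presumably consumes these
+ * and clears octeon_processor_sp to acknowledge.
+ */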
+volatile unsigned long octeon_processor_boot = 0xff;
+volatile unsigned long octeon_processor_sp;
+volatile unsigned long octeon_processor_gp;
+
+static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
+{
+ const int coreid = cvmx_get_core_num();
+ uint64_t action;
+
+ /* Load the mailbox register to figure out what we're supposed to do */
+ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
+
+ /* Clear the mailbox to clear the interrupt */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
+
+ if (action & SMP_CALL_FUNCTION)
+ smp_call_function_interrupt();
+
+ /* Check if we've been told to flush the icache */
+ if (action & SMP_ICACHE_FLUSH)
+ asm volatile ("synci 0($0)\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * Send an inter-processor interrupt (IPI) to a single CPU, asking it
+ * to perform the actions encoded in @action.
+ */
+void octeon_send_ipi_single(int cpu, unsigned int action)
+{
+ int coreid = cpu_logical_map(cpu);
+ /*
+ pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
+ coreid, action);
+ */
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
+}
+
+static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu_mask(i, mask)
+ octeon_send_ipi_single(i, action);
+}
+
+/**
+ * Detect available CPUs and populate cpu_possible_map.
+ */
+static void octeon_smp_setup(void)
+{
+ const int coreid = cvmx_get_core_num();
+ int cpus;
+ int id;
+
+ int core_mask = octeon_get_boot_coremask();
+
+ cpus_clear(cpu_possible_map);
+ __cpu_number_map[coreid] = 0;
+ __cpu_logical_map[0] = coreid;
+ cpu_set(0, cpu_possible_map);
+
+ cpus = 1;
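+ /* Scan the boot coremask; it covers at most 16 cores here. */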
+ for (id = 0; id < 16; id++) {
+ if ((id != coreid) && (core_mask & (1 << id))) {
+ cpu_set(cpus, cpu_possible_map);
+ __cpu_number_map[id] = cpus;
+ __cpu_logical_map[cpus] = id;
+ cpus++;
+ }
+ }
+}
+
+/**
+ * Firmware CPU startup hook.
+ */
+static void octeon_boot_secondary(int cpu, struct task_struct *idle)
+{
+ int count;
+
+ pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
+ cpu_logical_map(cpu));
+
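+ /*
+ * Publish the idle thread's stack pointer and thread_info (used as
+ * gp on MIPS), then release the core by writing its core id.
+ */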
+ octeon_processor_sp = __KSTK_TOS(idle);
+ octeon_processor_gp = (unsigned long)(task_thread_info(idle));
+ octeon_processor_boot = cpu_logical_map(cpu);
+ mb();
+
+ count = 10000;
+ while (octeon_processor_sp && count) {
+ /* Waiting for processor to get the SP and GP */
+ udelay(1);
+ count--;
+ }
+ if (count == 0)
+ pr_err("Secondary boot timeout\n");
+}
+
+/**
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void octeon_init_secondary(void)
+{
+ const int coreid = cvmx_get_core_num();
+ union cvmx_ciu_intx_sum0 interrupt_enable;
+
+ octeon_check_cpu_bist();
+ octeon_init_cvmcount();
+ /*
+ * pr_info("SMP: CoreId %d started\n", coreid);
+ */
+ /*
+ * Enable mailbox interrupts to this core. These are the only
+ * interrupts allowed on line 3.
+ */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
+ interrupt_enable.u64 = 0;
+ interrupt_enable.s.mbox = 0x3;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ /* Enable core interrupt processing for 2,3 and 7 */
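+ /* 0x8c01 = IE | IM2 | IM3 | IM7 in the CP0 Status register. */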
+ set_c0_status(0x8c01);
+}
+
+/**
+ * Callout to firmware before smp_init.
+ */
+void octeon_prepare_cpus(unsigned int max_cpus)
+{
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
+ if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED,
+ "mailbox0", mailbox_interrupt)) {
+ panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED,
+ "mailbox1", mailbox_interrupt)) {
+ panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
+ }
+}
+
+/**
+ * Last chance for the board code to finish SMP initialization before
+ * the CPU is "online".
+ */
+static void octeon_smp_finish(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+ unsigned long tmp;
+ /*
+ * Pulse the MCD0 signal on Ctrl-C to stop all the cores, and set
+ * MCD0 to be unmasked on this core so we know the signal is
+ * received by someone.
+ */
+ asm volatile ("dmfc0 %0, $22\n"
+		"ori %0, %0, 0x9100\n"
+		"dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+
+ octeon_user_io_init();
+
+ /* Set up Compare to generate the first CPU timer interrupt. */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+}
+
+/**
+ * Hook for after all CPUs are online
+ */
+static void octeon_cpus_done(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+ unsigned long tmp;
+ /*
+ * Pulse the MCD0 signal on Ctrl-C to stop all the cores, and set
+ * MCD0 to be unmasked on this core so we know the signal is
+ * received by someone.
+ */
+ asm volatile ("dmfc0 %0, $22\n"
+		"ori %0, %0, 0x9100\n"
+		"dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+}
+
+struct plat_smp_ops octeon_smp_ops = {
+ .send_ipi_single = octeon_send_ipi_single,
+ .send_ipi_mask = octeon_send_ipi_mask,
+ .init_secondary = octeon_init_secondary,
+ .smp_finish = octeon_smp_finish,
+ .cpus_done = octeon_cpus_done,
+ .boot_secondary = octeon_boot_secondary,
+ .smp_setup = octeon_smp_setup,
+ .prepare_cpus = octeon_prepare_cpus,
+};