author     Linus Torvalds <torvalds@linux-foundation.org>  2013-03-02 19:44:16 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-03-02 19:44:16 +0400
commit     aebb2afd5420c860b7fbc3882a323ef1247fbf16 (patch)
tree       05ee0efcebca5ec421de44de7a6d6271088c64a8 /arch/mips/include/asm/octeon
parent     8eae508b7c6ff502a71d0293b69e97c5505d5840 (diff)
parent     edb15d83a875a1f4b1576188844db5c330c3267d (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:

 o Add basic support for the Mediatek/Ralink Wireless SoC family.
 o The Qualcomm Atheros platform is extended by support for the new QCA955X SoC series, as well as a bunch of patches that get the code ready for OF support.
 o Lantiq and BCM47XX platforms have a few improvements and bug fixes.
 o MIPS has sent a few patches that get the kernel ready for the upcoming microMIPS support.
 o The rest of the series is made up of small bug fixes and cleanups that relate to various parts of the MIPS code. The biggy in there is a whitespace cleanup. After I was sent another set of whitespace cleanup patches, I decided it was time to clean up the whitespace "issues" for once, and that touches many files below arch/mips/.

Fix up silly conflicts, mostly due to whitespace cleanups.

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (105 commits)
  MIPS: Quit exporting kernel internel break codes to uapi/asm/break.h
  MIPS: remove broken conditional inside vpe loader code
  MIPS: SMTC: fix implicit declaration of set_vi_handler
  MIPS: early_printk: drop __init annotations
  MIPS: Probe for and report hardware virtualization support.
  MIPS: ath79: add support for the Qualcomm Atheros AP136-010 board
  MIPS: ath79: add USB controller registration code for the QCA955X SoCs
  MIPS: ath79: add PCI controller registration code for the QCA955X SoCs
  MIPS: ath79: add WMAC registration code for the QCA955X SoCs
  MIPS: ath79: register UART for the QCA955X SoCs
  MIPS: ath79: add QCA955X specific glue to ath79_device_reset_{set, clear}
  MIPS: ath79: add GPIO setup code for the QCA955X SoCs
  MIPS: ath79: add IRQ handling code for the QCA955X SoCs
  MIPS: ath79: add clock setup code for the QCA955X SoCs
  MIPS: ath79: add SoC detection code for the QCA955X SoCs
  MIPS: ath79: add early printk support for the QCA955X SoCs
  MIPS: ath79: fix WMAC IRQ resource assignment
  mips: reserve elfcorehdr
  mips: Make sure kernel memory is in iomem
  MIPS: ath79: use dynamically allocated USB platform devices
  ...
Diffstat (limited to 'arch/mips/include/asm/octeon')
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-address.h | 50
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-bootinfo.h | 14
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-bootmem.h | 72
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-cmd-queue.h | 44
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-config.h | 30
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-fau.h | 162
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-fpa.h | 32
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-board.h | 16
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-rgmii.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-sgmii.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-util.h | 18
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper-xaui.h | 4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-helper.h | 12
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ipd.h | 14
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2c.h | 210
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-mdio.h | 40
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pip-defs.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pip.h | 62
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pko.h | 60
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pow.h | 122
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-scratch.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-spi.h | 66
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-spinlock.h | 78
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-sysinfo.h | 16
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-wqe.h | 104
-rw-r--r--  arch/mips/include/asm/octeon/cvmx.h | 48
-rw-r--r--  arch/mips/include/asm/octeon/octeon-feature.h | 10
-rw-r--r--  arch/mips/include/asm/octeon/octeon-model.h | 240
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 12
-rw-r--r--  arch/mips/include/asm/octeon/pci-octeon.h | 2
30 files changed, 775 insertions(+), 775 deletions(-)
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
index 3c74d826e2e6..e2d874e681f6 100644
--- a/arch/mips/include/asm/octeon/cvmx-address.h
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -84,20 +84,20 @@ typedef enum {
* Octeon-I HW never interprets this X (<39:36> reserved
* for future expansion), software should set to 0.
*
- * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
* - 0x0 XXX0 0FFF FFFF
*
- * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
- * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
*
- * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
* - 0x0 XXXF FFFF FFFF
*
- * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
* - 0x1 00XF FFFF FFFF
*
- * - 0x1 01X0 0000 0000 to Other NCB Uncached
- * - 0x1 FFXF FFFF FFFF devices
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
*
* Decode of all Octeon addresses
*/
@@ -129,9 +129,9 @@ typedef union {
*/
struct {
uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
- uint64_t cca:3; /* ignored by octeon */
+ uint64_t cca:3; /* ignored by octeon */
uint64_t mbz:10;
- uint64_t pa:49; /* physical address */
+ uint64_t pa:49; /* physical address */
} sxkphys;
/* physical address */
@@ -253,22 +253,22 @@ typedef union {
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
-#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
#endif /* __CVMX_ADDRESS_H__ */
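[Illustration only, not part of the patch: the sxkphys bitfield view above splits an XKPHYS address into R:2, cca:3, mbz:10 and pa:49. Below is a minimal stand-alone sketch of the same decode using shifts and masks, assuming only those field widths.]

#include <stdint.h>
#include <stdio.h>

static void decode_xkphys(uint64_t addr)
{
	uint64_t r   = addr >> 62;                /* CVMX_MIPS_SPACE_XKPHYS (top bits 10b) */
	uint64_t cca = (addr >> 59) & 0x7;        /* cache attribute, ignored by Octeon */
	uint64_t pa  = addr & ((1ULL << 49) - 1); /* physical address */

	printf("R=%llu cca=%llu pa=0x%llx\n",
	       (unsigned long long)r, (unsigned long long)cca,
	       (unsigned long long)pa);
}

int main(void)
{
	/* PA 0x10000000 falls in the boot bus window of the address map above. */
	decode_xkphys(0x8000000010000000ULL);
	return 0;
}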
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 1db1dc2724cb..284fa8d773ba 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -91,11 +91,11 @@ struct cvmx_bootinfo {
#if (CVMX_BOOTINFO_MIN_VER >= 1)
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These are the physical
* addresses, so care must be taken to use the correct
* XKPHYS/KSEG0 addressing depending on the application's
- * ABI. These values will be 0 if CF is not present.
+ * ABI. These values will be 0 if CF is not present.
*/
uint64_t compact_flash_common_base_addr;
uint64_t compact_flash_attribute_base_addr;
@@ -131,7 +131,7 @@ struct cvmx_bootinfo {
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/* This flag is set if the TLB mappings are not contained in the
* 0x10000000 - 0x20000000 boot bus region. */
-#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
@@ -164,9 +164,9 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_EBT5600 = 22,
CVMX_BOARD_TYPE_EBH5201 = 23,
CVMX_BOARD_TYPE_EBT5200 = 24,
- CVMX_BOARD_TYPE_CB5600 = 25,
- CVMX_BOARD_TYPE_CB5601 = 26,
- CVMX_BOARD_TYPE_CB5200 = 27,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
/* Special 'generic' board type, supports many boards */
CVMX_BOARD_TYPE_GENERIC = 28,
CVMX_BOARD_TYPE_EBH5610 = 29,
@@ -223,7 +223,7 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
/*
- * Set aside a range for customer private use. The SDK won't
+ * Set aside a range for customer private use. The SDK won't
* use any numbers in this range.
*/
CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 42db2be663f1..352f1dc2508b 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -39,7 +39,7 @@
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
/* minimum alignment of bootmem alloced blocks */
-#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
/* Flags for cvmx_bootmem_phy_mem* functions */
/* Allocate from end of block instead of beginning */
@@ -151,8 +151,8 @@ extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
* memory cannot be allocated at the specified address.
*
* @size: Size in bytes of block to allocate
- * @address: Physical address to allocate memory at. If this memory is not
- * available, the allocation fails.
+ * @address: Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
@@ -181,7 +181,7 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
* @name: name of block to free
*
* Returns 0 on failure,
- * !0 on success
+ * !0 on success
*/
@@ -210,9 +210,9 @@ extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
*
* @size: Size in bytes of block to allocate
* @address: Physical address to allocate memory at. If this
- * memory is not available, the allocation fails.
+ * memory is not available, the allocation fails.
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN
- * bytes
+ * bytes
*
* Returns a pointer to block of memory, NULL on error
*/
@@ -249,7 +249,7 @@ extern int cvmx_bootmem_free_named(char *name);
* @name: name of block to free
*
* Returns pointer to named block descriptor on success
- * 0 on failure
+ * 0 on failure
*/
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
@@ -258,20 +258,20 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
* (optional) requested address and alignment.
*
* @req_size: size of region to allocate. All requests are rounded up
- * to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ * to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
*
* @address_min: Minimum address that block can occupy.
*
* @address_max: Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
*
* @alignment: Requested alignment of the block. If this alignment
- * cannot be met, the allocation fails. This must be a
- * power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less than
- * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * cannot be met, the allocation fails. This must be a
+ * power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less than
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
*
* @flags: Flags to control options for the allocation.
*
@@ -285,21 +285,21 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
* Allocates a named block of physical memory from the free list, at
* (optional) requested address and alignment.
*
- * @param size size of region to allocate. All requests are rounded
- * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
- * bytes size
+ * @param size size of region to allocate. All requests are rounded
+ * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * bytes size
* @param min_addr Minimum address that block can occupy.
* @param max_addr Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
* @param alignment Requested alignment of the block. If this
- * alignment cannot be met, the allocation fails.
- * This must be a power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less
- * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
- * @param name name to assign to named block
- * @param flags Flags to control options for the allocation.
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
@@ -312,14 +312,14 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
- * @name: Name of memory block to find. If NULL pointer given, then
- * finds unused descriptor, if available.
+ * @name: Name of memory block to find. If NULL pointer given, then
+ * finds unused descriptor, if available.
*
* @flags: Flags to control options for the allocation.
*
* Returns Pointer to memory block descriptor, NULL if not found.
- * If NULL returned when name parameter is NULL, then no memory
- * block descriptors are available.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
*/
struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
@@ -331,31 +331,31 @@ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
* @flags: flags for passing options
*
* Returns 0 on failure
- * 1 on success
+ * 1 on success
*/
int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
/**
- * Frees a block to the bootmem allocator list. This must
+ * Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
* frees and initial population of the free memory list.
- * *
+ * *
*
* @phy_addr: physical address of block
* @size: size of block in bytes.
* @flags: flags for passing options
*
* Returns 1 on success,
- * 0 on failure
+ * 0 on failure
*/
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
/**
- * Locks the bootmem allocator. This is useful in certain situations
+ * Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index fed91125317f..024a71b2bff9 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -244,33 +244,33 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
".set noreorder\n"
"1:\n"
/* Atomic add one to ticket_ptr */
- "ll %[my_ticket], %[ticket_ptr]\n"
+ "ll %[my_ticket], %[ticket_ptr]\n"
/* and store the original value */
- "li %[ticket], 1\n"
+ "li %[ticket], 1\n"
/* in my_ticket */
- "baddu %[ticket], %[my_ticket]\n"
- "sc %[ticket], %[ticket_ptr]\n"
- "beqz %[ticket], 1b\n"
+ "baddu %[ticket], %[my_ticket]\n"
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
" nop\n"
/* Load the current now_serving ticket */
- "lbu %[ticket], %[now_serving]\n"
+ "lbu %[ticket], %[now_serving]\n"
"2:\n"
/* Jump out if now_serving == my_ticket */
- "beq %[ticket], %[my_ticket], 4f\n"
+ "beq %[ticket], %[my_ticket], 4f\n"
/* Find out how many tickets are in front of me */
- " subu %[ticket], %[my_ticket], %[ticket]\n"
+ " subu %[ticket], %[my_ticket], %[ticket]\n"
/* Use tickets in front of me minus one to delay */
"subu %[ticket], 1\n"
/* Delay will be ((tickets in front)-1)*32 loops */
- "cins %[ticket], %[ticket], 5, 7\n"
+ "cins %[ticket], %[ticket], 5, 7\n"
"3:\n"
/* Loop here until our ticket might be up */
- "bnez %[ticket], 3b\n"
- " subu %[ticket], 1\n"
+ "bnez %[ticket], 3b\n"
+ " subu %[ticket], 1\n"
/* Jump back up to check out ticket again */
- "b 2b\n"
+ "b 2b\n"
/* Load the current now_serving ticket */
- " lbu %[ticket], %[now_serving]\n"
+ " lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
@@ -313,9 +313,9 @@ static inline __cvmx_cmd_queue_state_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd_count: Number of command words to write
* @cmds: Array of commands to write
*
@@ -411,9 +411,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
*
@@ -510,9 +510,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
* @cmd3: Command
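[Illustration only, not part of the patch: the inline assembly in __cvmx_cmd_queue_lock() above is a ticket lock. A portable C sketch of the same algorithm follows, using GCC __atomic builtins in place of the Octeon ll/sc/baddu sequence; the unlock shown is the implied counterpart, not code from this header.]

#include <stdint.h>

struct example_ticket_lock {
	uint8_t ticket;       /* next ticket to hand out */
	uint8_t now_serving;  /* ticket currently allowed in */
};

static void example_ticket_lock_acquire(struct example_ticket_lock *l)
{
	/* Atomically take a ticket (the ll/sc + baddu loop above). */
	uint8_t my_ticket = __atomic_fetch_add(&l->ticket, 1, __ATOMIC_ACQUIRE);

	for (;;) {
		uint8_t serving = __atomic_load_n(&l->now_serving, __ATOMIC_ACQUIRE);
		unsigned int delay;

		if (serving == my_ticket)
			return;

		/* Back off for ((tickets in front) - 1) * 32 iterations,
		 * mirroring the "cins ..., 5, 7" delay loop above. */
		for (delay = (uint8_t)(my_ticket - serving - 1) * 32u; delay; delay--)
			__asm__ volatile("");   /* keep the delay loop from being optimized out */
	}
}

static void example_ticket_lock_release(struct example_ticket_lock *l)
{
	__atomic_store_n(&l->now_serving, (uint8_t)(l->now_serving + 1), __ATOMIC_RELEASE);
}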
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index 26835d1b43b8..f7dd17d0dc22 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -31,13 +31,13 @@
/* Pools in use */
/* Packet buffers */
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
/* Work queue entrys */
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
/************************* FAU allocation ********************************/
@@ -45,7 +45,7 @@
* in order of descending size so that all alignment constraints are
* automatically met. The enums are linked so that the following enum
* continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero. The macros
+ * numbering within each enum always starts with zero. The macros
* take care of the address increment size, so the values entered
* always increase by 1. FAU registers are accessed with byte
* addresses.
@@ -90,9 +90,9 @@ typedef enum {
* be taken into account.
*/
/* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH (0)
+#define CVMX_SCR_SCRATCH (0)
/* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE (8)
+#define CVMX_SCR_REG_AVAIL_BASE (8)
/*
* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
@@ -145,14 +145,14 @@ typedef enum {
* 1: include
*/
#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
diff --git a/arch/mips/include/asm/octeon/cvmx-fau.h b/arch/mips/include/asm/octeon/cvmx-fau.h
index a6939fc8ba18..ef98f7fc102f 100644
--- a/arch/mips/include/asm/octeon/cvmx-fau.h
+++ b/arch/mips/include/asm/octeon/cvmx-fau.h
@@ -37,13 +37,13 @@
*/
#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR 63, 56
-#define CVMX_FAU_BITS_LEN 55, 48
-#define CVMX_FAU_BITS_INEVAL 35, 14
-#define CVMX_FAU_BITS_TAGWAIT 13, 13
-#define CVMX_FAU_BITS_NOADD 13, 13
-#define CVMX_FAU_BITS_SIZE 12, 11
-#define CVMX_FAU_BITS_REGISTER 10, 0
+#define CVMX_FAU_BITS_SCRADDR 63, 56
+#define CVMX_FAU_BITS_LEN 55, 48
+#define CVMX_FAU_BITS_INEVAL 35, 14
+#define CVMX_FAU_BITS_TAGWAIT 13, 13
+#define CVMX_FAU_BITS_NOADD 13, 13
+#define CVMX_FAU_BITS_SIZE 12, 11
+#define CVMX_FAU_BITS_REGISTER 10, 0
typedef enum {
CVMX_FAU_OP_SIZE_8 = 0,
@@ -109,11 +109,11 @@ typedef union {
* Builds a store I/O address for writing to the FAU
*
* @noadd: 0 = Store value is atomically added to the current value
- * 1 = Store value is atomically written over the current value
+ * 1 = Store value is atomically written over the current value
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Address to store for atomic update
*/
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
@@ -127,16 +127,16 @@ static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
* Builds a I/O address for accessing the FAU
*
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* Returns Address to read from for atomic update
*/
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
@@ -152,9 +152,9 @@ static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
@@ -167,9 +167,9 @@ static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
@@ -182,7 +182,7 @@ static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Value of the register before the update
*/
@@ -209,12 +209,12 @@ static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -233,12 +233,12 @@ cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -257,11 +257,11 @@ cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -282,8 +282,8 @@ cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
@@ -301,21 +301,21 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
*
* @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @size: The size of the operation:
- * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
- * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Data to write using cvmx_send_single
*/
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
@@ -337,11 +337,11 @@ static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
@@ -357,11 +357,11 @@ static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
@@ -377,9 +377,9 @@ static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
*/
@@ -396,7 +396,7 @@ static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
@@ -414,14 +414,14 @@ static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
@@ -437,14 +437,14 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
@@ -460,12 +460,12 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*
* Returns Placed in the scratch pad register
@@ -483,9 +483,9 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
@@ -504,7 +504,7 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -516,7 +516,7 @@ static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -528,7 +528,7 @@ static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -551,7 +551,7 @@ static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
* Perform an atomic 64 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -563,7 +563,7 @@ static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -575,7 +575,7 @@ static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
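[Illustration only, not part of the patch: a packet counter kept in an FAU register, using the accessors declared above. EXAMPLE_FAU_PKT_COUNT is a hypothetical register at byte offset 0; real code takes its registers from the cvmx_fau_reg_64_t allocation enum in cvmx-config.h, which also supplies the type.]

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-fau.h>

#define EXAMPLE_FAU_PKT_COUNT ((cvmx_fau_reg_64_t)0)  /* hypothetical; 0 <= reg < 2048, step by 8 */

static void example_fau_counter(void)
{
	int64_t before;

	/* Zero the counter, then bump it atomically from any core. */
	cvmx_fau_atomic_write64(EXAMPLE_FAU_PKT_COUNT, 0);
	cvmx_fau_atomic_add64(EXAMPLE_FAU_PKT_COUNT, 1);

	/* Add and get the pre-update value in a single FAU operation. */
	before = cvmx_fau_fetch_and_add64(EXAMPLE_FAU_PKT_COUNT, 1);
	(void)before;
}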
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index 541a1ae02b6f..aa26a2ce5a0e 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -39,9 +39,9 @@
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
-#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_NUM_POOLS 8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT 128
+#define CVMX_FPA_ALIGNMENT 128
/**
* Structure describing the data format used for stores to the FPA.
@@ -186,8 +186,8 @@ static inline void *cvmx_fpa_alloc(uint64_t pool)
/**
* Asynchronously get a new block from the FPA
*
- * @scr_addr: Local scratch address to put response in. This is a byte address,
- * but must be 8 byte aligned.
+ * @scr_addr: Local scratch address to put response in. This is a byte address,
+ * but must be 8 byte aligned.
* @pool: Pool to get the block from
*/
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
@@ -212,7 +212,7 @@ static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -234,7 +234,7 @@ static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -245,7 +245,7 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
/*
* Make sure that any previous writes to memory go out before
- * we free this buffer. This also serves as a barrier to
+ * we free this buffer. This also serves as a barrier to
* prevent GCC from reordering operations to after the
* free.
*/
@@ -259,17 +259,17 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
* @block_size: Size for each block controlled by the FPA
* @num_blocks: Number of blocks
*
* Returns 0 on Success,
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
uint64_t block_size, uint64_t num_blocks);
@@ -282,8 +282,8 @@ extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
*
* @pool: Pool to shutdown
* Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
*/
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
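[Illustration only, not part of the patch: bringing up one FPA pool with the calls declared above. The pool number and backing buffer are example placeholders; real code uses the pool assignments from cvmx-config.h and typically carves the buffer out of bootmem.]

#include <asm/octeon/cvmx-fpa.h>

#define EXAMPLE_POOL        7ull   /* hypothetical spare pool, 0 <= pool < CVMX_FPA_NUM_POOLS */
#define EXAMPLE_BLOCK_SIZE  256ull /* >= CVMX_FPA_MIN_BLOCK_SIZE (128) */
#define EXAMPLE_NUM_BLOCKS  64ull

/* 'backing' must be CVMX_FPA_ALIGNMENT-aligned and at least
 * EXAMPLE_BLOCK_SIZE * EXAMPLE_NUM_BLOCKS bytes long. */
static int example_fpa(void *backing)
{
	void *block;

	if (cvmx_fpa_setup_pool(EXAMPLE_POOL, "example-pool", backing,
				EXAMPLE_BLOCK_SIZE, EXAMPLE_NUM_BLOCKS) != 0)
		return -1;

	block = cvmx_fpa_alloc(EXAMPLE_POOL);   /* assumed NULL when the pool is empty */
	if (!block)
		return -1;

	/* ... use the buffer ... */

	cvmx_fpa_free(block, EXAMPLE_POOL, 0);  /* 0 cache lines invalidated */
	return 0;
}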
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 442f508eaac9..41785dd0ddd0 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -48,7 +48,7 @@ typedef enum {
* Fake IPD port, the RGMII/MII interface may use different PHY, use
* this macro to return appropriate MIX address to read the PHY.
*/
-#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* cvmx_override_board_link_get(int ipd_port) is a function
@@ -86,10 +86,10 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
*
* @phy_addr: The address of the PHY to program
* @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backware compatibility.
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backware compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
+ * is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
@@ -111,10 +111,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
- * status for.
+ * status for.
*
* Returns The ports link status. If the link isn't fully resolved, this must
- * return zero.
+ * return zero.
*/
extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
@@ -134,10 +134,10 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
*
* @interface: Interface to probe
* @supported_ports:
- * Number of ports Octeon supports.
+ * Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
- * simple be "support_ports".
+ * simple be "support_ports".
*/
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index 78295ba0050f..4d7a3db3a9f6 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -98,9 +98,9 @@ extern int __cvmx_helper_rgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
index 9a9b6c103ede..4debb1c5153d 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_sgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h
index 01c8ddd84ff8..f446f212bbd4 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-util.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h
@@ -57,11 +57,11 @@ extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
@@ -71,11 +71,11 @@ extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
@@ -84,7 +84,7 @@ extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
* Get the version of the CVMX libraries.
*
* Returns Version string. Note this buffer is allocated statically
- * and will be shared by all callers.
+ * and will be shared by all callers.
*/
extern const char *cvmx_helper_get_version(void);
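[Illustration only, not part of the patch: enabling Random Early Drop with the helper above. The thresholds are counts of free FPA pool-0 packet buffers; the numbers here are arbitrary examples and depend on how many buffers the application gives the hardware.]

#include <asm/octeon/cvmx-helper-util.h>

static int example_enable_red(void)
{
	/* Start probabilistically dropping below 256 free buffers,
	 * drop everything below 128 free buffers. */
	return cvmx_helper_setup_red(256, 128);  /* zero on success, negative on failure */
}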
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
index f6fbc4f45b56..5e89ed703eaa 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_xaui_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
index 691c8142cd4f..5a3090dc6f2f 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -93,12 +93,12 @@ extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD. This should be called by the user program after any additional
+ * IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* Returns 0 on success
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
@@ -128,7 +128,7 @@ extern int cvmx_helper_initialize_packet_io_local(void);
* @interface: Which interface to return port count for.
*
* Returns Port count for interface
- * -1 for uninitialized interface
+ * -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
@@ -150,7 +150,7 @@ extern int cvmx_helper_get_number_of_interfaces(void);
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
+ * DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
interface);
@@ -214,9 +214,9 @@ extern int cvmx_helper_interface_enumerate(int interface);
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
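[Illustration only, not part of the patch: walking the packet interfaces with the helpers declared above, for example to total up the usable ports before enabling packet input.]

#include <asm/octeon/cvmx-helper.h>

static int example_count_ports(void)
{
	int num_if = cvmx_helper_get_number_of_interfaces();
	int total = 0;
	int i;

	for (i = 0; i < num_if; i++) {
		int ports = cvmx_helper_ports_on_interface(i);

		/* -1 means the interface has not been initialized. */
		if (ports > 0)
			total += ports;
	}
	return total;
}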
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index 115a552c5c7f..e13490ebbb27 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -38,8 +38,8 @@
#include <asm/octeon/cvmx-ipd-defs.h>
enum cvmx_ipd_mode {
- CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
@@ -60,17 +60,17 @@ typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
*
* @mbuff_size: Packets buffer size in 8 byte words
* @first_mbuff_skip:
- * Number of 8 byte words to skip in the first buffer
+ * Number of 8 byte words to skip in the first buffer
* @not_first_mbuff_skip:
- * Number of 8 byte words to skip in each following buffer
+ * Number of 8 byte words to skip in each following buffer
* @first_back: Must be same as first_mbuff_skip / 128
* @second_back:
- * Must be same as not_first_mbuff_skip / 128
+ * Must be same as not_first_mbuff_skip / 128
* @wqe_fpa_pool:
- * FPA pool to get work entries from
+ * FPA pool to get work entries from
* @cache_mode:
* @back_pres_enable_flag:
- * Enable or disable port back pressure
+ * Enable or disable port back pressure
*/
static inline void cvmx_ipd_config(uint64_t mbuff_size,
uint64_t first_mbuff_skip,
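[Illustration only, not part of the patch: one plausible cvmx_ipd_config() call. Only the first two parameters are visible in this hunk; the rest are assumed to follow the @-parameter list above, in that order, and the values shown only illustrate the call shape (real setups derive them from the CVMX_HELPER_* settings in cvmx-config.h).]

#include <asm/octeon/cvmx-ipd.h>

static void example_ipd_config(void)
{
	uint64_t mbuff_size = 2048 / 8;  /* packet buffer size, in 8-byte words */
	uint64_t first_skip = 184 / 8;   /* 8-byte words skipped in the first buffer */
	uint64_t later_skip = 0;         /* no skip in following buffers */

	cvmx_ipd_config(mbuff_size,
			first_skip,
			later_skip,
			first_skip / 128,       /* first_back, per the rule above */
			later_skip / 128,       /* second_back, per the rule above */
			1,                      /* wqe_fpa_pool: CVMX_FPA_WQE_POOL is pool 1 */
			CVMX_IPD_OPC_MODE_STT,  /* all blocks to DRAM, not cached in L2 */
			1);                     /* enable port back pressure */
}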
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2c8ff9e33ec3..11c0a8fa8eb5 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -33,13 +33,13 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
-#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
@@ -67,91 +67,91 @@ union cvmx_l2c_tag {
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
- CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
- CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
- CVMX_L2C_EVENT_DATA_MISS = 3,
- CVMX_L2C_EVENT_DATA_HIT = 4,
- CVMX_L2C_EVENT_MISS = 5,
- CVMX_L2C_EVENT_HIT = 6,
- CVMX_L2C_EVENT_VICTIM_HIT = 7,
- CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
- CVMX_L2C_EVENT_TAG_PROBE = 9,
- CVMX_L2C_EVENT_TAG_UPDATE = 10,
- CVMX_L2C_EVENT_TAG_COMPLETE = 11,
- CVMX_L2C_EVENT_TAG_DIRTY = 12,
- CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
- CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
- CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
- CVMX_L2C_EVENT_WRITE_REQUEST = 17,
- CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
- CVMX_L2C_EVENT_XMC_NOP = 20,
- CVMX_L2C_EVENT_XMC_LDT = 21,
- CVMX_L2C_EVENT_XMC_LDI = 22,
- CVMX_L2C_EVENT_XMC_LDD = 23,
- CVMX_L2C_EVENT_XMC_STF = 24,
- CVMX_L2C_EVENT_XMC_STT = 25,
- CVMX_L2C_EVENT_XMC_STP = 26,
- CVMX_L2C_EVENT_XMC_STC = 27,
- CVMX_L2C_EVENT_XMC_DWB = 28,
- CVMX_L2C_EVENT_XMC_PL2 = 29,
- CVMX_L2C_EVENT_XMC_PSL1 = 30,
- CVMX_L2C_EVENT_XMC_IOBLD = 31,
- CVMX_L2C_EVENT_XMC_IOBST = 32,
- CVMX_L2C_EVENT_XMC_IOBDMA = 33,
- CVMX_L2C_EVENT_XMC_IOBRSP = 34,
- CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
- CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
- CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
- CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
- CVMX_L2C_EVENT_RSC_NOP = 39,
- CVMX_L2C_EVENT_RSC_STDN = 40,
- CVMX_L2C_EVENT_RSC_FILL = 41,
- CVMX_L2C_EVENT_RSC_REFL = 42,
- CVMX_L2C_EVENT_RSC_STIN = 43,
- CVMX_L2C_EVENT_RSC_SCIN = 44,
- CVMX_L2C_EVENT_RSC_SCFL = 45,
- CVMX_L2C_EVENT_RSC_SCDN = 46,
- CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
- CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
- CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
- CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
- CVMX_L2C_EVENT_LRF_REQ = 51,
- CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
- CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
CVMX_L2C_EVENT_MAX
};
/* L2C Performance Counter events for Octeon2. */
enum cvmx_l2c_tad_event {
- CVMX_L2C_TAD_EVENT_NONE = 0,
- CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
- CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
- CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
- CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
- CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
- CVMX_L2C_TAD_EVENT_SC_PASS = 6,
- CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
- CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
- CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
- CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
- CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
- CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
- CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
- CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
- CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
- CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
- CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
- CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
- CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
- CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
- CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
- CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
- CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
CVMX_L2C_TAD_EVENT_MAX
};
@@ -159,10 +159,10 @@ enum cvmx_l2c_tad_event {
* Configure one of the four L2 Cache performance counters to capture event
* occurrences.
*
- * @counter: The counter to configure. Range 0..3.
- * @event: The type of L2 Cache event occurrence to count.
+ * @counter: The counter to configure. Range 0..3.
+ * @event: The type of L2 Cache event occurrence to count.
* @clear_on_read: When asserted, any read of the performance counter
- * clears the counter.
+ * clears the counter.
*
* @note The routine does not clear the counter.
*/
@@ -184,8 +184,8 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter);
* @core: The core processor of interest.
*
* Returns The mask specifying the partitioning. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_core_way_partition(uint32_t core);
@@ -194,16 +194,16 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
*
* @core: The core that the partitioning applies to.
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
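For orientation only (not part of this patch): a minimal sketch of how the get/set partition helpers above might be combined. The example_ name and the choice of core 2 are hypothetical, and the usual <asm/octeon/cvmx-l2c.h> include is assumed.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>

/* Block core 2 from evicting L2 lines from way 0; a 1 bit blocks a way. */
static int example_partition_core2(void)
{
	int mask = cvmx_l2c_get_core_way_partition(2);

	if (mask == -1)
		return -1;	/* could not read the current partitioning */

	mask |= 1;		/* set bit 0: way 0 is no longer evictable by core 2 */
	return cvmx_l2c_set_core_way_partition(2, mask);
}

The read-modify-write keeps at least one allowed way (a 0 bit) as long as the core started with more than one allowed way, which satisfies the constraint documented above.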
@@ -211,8 +211,8 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
* Return the L2 Cache way partitioning for the hw blocks.
*
* Returns The mask specifying the reserved way. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_hw_way_partition(void);
@@ -220,16 +220,16 @@ int cvmx_l2c_get_hw_way_partition(void);
* Partitions the L2 cache for the hardware blocks.
*
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
@@ -240,7 +240,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask);
* @addr: physical address of line to lock
*
* Returns 0 on success,
- * 1 if line not locked.
+ * 1 if line not locked.
*/
int cvmx_l2c_lock_line(uint64_t addr);
@@ -258,7 +258,7 @@ int cvmx_l2c_lock_line(uint64_t addr);
* @len: Length (in bytes) of region to lock
*
 * Returns Number of requested lines that were not locked.
- * 0 on success (all locked)
+ * 0 on success (all locked)
*/
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
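As a hedged usage sketch of the region lock documented here: the wrapper below is hypothetical and assumes cvmx_ptr_to_phys() (declared further down in cvmx.h) is used to obtain the physical address, with the matching unlock releasing the lines afterwards.

/* Pin a buffer into the L2 cache while it is hot, then release it. */
static int example_lock_buffer(void *buf, uint64_t len)
{
	uint64_t phys = cvmx_ptr_to_phys(buf);

	if (cvmx_l2c_lock_mem_region(phys, len) != 0)
		return -1;			/* some lines were not locked */

	/* ... use the buffer while it is pinned in L2 ... */

	cvmx_l2c_unlock_mem_region(phys, len);	/* release the locked lines */
	return 0;
}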
@@ -272,7 +272,7 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
* @address: Physical address to unlock
*
* Returns 0: line not unlocked
- * 1: line unlocked
+ * 1: line unlocked
*/
int cvmx_l2c_unlock_line(uint64_t address);
@@ -290,7 +290,7 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
* Read the L2 controller tag for a given location in L2
*
* @association:
- * Which association to read line from
+ * Which association to read line from
* @index: Which way to read from.
*
* Returns l2c tag structure for line requested.
diff --git a/arch/mips/include/asm/octeon/cvmx-mdio.h b/arch/mips/include/asm/octeon/cvmx-mdio.h
index 6f0cd182cec8..9f6a4f32a83c 100644
--- a/arch/mips/include/asm/octeon/cvmx-mdio.h
+++ b/arch/mips/include/asm/octeon/cvmx-mdio.h
@@ -246,21 +246,21 @@ typedef union {
} cvmx_mdio_phy_reg_mmd_address_data_t;
/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
+#define MDIO_CLAUSE_45_READ 3
/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
@@ -291,7 +291,7 @@ static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to read
*
@@ -328,13 +328,13 @@ static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
{
@@ -370,7 +370,7 @@ static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
* read PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to read
@@ -407,7 +407,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
+ "device %2d register %2d TIME OUT(address)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -425,7 +425,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
+ "device %2d register %2d TIME OUT(data)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -434,7 +434,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
return smi_rd.s.dat;
else {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
+ "device %2d register %2d INVALID READ\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -445,14 +445,14 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
* write PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
 * @device: MDIO Manageable Device (MMD) id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
int location, int val)
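As an illustrative aside, the clause-22 helpers documented above compose into a simple read-modify-write. Bus 0, PHY address 1 and register 0 (the standard control register, whose bit 15 requests a reset) are placeholder values, and the example_ name is hypothetical.

/* Request a software reset of the PHY at address 1 on MDIO bus 0. */
static int example_phy_reset(void)
{
	int val = cvmx_mdio_read(0, 1, 0);	/* clause-22 register 0 */

	if (val < 0)
		return -1;			/* MDIO read failed */

	return cvmx_mdio_write(0, 1, 0, val | 0x8000);	/* 0 on success, -1 on error */
}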
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
index 05a917d6ebe5..e975c7d2e485 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -44,7 +44,7 @@ enum cvmx_pip_port_parse_mode {
*/
CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
/*
- * Input packets are assumed to be IP. Results from non IP
+ * Input packets are assumed to be IP. Results from non IP
 * packets are undefined. Pointers reference the beginning of
* the IP header.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index 9e739a640855..a76fe5a57a9f 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,8 +37,8 @@
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip-defs.h>
-#define CVMX_PIP_NUM_INPUT_PORTS 40
-#define CVMX_PIP_NUM_WATCHERS 4
+#define CVMX_PIP_NUM_INPUT_PORTS 40
+#define CVMX_PIP_NUM_WATCHERS 4
/*
* Encodes the different error and exception codes
@@ -92,10 +92,10 @@ typedef enum {
/**
* NOTES
- * late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as bad FCS
- * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
*/
typedef enum {
/* No error */
@@ -122,11 +122,11 @@ typedef enum {
* error)
*/
CVMX_PIP_UNDER_FCS_ERR = 6ull,
- /* RGM 7 = FCS error */
+ /* RGM 7 = FCS error */
CVMX_PIP_GMX_FCS_ERR = 7ull,
/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
CVMX_PIP_UNDER_ERR = 8ull,
- /* RGM 9 = Frame carrier extend error */
+ /* RGM 9 = Frame carrier extend error */
CVMX_PIP_EXTEND_ERR = 9ull,
/*
* RGM 10 = length mismatch (len did not match len in L2
@@ -161,10 +161,10 @@ typedef enum {
CVMX_PIP_PIP_L2_MAL_HDR = 18L
/*
* NOTES: xx = late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as
- * bad FCS or carrier extend error which is
- * CVMX_PIP_EXTEND_ERR
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as
+ * bad FCS or carrier extend error which is
+ * CVMX_PIP_EXTEND_ERR
*/
} cvmx_pip_rcv_err_t;
@@ -192,13 +192,13 @@ typedef struct {
/* Number of packets processed by PIP */
uint32_t packets;
/*
- * Number of indentified L2 multicast packets. Does not
+	 * Number of identified L2 multicast packets.	Does not
* include broadcast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t multicast_packets;
/*
- * Number of indentified L2 broadcast packets. Does not
+	 * Number of identified L2 broadcast packets.	Does not
* include multicast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
@@ -287,7 +287,7 @@ typedef union {
* @port_num: Port number to configure
* @port_cfg: Port hardware configuration
* @port_tag_cfg:
- * Port POW tagging configuration
+ * Port POW tagging configuration
*/
static inline void cvmx_pip_config_port(uint64_t port_num,
union cvmx_pip_prt_cfgx port_cfg,
@@ -298,20 +298,20 @@ static inline void cvmx_pip_config_port(uint64_t port_num,
}
#if 0
/**
- * @deprecated This function is a thin wrapper around the Pass1 version
- * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- * setting the group that is incompatible with this function,
- * the preferred upgrade path is to use the CSR directly.
+ * @deprecated This function is a thin wrapper around the Pass1 version
+ * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ * setting the group that is incompatible with this function,
+ * the preferred upgrade path is to use the CSR directly.
*
* Configure the global QoS packet watchers. Each watcher is
* capable of matching a field in a packet to determine the
* QoS queue for scheduling.
*
- * @watcher: Watcher number to configure (0 - 3).
+ * @watcher: Watcher number to configure (0 - 3).
* @match_type: Watcher match type
* @match_value:
- * Value the watcher will match against
- * @qos: QoS queue for packets matching this watcher
+ * Value the watcher will match against
+ * @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_watcher(uint64_t watcher,
cvmx_pip_qos_watch_types match_type,
@@ -331,7 +331,7 @@ static inline void cvmx_pip_config_watcher(uint64_t watcher,
* Configure the VLAN priority to QoS queue mapping.
*
* @vlan_priority:
- * VLAN priority (0-7)
+ * VLAN priority (0-7)
* @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
@@ -451,10 +451,10 @@ static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
*
* @interface: Interface to configure (0 or 1)
* @invert_result:
- * Invert the result of the CRC
+ * Invert the result of the CRC
* @reflect: Reflect
* @initialization_vector:
- * CRC initialization vector
+ * CRC initialization vector
*/
static inline void cvmx_pip_config_crc(uint64_t interface,
uint64_t invert_result, uint64_t reflect,
@@ -500,13 +500,13 @@ static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
*
* @mask_index: Which tag mask to modify (0..3)
* @offset: Offset into the bitmask to set bits at. Use the GCC macro
- * offsetof() to determine the offsets into packet headers.
- * For example, offsetof(ethhdr, protocol) returns the offset
- * of the ethernet protocol field. The bitmask selects which
- * bytes to include the the tag, with bit offset X selecting
- * byte at offset X from the beginning of the packet data.
+ * offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(ethhdr, protocol) returns the offset
+ * of the ethernet protocol field. The bitmask selects which
+ *		bytes to include in the tag, with bit offset X selecting
+ * byte at offset X from the beginning of the packet data.
* @len: Number of bytes to include. Usually this is the sizeof()
- * the field.
+ * the field.
*/
static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
uint64_t len)
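A short sketch of the tag-mask usage described above, using the Linux struct ethhdr (whose Ethertype field is named h_proto) in place of the comment's informal ethhdr/protocol example; the wrapper name is hypothetical.

#include <linux/stddef.h>
#include <linux/if_ether.h>

/* Hash the tag over the 2-byte Ethertype only, using tag mask 0. */
static void example_tag_on_ethertype(void)
{
	cvmx_pip_tag_mask_clear(0);
	cvmx_pip_tag_mask_set(0, offsetof(struct ethhdr, h_proto), 2);
}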
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index c6daeedf1f81..f7d2a6718849 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -69,16 +69,16 @@
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
/* use this for queues that are not used */
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
-#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
typedef enum {
@@ -269,13 +269,13 @@ extern void cvmx_pko_shutdown(void);
/**
* Configure a output port and the associated queues for use.
*
- * @port: Port to configure.
+ * @port: Port to configure.
* @base_queue: First queue number to associate with this port.
 * @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 1-8. A value of 8 get 8 times the traffic
- * of a value of 1. There must be num_queues elements in the
- * array.
+ * @priority: Array of priority levels for each queue. Values are
+ *		allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
*/
extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
uint64_t base_queue,
@@ -285,7 +285,7 @@ extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
/**
* Ring the packet output doorbell. This tells the packet
* output hardware that "len" command words have been added
- * to its pending list. This command includes the required
+ * to its pending list. This command includes the required
* CVMX_SYNCWS before the doorbell ring.
*
* @port: Port the packet is for
@@ -322,18 +322,18 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* The use_locking parameter allows the caller to use three
* possible locking modes.
* - CVMX_PKO_LOCK_NONE
- * - PKO doesn't do any locking. It is the responsibility
- * of the application to make sure that no other core
- * is accessing the same queue at the same time.
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ * is accessing the same queue at the same time.
* - CVMX_PKO_LOCK_ATOMIC_TAG
- * - PKO performs an atomic tagswitch to insure exclusive
- * access to the output queue. This will maintain
- * packet ordering on output.
+ *	  - PKO performs an atomic tagswitch to ensure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
* - CVMX_PKO_LOCK_CMD_QUEUE
- * - PKO uses the common command queue locks to insure
- * exclusive access to the output queue. This is a
- * memory based ll/sc. This is the most portable
- * locking mechanism.
+ *	  - PKO uses the common command queue locks to ensure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
*
* NOTE: If atomic locking is used, the POW entry CANNOT be
* descheduled, as it does not contain a valid WQE pointer.
@@ -341,7 +341,7 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*/
static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
@@ -351,11 +351,11 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
/*
* Must do a full switch here to handle all cases. We
* use a fake WQE pointer, as the POW does not access
- * this memory. The WQE pointer and group are only
+ * this memory. The WQE pointer and group are only
* used if this work is descheduled, which is not
* supported by the
* cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
- * combination. Note that this is a special case in
+ * combination. Note that this is a special case in
* which these fake values can be used - this is not a
* general technique.
*/
@@ -377,10 +377,10 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -418,12 +418,12 @@ static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
 * @addr: Physical address of a work queue entry or physical address
- * to zero on complete.
+ * to zero on complete.
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -588,7 +588,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
@@ -601,7 +601,7 @@ extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
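For illustration, a caller might apply the packet-rate limiter documented in this file like so; port 16 and the limits are placeholder values and the wrapper name is hypothetical.

/* Cap output port 16 at 10000 packets/s, allowing bursts of up to 32 packets. */
static void example_rate_limit(void)
{
	if (cvmx_pko_rate_limit_packets(16, 10000, 32) != 0)
		cvmx_dprintf("PKO rate limit setup failed\n");
}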
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 92742b241a51..4b4d0ecfd9eb 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -70,7 +70,7 @@ enum cvmx_pow_tag_type {
* The work queue entry from the order - NEVER tag switch from
* NULL to NULL
*/
- CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
/* A tag switch to NULL, and there is no space reserved in POW
* - NEVER tag switch to NULL_NULL
* - NEVER tag switch from NULL_NULL
@@ -90,7 +90,7 @@ typedef enum {
} cvmx_pow_wait_t;
/**
- * POW tag operations. These are used in the data stored to the POW.
+ * POW tag operations. These are used in the data stored to the POW.
*/
typedef enum {
/*
@@ -341,14 +341,14 @@ typedef union {
* lists. The two memory-input queue lists associated
* with each QOS level are:
*
- * - qosgrp = 0, qosgrp = 8: QOS0
- * - qosgrp = 1, qosgrp = 9: QOS1
- * - qosgrp = 2, qosgrp = 10: QOS2
- * - qosgrp = 3, qosgrp = 11: QOS3
- * - qosgrp = 4, qosgrp = 12: QOS4
- * - qosgrp = 5, qosgrp = 13: QOS5
- * - qosgrp = 6, qosgrp = 14: QOS6
- * - qosgrp = 7, qosgrp = 15: QOS7
+ * - qosgrp = 0, qosgrp = 8: QOS0
+ * - qosgrp = 1, qosgrp = 9: QOS1
+ * - qosgrp = 2, qosgrp = 10: QOS2
+ * - qosgrp = 3, qosgrp = 11: QOS3
+ * - qosgrp = 4, qosgrp = 12: QOS4
+ * - qosgrp = 5, qosgrp = 13: QOS5
+ * - qosgrp = 6, qosgrp = 14: QOS6
+ * - qosgrp = 7, qosgrp = 15: QOS7
*/
uint64_t qosgrp:4;
/*
@@ -942,11 +942,11 @@ typedef union {
* operations.
*
* NOTE: The following is the behavior of the pending switch bit at the PP
- * for POW stores (i.e. when did<7:3> == 0xc)
- * - did<2:0> == 0 => pending switch bit is set
- * - did<2:0> == 1 => no affect on the pending switch bit
- * - did<2:0> == 3 => pending switch bit is cleared
- * - did<2:0> == 7 => no affect on the pending switch bit
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ *	- did<2:0> == 1 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ *	- did<2:0> == 7 => no effect on the pending switch bit
* - did<2:0> == others => must not be used
 * - No other loads/stores have an effect on the pending switch bit
* - The switch bus from POW can clear the pending switch bit
@@ -1053,7 +1053,7 @@ static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
}
#ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
/**
@@ -1097,7 +1097,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
* so the caller must ensure that there is not a pending tag switch.
*
 * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1131,7 +1131,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
* requesting the new work.
*
 * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1148,7 +1148,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
}
/**
- * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
* This function waits for any previous tag switch to complete before
* requesting the null_rd.
*
@@ -1183,11 +1183,11 @@ static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
* there is not a pending tag switch.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1212,11 +1212,11 @@ static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
* tag switch to complete before requesting the new work.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1234,7 +1234,7 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
* to wait for the response.
*
 * @scr_addr: Scratch memory address to get the result from. Byte address,
- * must be 8 byte aligned.
+ * must be 8 byte aligned.
*
* Returns Returns the WQE from the scratch register, or NULL if no
* work was available.
@@ -1260,7 +1260,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
* @wqe_ptr: pointer to a work queue entry returned by the POW
*
* Returns 0 if pointer is valid
- * 1 if invalid (no work was returned)
+ * 1 if invalid (no work was returned)
*/
static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
{
@@ -1314,7 +1314,7 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1361,7 +1361,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1390,7 +1390,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
* previous tag switch has completed.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @group: group value for the work queue entry.
@@ -1429,7 +1429,7 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1468,10 +1468,10 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
* before requesting the tag switch.
*
* @wqp: pointer to work queue entry to submit. This entry is updated
- * to match the other parameters
+ * to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
- * @group: group value for the work queue entry.
+ * @group: group value for the work queue entry.
*/
static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1560,7 +1560,7 @@ static inline void cvmx_pow_tag_sw_null(void)
* unrelated to the tag that the core currently holds.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @qos: Input queue to add to.
@@ -1592,7 +1592,7 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
ptr.sio.offset = cvmx_ptr_to_phys(wqp);
/*
- * SYNC write to memory before the work submit. This is
+ * SYNC write to memory before the work submit. This is
* necessary as POW may read values from DRAM at this time.
*/
CVMX_SYNCWS;
@@ -1604,11 +1604,11 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
* indicates which groups each core will accept work from. There are
* 16 groups.
*
- * @core_num: core to apply mask to
+ * @core_num: core to apply mask to
* @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
- * representing groups 0-15.
- * Each 1 bit in the mask enables the core to accept work from
- * the corresponding group.
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
*/
static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
{
@@ -1623,14 +1623,14 @@ static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
* This function sets POW static priorities for a core. Each input queue has
* an associated priority value.
*
- * @core_num: core to apply priorities to
- * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
- * Highest priority is 0 and lowest is 7. A priority value
- * of 0xF instructs POW to skip the Input Queue when
- * scheduling to this specific core.
- * NOTE: priorities should not have gaps in values, meaning
- * {0,1,1,1,1,1,1,1} is a valid configuration while
- * {0,2,2,2,2,2,2,2} is not.
+ * @core_num: core to apply priorities to
+ * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
*/
static inline void cvmx_pow_set_priority(uint64_t core_num,
const uint8_t priority[])
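A hedged sketch combining the group-mask and static-priority calls documented above; core 0, the group mask and the priority vector (which follows the gap-free rule spelled out above) are placeholder choices, and the helper name is hypothetical.

/* On core 0: accept work only from groups 0-1 and favour input queue 0. */
static void example_pow_core_setup(void)
{
	const uint8_t priority[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };

	cvmx_pow_set_group_mask(0, 0x3);
	cvmx_pow_set_priority(0, priority);
}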
@@ -1708,8 +1708,8 @@ static inline void cvmx_pow_set_priority(uint64_t core_num,
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched_nocheck(
uint32_t tag,
@@ -1794,8 +1794,8 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1819,8 +1819,8 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
 * Deschedules the current work queue entry.
*
* @no_sched: no schedule flag value to be set on the work queue
- * entry. If this is set the entry will not be
- * rescheduled.
+ * entry. If this is set the entry will not be
+ * rescheduled.
*/
static inline void cvmx_pow_desched(uint64_t no_sched)
{
@@ -1863,7 +1863,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
*****************************************************/
/*
- * Number of bits of the tag used by software. The SW bits are always
+ * Number of bits of the tag used by software. The SW bits are always
 * a contiguous block of the high bits, starting at bit 31. The hardware
* bits are always the low bits. By default, the top 8 bits of the
* tag are reserved for software, and the low 24 are set by the IPD
@@ -1890,7 +1890,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* are defined here.
*/
/* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO 0x1
@@ -1905,12 +1905,12 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* This function creates a 32 bit tag value from the two values provided.
*
* @sw_bits: The upper bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * hw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * hw_bits parameter.
*
* @hw_bits: The lower bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * sw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * sw_bits parameter.
*
* Returns 32 bit value of the combined hw and sw bits.
*/
@@ -1957,7 +1957,7 @@ static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
*
* @buffer: Buffer to store capture into
* @buffer_size:
- * The size of the supplied buffer
+ * The size of the supplied buffer
*
* Returns Zero on success, negative on failure
*/
@@ -1968,7 +1968,7 @@ extern int cvmx_pow_capture(void *buffer, int buffer_size);
*
* @buffer: POW capture from cvmx_pow_capture()
* @buffer_size:
- * Size of the buffer
+ * Size of the buffer
*/
extern void cvmx_pow_display(void *buffer, int buffer_size);
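Taken together, capture and display give a simple debug dump. The buffer size below is chosen arbitrarily for the example and the helper name is hypothetical.

/* Snapshot the POW state and print it via the debug console. */
static void example_pow_dump(void)
{
	static uint8_t buf[4096];	/* size chosen arbitrarily for the example */

	if (cvmx_pow_capture(buf, sizeof(buf)) == 0)
		cvmx_pow_display(buf, sizeof(buf));
	else
		cvmx_dprintf("POW capture failed\n");
}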
diff --git a/arch/mips/include/asm/octeon/cvmx-scratch.h b/arch/mips/include/asm/octeon/cvmx-scratch.h
index 96b70cfd6245..8d21cc5e4e40 100644
--- a/arch/mips/include/asm/octeon/cvmx-scratch.h
+++ b/arch/mips/include/asm/octeon/cvmx-scratch.h
@@ -39,7 +39,7 @@
* Note: This define must be a long, not a long long in order to
* compile without warnings for both 32bit and 64bit.
*/
-#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
/**
* Reads an 8 bit value from the processor local scratchpad memory.
diff --git a/arch/mips/include/asm/octeon/cvmx-spi.h b/arch/mips/include/asm/octeon/cvmx-spi.h
index 3bf53b537bcf..d5038cc4b475 100644
--- a/arch/mips/include/asm/octeon/cvmx-spi.h
+++ b/arch/mips/include/asm/octeon/cvmx-spi.h
@@ -84,11 +84,11 @@ static inline int cvmx_spi_is_spi_interface(int interface)
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
@@ -102,11 +102,11 @@ extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
 * with its corresponding system.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
 * Returns Zero on success, negative on failure.
*/
@@ -154,7 +154,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
/**
* Get current SPI4 initialization callbacks
*
- * @callbacks: Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
@@ -171,11 +171,11 @@ extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
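A heavily hedged sketch of swapping in a custom reset hook via cvmx_spi_set_callbacks(): the .reset_cb member name is an assumption about cvmx_spi_callbacks_t, the example_ names are hypothetical, and a real caller would first copy the current callback set so the other hooks are preserved.

/* Wrap the default reset callback with a debug print (member name assumed). */
static int example_spi_reset(int interface, cvmx_spi_mode_t mode)
{
	cvmx_dprintf("resetting SPI interface %d\n", interface);
	return cvmx_spi_reset_cb(interface, mode);	/* fall through to the default */
}

static void example_install_spi_hooks(void)
{
	cvmx_spi_callbacks_t cb = {
		.reset_cb = example_spi_reset,	/* assumed member name */
	};

	/* The remaining hooks are left zeroed here purely for brevity. */
	cvmx_spi_set_callbacks(&cb);
}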
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
@@ -187,11 +187,11 @@ extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
* detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -204,11 +204,11 @@ extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -221,11 +221,11 @@ extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -238,11 +238,11 @@ extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -255,11 +255,11 @@ extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
index a672abb1bc4f..4f09cff8b8c0 100644
--- a/arch/mips/include/asm/octeon/cvmx-spinlock.h
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -26,7 +26,7 @@
***********************license end**************************************/
/**
- * Implementation of spinlocks for Octeon CVMX. Although similar in
+ * Implementation of spinlocks for Octeon CVMX. Although similar in
* function to Linux kernel spinlocks, they are not compatible.
* Octeon CVMX spinlocks are only used to synchronize with the boot
* monitor and other non-Linux programs running in the system.
@@ -50,8 +50,8 @@ typedef struct {
} cvmx_spinlock_t;
/* note - macros not expanded in inline ASM, so values hardcoded */
-#define CVMX_SPINLOCK_UNLOCKED_VAL 0
-#define CVMX_SPINLOCK_LOCKED_VAL 1
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
@@ -96,7 +96,7 @@ static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
* @lock: pointer to lock structure
*
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
@@ -104,16 +104,16 @@ static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bnez %[tmp], 2f \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set reorder \n" :
+ " bnez %[tmp], 2f \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -129,14 +129,14 @@ static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
- " bnez %[tmp], 1b \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n" :
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -163,17 +163,17 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
unsigned int tmp;
unsigned int sav;
- __asm__ __volatile__(".set noreorder \n"
- ".set noat \n"
+ __asm__ __volatile__(".set noreorder \n"
+ ".set noat \n"
"1: ll %[tmp], %[val] \n"
- " bbit1 %[tmp], 31, 1b \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
: : "memory");
@@ -187,7 +187,7 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
*
* @word: word to lock bit 31 of
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
@@ -198,15 +198,15 @@ static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
".set noat\n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bbit1 %[tmp], 31, 2f \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 2f \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp)
: : "memory");
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
index 61dd5741afe4..2131197422e5 100644
--- a/arch/mips/include/asm/octeon/cvmx-sysinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -85,7 +85,7 @@ struct cvmx_sysinfo {
char board_serial_number[OCTEON_SERIAL_LEN];
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These values will be 0 if
* CF is not present. Note that these addresses are physical
* addresses, and it is up to the application to use the
@@ -123,25 +123,25 @@ extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
/**
* This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
+ *			(bootmem descriptor)
+ * @board_type:	Octeon board type enumeration
*
* @board_rev_major:
- * Board major revision
+ * Board major revision
* @board_rev_minor:
- * Board minor revision
+ * Board minor revision
* @cpu_clock_hz:
- * CPU clock freqency in hertz
+ *		CPU clock frequency in hertz
*
* Returns 0: Failure
- * 1: success
+ * 1: success
*/
extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index df762389e271..aa0d3d0de75c 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -101,23 +101,23 @@ typedef union {
* - 1 = Malformed L4
* - 2 = L4 Checksum Error: the L4 checksum value is
* - 3 = UDP Length Error: The UDP length field would
- * make the UDP data longer than what remains in
- * the IP packet (as defined by the IP header
- * length field).
+ * make the UDP data longer than what remains in
+ * the IP packet (as defined by the IP header
+ * length field).
* - 4 = Bad L4 Port: either the source or destination
- * TCP/UDP port is 0.
+ * TCP/UDP port is 0.
* - 8 = TCP FIN Only: the packet is TCP and only the
- * FIN flag set.
+ * FIN flag set.
* - 9 = TCP No Flags: the packet is TCP and no flags
- * are set.
+ * are set.
* - 10 = TCP FIN RST: the packet is TCP and both FIN
- * and RST are set.
+ * and RST are set.
* - 11 = TCP SYN URG: the packet is TCP and both SYN
- * and URG are set.
+ * and URG are set.
* - 12 = TCP SYN RST: the packet is TCP and both SYN
- * and RST are set.
+ * and RST are set.
* - 13 = TCP SYN FIN: the packet is TCP and both SYN
- * and FIN are set.
+ * and FIN are set.
*/
uint64_t L4_error:1;
/* set if the packet is a fragment */
@@ -127,16 +127,16 @@ typedef union {
* failure indicated in err_code below, decode:
*
* - 1 = Not IP: the IP version field is neither 4 nor
- * 6.
+ * 6.
* - 2 = IPv4 Header Checksum Error: the IPv4 header
- * has a checksum violation.
+ * has a checksum violation.
* - 3 = IP Malformed Header: the packet is not long
- * enough to contain the IP header.
+ * enough to contain the IP header.
* - 4 = IP Malformed: the packet is not long enough
* to contain the bytes indicated by the IP
* header. Pad is allowed.
* - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
- * Hop Count field are zero.
+ * Hop Count field are zero.
* - 6 = IP Options
*/
uint64_t IP_exc:1;
@@ -243,46 +243,46 @@ typedef union {
* decode:
*
* - 1 = partial error: a packet was partially
- * received, but internal buffering / bandwidth
- * was not adequate to receive the entire
- * packet.
+ * received, but internal buffering / bandwidth
+ * was not adequate to receive the entire
+ * packet.
* - 2 = jabber error: the RGMII packet was too large
- * and is truncated.
+ * and is truncated.
* - 3 = overrun error: the RGMII packet is longer
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 4 = oversize error: the RGMII packet is longer
- * than allowed.
+ * than allowed.
* - 5 = alignment error: the RGMII packet is not an
- * integer number of bytes
- * and had an FCS error (100M and 10M only).
+ * integer number of bytes
+ * and had an FCS error (100M and 10M only).
* - 6 = fragment error: the RGMII packet is shorter
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 7 = GMX FCS error: the RGMII packet had an FCS
- * error.
+ * error.
* - 8 = undersize error: the RGMII packet is shorter
- * than allowed.
+ * than allowed.
* - 9 = extend error: the RGMII packet had an extend
- * error.
+ * error.
* - 10 = length mismatch error: the RGMII packet had
- * a length that did not match the length field
- * in the L2 HDR.
+ * a length that did not match the length field
+ * in the L2 HDR.
* - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
- * packet had one or more data reception errors
- * (RXERR) or the SPI4 packet had one or more
- * DIP4 errors.
+ * packet had one or more data reception errors
+ * (RXERR) or the SPI4 packet had one or more
+ * DIP4 errors.
* - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
- * packet was not large enough to cover the
- * skipped bytes or the SPI4 packet was
- * terminated with an About EOPS.
+ * packet was not large enough to cover the
+ * skipped bytes or the SPI4 packet was
+ * terminated with an About EOPS.
* - 13 = RGMII nibble error/SPI4 Port NXA Error: the
- * RGMII packet had a studder error (data not
- * repeated - 10/100M only) or the SPI4 packet
- * was sent to an NXA.
+ * RGMII packet had a studder error (data not
+ * repeated - 10/100M only) or the SPI4 packet
+ * was sent to an NXA.
* - 16 = FCS error: a SPI4.2 packet had an FCS error.
* - 17 = Skip error: a packet was not large enough to
- * cover the skipped bytes.
+ * cover the skipped bytes.
* - 18 = L2 header malformed: the packet is not long
- * enough to contain the L2.
+ * enough to contain the L2.
*/
uint64_t rcv_error:1;
@@ -309,7 +309,7 @@ typedef struct {
/*****************************************************************
* WORD 0
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -323,14 +323,14 @@ typedef struct {
/**
* Next pointer used by hardware for list maintenance.
* May be written/read by HW before the work queue
- * entry is scheduled to a PP
+ * entry is scheduled to a PP
* (Only 36 bits used in Octeon 1)
*/
uint64_t next_ptr:40;
/*****************************************************************
* WORD 1
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -362,8 +362,8 @@ typedef struct {
/**
* WORD 2 HW WRITE: the following 64-bits are filled in by
- * hardware when a packet arrives This indicates a variety of
- * status and error conditions.
+	 * hardware when a packet arrives. This indicates a variety of
+ * status and error conditions.
*/
cvmx_pip_wqe_word2 word2;
@@ -373,15 +373,15 @@ typedef struct {
union cvmx_buf_ptr packet_ptr;
/**
- * HW WRITE: octeon will fill in a programmable amount from the
- * packet, up to (at most, but perhaps less) the amount
- * needed to fill the work queue entry to 128 bytes
+ * HW WRITE: octeon will fill in a programmable amount from the
+ * packet, up to (at most, but perhaps less) the amount
+ * needed to fill the work queue entry to 128 bytes
*
- * If the packet is recognized to be IP, the hardware starts
- * (except that the IPv4 header is padded for appropriate
- * alignment) writing here where the IP header starts. If the
- * packet is not recognized to be IP, the hardware starts
- * writing the beginning of the packet here.
+ * If the packet is recognized to be IP, the hardware starts
+ * (except that the IPv4 header is padded for appropriate
+ * alignment) writing here where the IP header starts. If the
+ * packet is not recognized to be IP, the hardware starts
+ * writing the beginning of the packet here.
*/
uint8_t packet_data[96];
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index db58beab6cb2..f991e7701d3d 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -76,14 +76,14 @@ enum cvmx_mips_space {
#endif
#if CVMX_ENABLE_DEBUG_PRINTS
-#define cvmx_dprintf printk
+#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif
-#define CVMX_MAX_CORES (16)
-#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
-#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
+#define CVMX_MAX_CORES (16)
+#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
@@ -133,8 +133,8 @@ static inline uint64_t cvmx_build_io_address(uint64_t major_did,
*
* Example: cvmx_build_bits(39,24,value)
* <pre>
- * 6 5 4 3 3 2 1
- * 3 5 7 9 1 3 5 7 0
+ * 6 5 4 3 3 2 1
+ * 3 5 7 9 1 3 5 7 0
* +-------+-------+-------+-------+-------+-------+-------+------+
* 000000000000000000000000___________value000000000000000000000000
* </pre>
@@ -183,7 +183,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
* memory pointer (void *).
*
* @physical_address:
- * Hardware physical address to memory
+ * Hardware physical address to memory
* Returns Pointer to memory
*/
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
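These two helpers convert between kernel pointers and the physical addresses hardware blocks consume; a minimal round-trip sketch (the buffer argument is purely illustrative):

#include <asm/octeon/cvmx.h>

/* Round-trip a kernel pointer through its physical address. */
static inline int example_phys_roundtrip(void *buf)
{
	uint64_t phys = cvmx_ptr_to_phys(buf);

	return cvmx_phys_to_ptr(phys) == buf;
}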
@@ -207,10 +207,10 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_WRITE64(TYPE, ST) \
-static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
-{ \
- *CASTPTR(volatile TYPE##_t, addr) = val; \
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
}
@@ -221,19 +221,19 @@ static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
/* We have a full 64bit ABI. Reading from a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_READ64(TYPE, LT) \
-static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
-{ \
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
return *CASTPTR(volatile TYPE##_t, addr); \
}
/* The following defines 8 functions for writing to a 64bit address. Each
takes two arguments, the address and the value to write.
- cvmx_write64_int64 cvmx_write64_uint64
- cvmx_write64_int32 cvmx_write64_uint32
- cvmx_write64_int16 cvmx_write64_uint16
- cvmx_write64_int8 cvmx_write64_uint8 */
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
@@ -246,10 +246,10 @@ CVMX_BUILD_WRITE64(uint8, "sb");
/* The following defines 8 functions for reading from a 64bit address. Each
takes the address as the only argument
- cvmx_read64_int64 cvmx_read64_uint64
- cvmx_read64_int32 cvmx_read64_uint32
- cvmx_read64_int16 cvmx_read64_uint16
- cvmx_read64_int8 cvmx_read64_uint8 */
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
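The macro invocations above generate plain load/store accessors; a hedged usage sketch follows (the address argument is a stand-in, not a specific CSR):

#include <asm/octeon/cvmx.h>

/* Store a 64-bit value at a physical/IO address and read it back. */
static inline uint64_t example_rw64(uint64_t addr, uint64_t val)
{
	cvmx_write64_uint64(addr, val);
	return cvmx_read64_uint64(addr);
}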
@@ -389,7 +389,7 @@ static inline void cvmx_wait(uint64_t cycles)
/**
* Reads a chip global cycle counter. This counts CPU cycles since
- * chip reset. The counter is 64 bit.
+ * chip reset. The counter is 64 bit.
 * This register does not exist on CN38XX pass 1 silicon
*
* Returns Global chip cycle count since chip reset.
@@ -453,7 +453,7 @@ static inline uint32_t cvmx_octeon_num_cores(void)
/**
* Read a byte of fuse data
- * @byte_addr: address to read
+ * @byte_addr: address to read
*
* Returns fuse value: 0 or 1
*/
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8008da2f8779..90e05a8d4b15 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -35,7 +35,7 @@
#include <asm/octeon/cvmx-rnm-defs.h>
enum octeon_feature {
- /* CN68XX uses port kinds for packet interface */
+ /* CN68XX uses port kinds for packet interface */
OCTEON_FEATURE_PKND,
/* CN68XX has different fields in word0 - word2 */
OCTEON_FEATURE_CN68XX_WQE,
@@ -51,7 +51,7 @@ enum octeon_feature {
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
- /* Does this Octeon support SRIOs */
+ /* Does this Octeon support SRIOs */
OCTEON_FEATURE_SRIO,
/* Does this Octeon support Interlaken */
OCTEON_FEATURE_ILK,
@@ -75,7 +75,7 @@ enum octeon_feature {
/* Octeon MDIO block supports clause 45 transactions for 10
* Gig support */
OCTEON_FEATURE_MDIO_CLAUSE_45,
- /*
+ /*
* CN52XX and CN56XX used a block named NPEI for PCIe
* access. Newer chips replaced this with SLI+DPI.
*/
@@ -94,10 +94,10 @@ static inline int cvmx_fuse_read(int fuse);
* be kept out of fast path code.
*
* @feature: Feature to check for. This should always be a constant so the
- * compiler can remove the switch statement through optimization.
+ * compiler can remove the switch statement through optimization.
*
* Returns Non zero if the feature exists. Zero if the feature does not
- * exist.
+ * exist.
*/
static inline int octeon_has_feature(enum octeon_feature feature)
{
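For reference, a hedged sketch of how callers use this check with a constant feature so the compiler can fold the switch away; OCTEON_FEATURE_PCIE is one of the enum values listed above, and the helper name is made up for illustration.

#include <asm/octeon/octeon-feature.h>

static inline void example_check_pcie(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* PCIe-capable SoC: bus setup would go here. */
	}
}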
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 349bb2ba840c..e2c122c6a657 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -29,7 +29,7 @@
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
- * to determine what model of chip the software is running on. Models
+ * to determine what model of chip the software is running on. Models
* ending in 'XX' match multiple models (families), while specific
* models match only that model. If a pass (revision) is specified,
* then only that revision will be matched. Care should be taken when
@@ -40,183 +40,183 @@
 * subject to change at any time without notice.
*
* NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
- * macros should be used outside of this file. All other macros are
+ * macros should be used outside of this file. All other macros are
* for internal use only, and may change without notice.
*/
-#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_FAMILY_MASK 0x00ffff00
/* Flag bits in top byte */
/* Ignores revision in model checks */
-#define OM_IGNORE_REVISION 0x01000000
+#define OM_IGNORE_REVISION 0x01000000
/* Check submodels */
-#define OM_CHECK_SUBMODEL 0x02000000
+#define OM_CHECK_SUBMODEL 0x02000000
/* Match all models prior to the one specified */
#define OM_MATCH_PREVIOUS_MODELS 0x04000000
/* Ignores the minor revision on newer parts */
#define OM_IGNORE_MINOR_REVISION 0x08000000
-#define OM_FLAG_MASK 0xff000000
+#define OM_FLAG_MASK 0xff000000
/* Match all cn5XXX Octeon models. */
-#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
/* Match all cn6XXX Octeon models. */
-#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
-#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
/*
* CNF7XXX models with new revision encoding
*/
-#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
-#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN6XXX models with new revision encoding
*/
-#define OCTEON_CN68XX_PASS1_0 0x000d9100
-#define OCTEON_CN68XX_PASS1_1 0x000d9101
-#define OCTEON_CN68XX_PASS1_2 0x000d9102
-#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS1_2 0x000d9102
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
-#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
-#define OCTEON_CN66XX_PASS1_0 0x000d9200
-#define OCTEON_CN66XX_PASS1_2 0x000d9202
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
-#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS1_0 0x000d9000
-#define OCTEON_CN63XX_PASS1_1 0x000d9001
-#define OCTEON_CN63XX_PASS1_2 0x000d9002
-#define OCTEON_CN63XX_PASS2_0 0x000d9008
-#define OCTEON_CN63XX_PASS2_1 0x000d9009
-#define OCTEON_CN63XX_PASS2_2 0x000d900a
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
-#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
-#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN5XXX models with new revision encoding
*/
-#define OCTEON_CN58XX_PASS1_0 0x000d0300
-#define OCTEON_CN58XX_PASS1_1 0x000d0301
-#define OCTEON_CN58XX_PASS1_2 0x000d0303
-#define OCTEON_CN58XX_PASS2_0 0x000d0308
-#define OCTEON_CN58XX_PASS2_1 0x000d0309
-#define OCTEON_CN58XX_PASS2_2 0x000d030a
-#define OCTEON_CN58XX_PASS2_3 0x000d030b
-
-#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
-#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
-
-#define OCTEON_CN56XX_PASS1_0 0x000d0400
-#define OCTEON_CN56XX_PASS1_1 0x000d0401
-#define OCTEON_CN56XX_PASS2_0 0x000d0408
-#define OCTEON_CN56XX_PASS2_1 0x000d0409
-
-#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
-#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
-
-#define OCTEON_CN57XX OCTEON_CN56XX
-#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN55XX OCTEON_CN56XX
-#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN54XX OCTEON_CN56XX
-#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN50XX_PASS1_0 0x000d0600
-
-#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
/*
* NOTE: Octeon CN5000F model is not identifiable using the
 * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
*/
-#define OCTEON_CN52XX_PASS1_0 0x000d0700
-#define OCTEON_CN52XX_PASS2_0 0x000d0708
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
-#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
-#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
/*
 * CN3XXX models with old revision encoding
*/
-#define OCTEON_CN38XX_PASS1 0x000d0000
-#define OCTEON_CN38XX_PASS2 0x000d0001
-#define OCTEON_CN38XX_PASS3 0x000d0003
-#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+#define OCTEON_CN38XX_PASS1 0x000d0000
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
-#define OCTEON_CN36XX OCTEON_CN38XX
-#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
-#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
-#define OCTEON_CN31XX_PASS1 0x000d0100
-#define OCTEON_CN31XX_PASS1_1 0x000d0102
-#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
/*
 * This model is only used for internal checks; it is not a valid
* model for the OCTEON_MODEL environment variable. This matches the
* CN3010 and CN3005 but NOT the CN3020.
*/
-#define OCTEON_CN30XX_PASS1 0x000d0200
-#define OCTEON_CN30XX_PASS1_1 0x000d0202
-#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
-#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
/*
* This matches the complete family of CN3xxx CPUs, and not subsequent
* models
*/
-#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
-#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
-#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
/* These are used to cover entire families of OCTEON processors */
#define OCTEON_FAM_1 (OCTEON_CN3XXX)
@@ -243,18 +243,18 @@
*/
/* Masks used for the various types of model/family/revision matching */
-#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
-#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
/* CN5XXX and later use different layout of bits in the revision ID field */
-#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
-#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
/* forward declarations */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
@@ -264,7 +264,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
/* NOTE: This for internal use only! */
#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
-((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
+((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -276,7 +276,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
)) || \
- (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
+ (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -320,7 +320,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
* Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
* is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
* I.e.:
- * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
*/
#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
#define OCTEON_IS_COMMON_BINARY() 1
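A short usage sketch for the runtime model check (hypothetical caller, not part of the patch); the argument should be one of the OCTEON_CN* model macros defined above.

#include <asm/octeon/octeon-model.h>

/* Non-zero on any CN63XX pass 2 part, regardless of minor revision. */
static inline int example_is_cn63xx_pass2(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X);
}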
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 254e9954ed71..a2eed23c49a9 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -75,15 +75,15 @@ struct octeon_boot_descriptor {
uint32_t argc;
uint32_t argv[OCTEON_ARGV_MAX_ARGS];
-#define BOOT_FLAG_INIT_CORE (1 << 0)
-#define OCTEON_BL_FLAG_DEBUG (1 << 1)
-#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
/* If set, use uart1 for console */
-#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
/* If set, use PCI console */
-#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
/* Call exit on break on serial port */
-#define OCTEON_BL_FLAG_BREAK (1 << 5)
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
uint32_t flags;
uint32_t core_mask;
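A hedged sketch of how the bootloader flag bits above might be consumed (the helper name is invented for illustration):

#include <asm/octeon/octeon.h>

/* Report which console the bootloader flags ask for. */
static const char *example_console_hint(const struct octeon_boot_descriptor *d)
{
	if (d->flags & OCTEON_BL_FLAG_CONSOLE_PCI)
		return "pci";
	if (d->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
		return "uart1";
	return "uart0";
}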
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index c66734bd3382..64ba56a02843 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -22,7 +22,7 @@
#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
/*
- * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
+ * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
* place BAR1 so it is the same for both.
*/
#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)