author	H. Peter Anvin <hpa@zytor.com>	2009-05-09 02:45:17 +0400
committer	H. Peter Anvin <hpa@zytor.com>	2009-05-09 04:16:23 +0400
commit	5f64ec64e7f9b246c0a94f34cdf7782f98a6e55d (patch)
tree	4dc7b26bfd240866bd32c9df7f72910328697ce4 /arch/x86/boot
parent	bd2a36984c50bb546a7d04cb395fddcf98a1092c (diff)
download	linux-5f64ec64e7f9b246c0a94f34cdf7782f98a6e55d.tar.xz
x86, boot: stylistic cleanups for boot/compressed/head_32.S
Reformat arch/x86/boot/compressed/head_32.S to be closer to currently
preferred kernel assembly style, that is:

- opcode and operand separated by tab
- operands separated by ", "
- C-style comments

This also makes it more similar to head_64.S.

[ Impact: cleanup, no object code change ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
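For illustration, the style rules above amount to rewrites like the following (a hypothetical before/after sketch, not an excerpt from the patch itself):

	movl %eax,%ds			# old: single space after the opcode, no space after ",", "#" comments
	movl	%eax, %ds		/* new: tab after the opcode, ", " between operands, C-style comment */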
Diffstat (limited to 'arch/x86/boot')
-rw-r--r--	arch/x86/boot/compressed/head_32.S	170
1 file changed, 89 insertions(+), 81 deletions(-)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index e3398f3d1b34..7bd7766ffabf 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -12,16 +12,16 @@
* the page directory. [According to comments etc elsewhere on a compressed
* kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
*
- * Page 0 is deliberately kept safe, since System Management Mode code in
+ * Page 0 is deliberately kept safe, since System Management Mode code in
* laptops may need to access the BIOS data stored there. This is also
- * useful for future device drivers that either access the BIOS via VM86
+ * useful for future device drivers that either access the BIOS via VM86
* mode.
*/
/*
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
-.text
+ .text
#include <linux/linkage.h>
#include <asm/segment.h>
@@ -29,75 +29,80 @@
#include <asm/boot.h>
#include <asm/asm-offsets.h>
-.section ".text.head","ax",@progbits
+ .section ".text.head","ax",@progbits
ENTRY(startup_32)
cld
- /* test KEEP_SEGMENTS flag to see if the bootloader is asking
- * us to not reload segments */
- testb $(1<<6), BP_loadflags(%esi)
- jnz 1f
+ /*
+ * Test KEEP_SEGMENTS flag to see if the bootloader is asking
+ * us to not reload segments
+ */
+ testb $(1<<6), BP_loadflags(%esi)
+ jnz 1f
cli
- movl $(__BOOT_DS),%eax
- movl %eax,%ds
- movl %eax,%es
- movl %eax,%fs
- movl %eax,%gs
- movl %eax,%ss
+ movl $__BOOT_DS, %eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %eax, %fs
+ movl %eax, %gs
+ movl %eax, %ss
1:
-/* Calculate the delta between where we were compiled to run
+/*
+ * Calculate the delta between where we were compiled to run
* at and where we were actually loaded at. This can only be done
* with a short local call on x86. Nothing else will tell us what
* address we are running at. The reserved chunk of the real-mode
* data at 0x1e4 (defined as a scratch field) are used as the stack
* for this calculation. Only 4 bytes are needed.
*/
- leal (BP_scratch+4)(%esi), %esp
- call 1f
-1: popl %ebp
- subl $1b, %ebp
+ leal (BP_scratch+4)(%esi), %esp
+ call 1f
+1: popl %ebp
+ subl $1b, %ebp
-/* %ebp contains the address we are loaded at by the boot loader and %ebx
+/*
+ * %ebp contains the address we are loaded at by the boot loader and %ebx
* contains the address where we should move the kernel image temporarily
* for safe in-place decompression.
*/
#ifdef CONFIG_RELOCATABLE
- movl %ebp, %ebx
+ movl %ebp, %ebx
addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebx
andl $(~(CONFIG_PHYSICAL_ALIGN - 1)), %ebx
#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ movl $LOAD_PHYSICAL_ADDR, %ebx
#endif
/* Replace the compressed data size with the uncompressed size */
- subl input_len(%ebp), %ebx
- movl output_len(%ebp), %eax
- addl %eax, %ebx
+ subl input_len(%ebp), %ebx
+ movl output_len(%ebp), %eax
+ addl %eax, %ebx
/* Add 8 bytes for every 32K input block */
- shrl $12, %eax
- addl %eax, %ebx
+ shrl $12, %eax
+ addl %eax, %ebx
/* Add 32K + 18 bytes of extra slack */
- addl $(32768 + 18), %ebx
+ addl $(32768 + 18), %ebx
/* Align on a 4K boundary */
- addl $4095, %ebx
- andl $~4095, %ebx
+ addl $4095, %ebx
+ andl $~4095, %ebx
-/* Copy the compressed kernel to the end of our buffer
+/*
+ * Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
- pushl %esi
- leal _ebss(%ebp), %esi
- leal _ebss(%ebx), %edi
- movl $(_ebss - startup_32), %ecx
+ pushl %esi
+ leal _ebss(%ebp), %esi
+ leal _ebss(%ebx), %edi
+ movl $(_ebss - startup_32), %ecx
std
- rep
- movsb
+ rep movsb
cld
- popl %esi
+ popl %esi
-/* Compute the kernel start address.
+/*
+ * Compute the kernel start address.
*/
#ifdef CONFIG_RELOCATABLE
addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
@@ -109,81 +114,84 @@ ENTRY(startup_32)
/*
* Jump to the relocated address.
*/
- leal relocated(%ebx), %eax
- jmp *%eax
+ leal relocated(%ebx), %eax
+ jmp *%eax
ENDPROC(startup_32)
-.section ".text"
+ .text
relocated:
/*
* Clear BSS
*/
- xorl %eax,%eax
- leal _edata(%ebx),%edi
- leal _ebss(%ebx), %ecx
- subl %edi,%ecx
+ xorl %eax, %eax
+ leal _edata(%ebx), %edi
+ leal _ebss(%ebx), %ecx
+ subl %edi, %ecx
cld
- rep
- stosb
+ rep stosb
/*
* Setup the stack for the decompressor
*/
- leal boot_stack_end(%ebx), %esp
+ leal boot_stack_end(%ebx), %esp
/*
* Do the decompression, and jump to the new kernel..
*/
- movl output_len(%ebx), %eax
- pushl %eax
- # push arguments for decompress_kernel:
- pushl %ebp # output address
- movl input_len(%ebx), %eax
- pushl %eax # input_len
- leal input_data(%ebx), %eax
- pushl %eax # input_data
- leal boot_heap(%ebx), %eax
- pushl %eax # heap area
- pushl %esi # real mode pointer
- call decompress_kernel
- addl $20, %esp
- popl %ecx
+ movl output_len(%ebx), %eax
+ pushl %eax
+ /* push arguments for decompress_kernel: */
+ pushl %ebp /* output address */
+ movl input_len(%ebx), %eax
+ pushl %eax /* input_len */
+ leal input_data(%ebx), %eax
+ pushl %eax /* input_data */
+ leal boot_heap(%ebx), %eax
+ pushl %eax /* heap area */
+ pushl %esi /* real mode pointer */
+ call decompress_kernel
+ addl $20, %esp
+ popl %ecx
#if CONFIG_RELOCATABLE
-/* Find the address of the relocations.
+/*
+ * Find the address of the relocations.
*/
- movl %ebp, %edi
- addl %ecx, %edi
+ movl %ebp, %edi
+ addl %ecx, %edi
-/* Calculate the delta between where vmlinux was compiled to run
+/*
+ * Calculate the delta between where vmlinux was compiled to run
* and where it was actually loaded.
*/
- movl %ebp, %ebx
- subl $LOAD_PHYSICAL_ADDR, %ebx
- jz 2f /* Nothing to be done if loaded at compiled addr. */
+ movl %ebp, %ebx
+ subl $LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
/*
* Process relocations.
*/
-1: subl $4, %edi
- movl 0(%edi), %ecx
- testl %ecx, %ecx
- jz 2f
- addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
- jmp 1b
+1: subl $4, %edi
+ movl (%edi), %ecx
+ testl %ecx, %ecx
+ jz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
2:
#endif
/*
* Jump to the decompressed kernel.
*/
- xorl %ebx,%ebx
- jmp *%ebp
+ xorl %ebx, %ebx
+ jmp *%ebp
-.bss
-/* Stack and heap for uncompression */
-.balign 4
+/*
+ * Stack and heap for uncompression
+ */
+ .bss
+ .balign 4
boot_heap:
.fill BOOT_HEAP_SIZE, 1, 0
boot_stack: