linux/arch/alpha/kernel/vmlinux.lds.S
Geoffrey Thomas 9d93f00580 alpha: Clean up linker script using new linker script macros.
Note that .data.page_aligned and .data.cacheline_aligned are now after
_data; it was probably a bug that they were before it.
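
As a minimal illustration of why that placement matters (simplified stand-ins, not the
kernel's own headers or symbol names): data annotated for page or cacheline alignment
lands in exactly these sections, and it is ordinary writable data, so it belongs in the
[_data, _edata) range that RW_DATA_SECTION now emits it into.

	/* Simplified stand-ins for the kernel's page-aligned / cacheline-aligned
	 * data annotations (illustration only; not the real macro names). */
	#define page_aligned_data \
		__attribute__((__section__(".data.page_aligned"), __aligned__(8192)))
	#define cacheline_aligned_data \
		__attribute__((__section__(".data.cacheline_aligned"), __aligned__(64)))

	/* Writable, initialized data: with the new layout both sections fall
	 * after the _data symbol, inside the normal data segment. */
	static char scratch_page[8192] page_aligned_data = { 1 };
	static long hot_counters[8] cacheline_aligned_data = { 2 };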

Also, some explicit ALIGN(8)'s between various initcall sections were
removed; this should be harmless as the implicit alignment of
initcall_t was already 8.
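
To make the alignment claim concrete, here is a small standalone C check. The
initcall_t typedef matches include/linux/init.h; the program around it is only an
illustration, not kernel code.

	#include <stdio.h>

	/* initcall_t as the kernel defines it: a plain function pointer. */
	typedef int (*initcall_t)(void);

	int main(void)
	{
		/* On a 64-bit target such as alpha a function pointer is 8 bytes
		 * with natural 8-byte alignment, so the arrays of initcalls are
		 * already 8-aligned and explicit ALIGN(8)s between the per-level
		 * initcall sections add nothing. */
		printf("sizeof(initcall_t)   = %zu\n", sizeof(initcall_t));
		printf("_Alignof(initcall_t) = %zu\n", (size_t)_Alignof(initcall_t));
		return 0;
	}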

Signed-off-by: Geoffrey Thomas <geofft@ksplice.com>
Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2009-09-24 17:16:22 -07:00

#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf64-alpha")
OUTPUT_ARCH(alpha)
ENTRY(__start)
PHDRS { kernel PT_LOAD; note PT_NOTE; }
jiffies = jiffies_64;

SECTIONS
{
#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
	. = 0xfffffc0000310000;
#else
	. = 0xfffffc0001010000;
#endif

	_text = .;	/* Text and read-only data */
	.text : {
		HEAD_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		*(.fixup)
		*(.gnu.warning)
	} :kernel
	_etext = .;	/* End of text section */

	NOTES :kernel :note
	.dummy : {
		*(.dummy)
	} :kernel

	RODATA
	EXCEPTION_TABLE(16)

	/* Will be freed after init */
	__init_begin = ALIGN(PAGE_SIZE);
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	PERCPU(PAGE_SIZE)
	/* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
	   needed for the THREAD_SIZE aligned init_task gets freed after init */
	. = ALIGN(THREAD_SIZE);
	__init_end = .;
	/* Freed after init ends here */

	_data = .;
	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)

	.got : {
		*(.got)
	}
	.sdata : {
		*(.sdata)
	}
	_edata = .;	/* End of data section */

	BSS_SECTION(0, 0, 0)
	_end = .;

	.mdebug 0 : {
		*(.mdebug)
	}
	.note 0 : {
		*(.note)
	}

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
}