summaryrefslogtreecommitdiff
path: root/libguile
diff options
context:
space:
mode:
authorAndy Wingo <wingo@pobox.com>2017-03-08 22:39:29 +0100
committerAndy Wingo <wingo@pobox.com>2017-03-08 22:49:24 +0100
commitc62f0b025649eadc28cb1cb1afd1be183414b9b0 (patch)
treed44e2a055a4bdcb2f1ba28c7e73dc765e34e22d0 /libguile
parent8157c2a3acc61b561903957f69e7e83163d5a1b5 (diff)
64KB segment alignment
* module/system/vm/linker.scm (*lcm-page-size*): Rename from *page-size*, change to 64 KB. * libguile/loader.c (load_thunk_from_memory): Only require page size alignment, knowing that although Guile might emit ELF with 64k alignment, it only really needs page alignment.
Diffstat (limited to 'libguile')
-rw-r--r-- libguile/loader.c 15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/libguile/loader.c b/libguile/loader.c
index 558a722ea..7b1adc9c9 100644
--- a/libguile/loader.c
+++ b/libguile/loader.c
@@ -420,7 +420,18 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
if (dynamic_segment < 0)
ABORT ("no PT_DYNAMIC segment");
- if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
+ /* The ELF images that Guile currently emits have segments that are
+ aligned on 64 KB boundaries, which might be larger than the actual
+ page size (usually 4 KB). However Guile doesn't actually use the
+ absolute addresses at all. All Guile needs is for the loaded image
+ to be able to make the data section writable (for the mmap path),
+ and for that the segment just needs to be page-aligned, and a page
+ is always bigger than Guile's minimum alignment. Since we know
+ (for the mmap path) that the base _is_ page-aligned, we proceed
+ ahead even if the image alignment is greater than the page
+ size. */
+ if (!IS_ALIGNED ((scm_t_uintptr) data, alignment)
+ && !IS_ALIGNED (alignment, page_size))
ABORT ("incorrectly aligned base");
/* Allow writes to writable pages. */
@@ -433,7 +444,7 @@ load_thunk_from_memory (char *data, size_t len, int is_read_only)
continue;
if (ph[i].p_flags == PF_R)
continue;
- if (ph[i].p_align != page_size)
+ if (ph[i].p_align < page_size)
continue;
if (mprotect (data + ph[i].p_vaddr,