x86_64: move vdso to mmap region from stack region
This removes the only executable code from the stack region and gives
the vdso the same randomized base as other mmap mappings, including the
linker and other shared objects. This provides a sane amount of entropy,
and there is little to no advantage in separating the vdso from the
existing executable code there.

It would be sensible for userspace to reserve the initial mmap base as a
region for executable code, with a random gap before other mmap
allocations and additional randomization within that region. However,
there isn't much the kernel can do to help, given how dynamic linkers
load shared objects.

This was extracted from the PaX RANDMMAP feature.

Signed-off-by: Daniel Micay <danielmicay@gmail.com>
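As an illustration of the effect (not part of the commit), a minimal userspace sketch: it prints the vdso base reported through the auxiliary vector next to a libc address. With the vdso mapped through the regular mmap path, both should land in the mmap region rather than just above the stack.

/* Illustration only -- not from this commit. */
#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base of the vdso mapping, as passed to the process via auxv. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("vdso base:   0x%lx\n", vdso);
	/* A libc-resident object for comparison; both now share the mmap region. */
	printf("libc object: %p\n", (void *)stdin);
	return 0;
}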
thestinger authored and anthraxx committed Mar 16, 2021
1 parent 350376f commit 06aba60
Showing 3 changed files with 1 addition and 55 deletions.
48 changes: 1 addition & 47 deletions arch/x86/entry/vdso/vma.c
@@ -298,55 +298,9 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 }
 
 #ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-	unsigned long addr, end;
-	unsigned offset;
-
-	/*
-	 * Round up the start address. It can start out unaligned as a result
-	 * of stack start randomization.
-	 */
-	start = PAGE_ALIGN(start);
-
-	/* Round the lowest possible end address up to a PMD boundary. */
-	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= TASK_SIZE_MAX)
-		end = TASK_SIZE_MAX;
-	end -= len;
-
-	if (end > start) {
-		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-		addr = start + (offset << PAGE_SHIFT);
-	} else {
-		addr = start;
-	}
-
-	/*
-	 * Forcibly align the final address in case we have a hardware
-	 * issue that requires alignment for performance reasons.
-	 */
-	addr = align_vdso_addr(addr);
-
-	return addr;
-}
-
 static int map_vdso_randomized(const struct vdso_image *image)
 {
-	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
-	return map_vdso(image, addr);
+	return map_vdso(image, 0);
 }
 #endif
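
With the explicit hint removed (the `+` line above passes 0), here is a back-of-the-envelope sketch of why this matters for entropy. The 2 MiB PMD span and the default 28-bit mmap randomization on x86_64 are assumptions about the surrounding kernel configuration, not values taken from this diff.

/*
 * Rough entropy comparison (assumed values, not from this commit):
 * the removed vdso_addr() placed the vdso at a page-aligned offset
 * between the stack top and the next 2 MiB PMD boundary, i.e. at most
 * 512 page-sized slots (~9 bits).  A zero hint instead inherits the
 * generic mmap base randomization (28 bits by default on x86_64,
 * configurable up to 32).
 */
#include <stdio.h>

int main(void)
{
	unsigned long pmd_size = 2UL << 20;	/* 2 MiB */
	unsigned long page_size = 4UL << 10;	/* 4 KiB */

	printf("old scheme: at most %lu page slots\n", pmd_size / page_size);
	printf("mmap base:  about 1 << 28 slots by default\n");
	return 0;
}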

1 change: 0 additions & 1 deletion arch/x86/include/asm/elf.h
@@ -403,5 +403,4 @@ struct va_alignment {
 } ____cacheline_aligned;
 
 extern struct va_alignment va_align;
-extern unsigned long align_vdso_addr(unsigned long);
 #endif /* _ASM_X86_ELF_H */
7 changes: 0 additions & 7 deletions arch/x86/kernel/sys_x86_64.c
@@ -52,13 +52,6 @@ static unsigned long get_align_bits(void)
 	return va_align.bits & get_align_mask();
 }
 
-unsigned long align_vdso_addr(unsigned long addr)
-{
-	unsigned long align_mask = get_align_mask();
-	addr = (addr + align_mask) & ~align_mask;
-	return addr | get_align_bits();
-}
-
 static int __init control_va_addr_alignment(char *str)
 {
 	/* guard against enabling this on other CPU families */
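
For reference, the arithmetic performed by the removed align_vdso_addr() helper, as a standalone sketch with hypothetical values. The mask and bits below are made up; in the kernel they come from va_align, which is populated for CPUs that want executable mappings aligned to avoid instruction-cache aliasing penalties.

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x7f1234567891UL;	/* hypothetical input address */
	unsigned long align_mask = 0x7fffUL;	/* hypothetical 32 KiB - 1 mask */
	unsigned long align_bits = 0x3000UL;	/* hypothetical alignment bits */

	/* Round up to the alignment boundary... */
	addr = (addr + align_mask) & ~align_mask;
	/* ...then OR in the chosen alignment bits. */
	printf("0x%lx\n", addr | align_bits);	/* prints 0x7f123456b000 */
	return 0;
}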
