Skip to content

Commit 9dabf60

Browse files
committed
parisc: add flexible mmap memory layout support
Add support for the flexible mmap memory layout (as described in http://lwn.net/Articles/91829). This is especially interesting on parisc since we currently only support a 32-bit userspace (even with a 64-bit Linux kernel). Signed-off-by: Helge Deller <[email protected]>
1 parent f5a408d commit 9dabf60

File tree

6 files changed

+233
-43
lines changed

6 files changed

+233
-43
lines changed

arch/parisc/include/asm/elf.h

+4
Original file line numberDiff line numberDiff line change
@@ -348,4 +348,8 @@ struct pt_regs; /* forward declaration... */
348348

349349
#define ELF_HWCAP 0
350350

351+
struct mm_struct;
352+
extern unsigned long arch_randomize_brk(struct mm_struct *);
353+
#define arch_randomize_brk arch_randomize_brk
354+
351355
#endif

arch/parisc/include/asm/pgtable.h

+1
Original file line numberDiff line numberDiff line change
@@ -511,6 +511,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
511511
/* We provide our own get_unmapped_area to provide cache coherency */
512512

513513
#define HAVE_ARCH_UNMAPPED_AREA
514+
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
514515

515516
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
516517
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

arch/parisc/include/asm/processor.h

+2
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,8 @@
3030
#endif
3131
#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
3232

33+
#define HAVE_ARCH_PICK_MMAP_LAYOUT
34+
3335
#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
3436
#define TASK_SIZE TASK_SIZE_OF(current)
3537
#define TASK_UNMAPPED_BASE (current->thread.map_base)

arch/parisc/include/asm/thread_info.h

+10
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,16 @@ struct thread_info {
7676
#define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
7777
_TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
7878

79+
/*
 * is_32bit_task() - does the current task run a 32-bit userspace?
 *
 * On a 64-bit kernel a task is 32-bit when compat support is built in
 * and the task has TIF_32BIT set; without CONFIG_COMPAT no 32-bit
 * tasks can exist.  On a 32-bit kernel every task is 32-bit.
 */
#ifdef CONFIG_64BIT
# ifdef CONFIG_COMPAT
#  define is_32bit_task()	(test_thread_flag(TIF_32BIT))
# else
#  define is_32bit_task()	(0)
# endif
#else
# define is_32bit_task()	(1)
#endif
88+
7989
#endif /* __KERNEL__ */
8090

8191
#endif /* _ASM_PARISC_THREAD_INFO_H */

arch/parisc/kernel/process.c

+20-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
1414
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
1515
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
16-
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
16+
* Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
1717
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
1818
*
1919
*
@@ -49,6 +49,7 @@
4949
#include <linux/kallsyms.h>
5050
#include <linux/uaccess.h>
5151
#include <linux/rcupdate.h>
52+
#include <linux/random.h>
5253

5354
#include <asm/io.h>
5455
#include <asm/asm-offsets.h>
@@ -286,3 +287,21 @@ void *dereference_function_descriptor(void *ptr)
286287
return ptr;
287288
}
288289
#endif
290+
291+
static inline unsigned long brk_rnd(void)
292+
{
293+
/* 8MB for 32bit, 1GB for 64bit */
294+
if (is_32bit_task())
295+
return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
296+
else
297+
return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
298+
}
299+
300+
unsigned long arch_randomize_brk(struct mm_struct *mm)
301+
{
302+
unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
303+
304+
if (ret < mm->brk)
305+
return mm->brk;
306+
return ret;
307+
}

arch/parisc/kernel/sys_parisc.c

+196-42
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
66
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
77
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
8+
* Copyright (C) 1999-2014 Helge Deller <[email protected]>
89
*
910
*
1011
* This program is free software; you can redistribute it and/or modify
@@ -23,6 +24,7 @@
2324
*/
2425

2526
#include <asm/uaccess.h>
27+
#include <asm/elf.h>
2628
#include <linux/file.h>
2729
#include <linux/fs.h>
2830
#include <linux/linkage.h>
@@ -32,78 +34,230 @@
3234
#include <linux/syscalls.h>
3335
#include <linux/utsname.h>
3436
#include <linux/personality.h>
37+
#include <linux/random.h>
3538

36-
static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
39+
/* we construct an artificial offset for the mapping based on the physical
40+
* address of the kernel mapping variable */
41+
#define GET_LAST_MMAP(filp) \
42+
(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
43+
#define SET_LAST_MMAP(filp, val) \
44+
{ /* nothing */ }
45+
46+
static int get_offset(unsigned int last_mmap)
3747
{
38-
struct vm_unmapped_area_info info;
48+
return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
49+
}
3950

40-
info.flags = 0;
41-
info.length = len;
42-
info.low_limit = PAGE_ALIGN(addr);
43-
info.high_limit = TASK_SIZE;
44-
info.align_mask = 0;
45-
info.align_offset = 0;
46-
return vm_unmapped_area(&info);
51+
static unsigned long shared_align_offset(unsigned int last_mmap,
52+
unsigned long pgoff)
53+
{
54+
return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
4755
}
4856

49-
/*
50-
* We need to know the offset to use. Old scheme was to look for
51-
* existing mapping and use the same offset. New scheme is to use the
52-
* address of the kernel data structure as the seed for the offset.
53-
* We'll see how that works...
54-
*
55-
* The mapping is cacheline aligned, so there's no information in the bottom
56-
* few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
57-
* drop the bottom 8 bits and use bits 8-17.
58-
*/
59-
static int get_offset(struct address_space *mapping)
57+
static inline unsigned long COLOR_ALIGN(unsigned long addr,
58+
unsigned int last_mmap, unsigned long pgoff)
6059
{
61-
return (unsigned long) mapping >> 8;
60+
unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
61+
unsigned long off = (SHMLBA-1) &
62+
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
63+
64+
return base + off;
6265
}
6366

64-
static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
67+
/*
68+
* Top of mmap area (just below the process stack).
69+
*/
70+
71+
static unsigned long mmap_upper_limit(void)
6572
{
66-
struct address_space *mapping = filp ? filp->f_mapping : NULL;
73+
unsigned long stack_base;
6774

68-
return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
75+
/* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
76+
stack_base = rlimit_max(RLIMIT_STACK);
77+
if (stack_base > (1 << 30))
78+
stack_base = 1 << 30;
79+
80+
return PAGE_ALIGN(STACK_TOP - stack_base);
6981
}
7082

71-
static unsigned long get_shared_area(struct file *filp, unsigned long addr,
72-
unsigned long len, unsigned long pgoff)
83+
84+
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
85+
unsigned long len, unsigned long pgoff, unsigned long flags)
7386
{
87+
struct mm_struct *mm = current->mm;
88+
struct vm_area_struct *vma;
89+
unsigned long task_size = TASK_SIZE;
90+
int do_color_align, last_mmap;
7491
struct vm_unmapped_area_info info;
7592

93+
if (len > task_size)
94+
return -ENOMEM;
95+
96+
do_color_align = 0;
97+
if (filp || (flags & MAP_SHARED))
98+
do_color_align = 1;
99+
last_mmap = GET_LAST_MMAP(filp);
100+
101+
if (flags & MAP_FIXED) {
102+
if ((flags & MAP_SHARED) && last_mmap &&
103+
(addr - shared_align_offset(last_mmap, pgoff))
104+
& (SHMLBA - 1))
105+
return -EINVAL;
106+
goto found_addr;
107+
}
108+
109+
if (addr) {
110+
if (do_color_align && last_mmap)
111+
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
112+
else
113+
addr = PAGE_ALIGN(addr);
114+
115+
vma = find_vma(mm, addr);
116+
if (task_size - len >= addr &&
117+
(!vma || addr + len <= vma->vm_start))
118+
goto found_addr;
119+
}
120+
76121
info.flags = 0;
77122
info.length = len;
78-
info.low_limit = PAGE_ALIGN(addr);
79-
info.high_limit = TASK_SIZE;
80-
info.align_mask = PAGE_MASK & (SHMLBA - 1);
81-
info.align_offset = shared_align_offset(filp, pgoff);
82-
return vm_unmapped_area(&info);
123+
info.low_limit = mm->mmap_legacy_base;
124+
info.high_limit = mmap_upper_limit();
125+
info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
126+
info.align_offset = shared_align_offset(last_mmap, pgoff);
127+
addr = vm_unmapped_area(&info);
128+
129+
found_addr:
130+
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
131+
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
132+
133+
return addr;
83134
}
84135

85-
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
86-
unsigned long len, unsigned long pgoff, unsigned long flags)
136+
unsigned long
137+
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
138+
const unsigned long len, const unsigned long pgoff,
139+
const unsigned long flags)
87140
{
141+
struct vm_area_struct *vma;
142+
struct mm_struct *mm = current->mm;
143+
unsigned long addr = addr0;
144+
int do_color_align, last_mmap;
145+
struct vm_unmapped_area_info info;
146+
147+
#ifdef CONFIG_64BIT
148+
/* This should only ever run for 32-bit processes. */
149+
BUG_ON(!test_thread_flag(TIF_32BIT));
150+
#endif
151+
152+
/* requested length too big for entire address space */
88153
if (len > TASK_SIZE)
89154
return -ENOMEM;
155+
156+
do_color_align = 0;
157+
if (filp || (flags & MAP_SHARED))
158+
do_color_align = 1;
159+
last_mmap = GET_LAST_MMAP(filp);
160+
90161
if (flags & MAP_FIXED) {
91-
if ((flags & MAP_SHARED) &&
92-
(addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
162+
if ((flags & MAP_SHARED) && last_mmap &&
163+
(addr - shared_align_offset(last_mmap, pgoff))
164+
& (SHMLBA - 1))
93165
return -EINVAL;
94-
return addr;
166+
goto found_addr;
95167
}
96-
if (!addr)
97-
addr = TASK_UNMAPPED_BASE;
98168

99-
if (filp || (flags & MAP_SHARED))
100-
addr = get_shared_area(filp, addr, len, pgoff);
101-
else
102-
addr = get_unshared_area(addr, len);
169+
/* requesting a specific address */
170+
if (addr) {
171+
if (do_color_align && last_mmap)
172+
addr = COLOR_ALIGN(addr, last_mmap, pgoff);
173+
else
174+
addr = PAGE_ALIGN(addr);
175+
vma = find_vma(mm, addr);
176+
if (TASK_SIZE - len >= addr &&
177+
(!vma || addr + len <= vma->vm_start))
178+
goto found_addr;
179+
}
180+
181+
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
182+
info.length = len;
183+
info.low_limit = PAGE_SIZE;
184+
info.high_limit = mm->mmap_base;
185+
info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
186+
info.align_offset = shared_align_offset(last_mmap, pgoff);
187+
addr = vm_unmapped_area(&info);
188+
if (!(addr & ~PAGE_MASK))
189+
goto found_addr;
190+
VM_BUG_ON(addr != -ENOMEM);
191+
192+
/*
193+
* A failed mmap() very likely causes application failure,
194+
* so fall back to the bottom-up function here. This scenario
195+
* can happen with large stack limits and large mmap()
196+
* allocations.
197+
*/
198+
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
199+
200+
found_addr:
201+
if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
202+
SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
103203

104204
return addr;
105205
}
106206

207+
static int mmap_is_legacy(void)
208+
{
209+
if (current->personality & ADDR_COMPAT_LAYOUT)
210+
return 1;
211+
212+
/* parisc stack always grows up - so a unlimited stack should
213+
* not be an indicator to use the legacy memory layout.
214+
* if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
215+
* return 1;
216+
*/
217+
218+
return sysctl_legacy_va_layout;
219+
}
220+
221+
static unsigned long mmap_rnd(void)
222+
{
223+
unsigned long rnd = 0;
224+
225+
/*
226+
* 8 bits of randomness in 32bit mmaps, 20 address space bits
227+
* 28 bits of randomness in 64bit mmaps, 40 address space bits
228+
*/
229+
if (current->flags & PF_RANDOMIZE) {
230+
if (is_32bit_task())
231+
rnd = get_random_int() % (1<<8);
232+
else
233+
rnd = get_random_int() % (1<<28);
234+
}
235+
return rnd << PAGE_SHIFT;
236+
}
237+
238+
static unsigned long mmap_legacy_base(void)
239+
{
240+
return TASK_UNMAPPED_BASE + mmap_rnd();
241+
}
242+
243+
/*
244+
* This function, called very early during the creation of a new
245+
* process VM image, sets up which VM layout function to use:
246+
*/
247+
void arch_pick_mmap_layout(struct mm_struct *mm)
248+
{
249+
mm->mmap_legacy_base = mmap_legacy_base();
250+
mm->mmap_base = mmap_upper_limit();
251+
252+
if (mmap_is_legacy()) {
253+
mm->mmap_base = mm->mmap_legacy_base;
254+
mm->get_unmapped_area = arch_get_unmapped_area;
255+
} else {
256+
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
257+
}
258+
}
259+
260+
107261
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
108262
unsigned long prot, unsigned long flags, unsigned long fd,
109263
unsigned long pgoff)

0 commit comments

Comments
 (0)