@@ -5,6 +5,7 @@
  * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
  * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
  * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
+ * Copyright (C) 1999-2014 Helge Deller <[email protected]>
  *
  *
  * This program is free software; you can redistribute it and/or modify
@@ -23,6 +24,7 @@
  */

 #include <asm/uaccess.h>
+#include <asm/elf.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/linkage.h>
@@ -32,17 +34,23 @@
 #include <linux/syscalls.h>
 #include <linux/utsname.h>
 #include <linux/personality.h>
+#include <linux/random.h>

-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+/* we construct an artificial offset for the mapping based on the physical
+ * address of the kernel mapping variable */
+#define GET_LAST_MMAP(filp)		\
+	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
+#define SET_LAST_MMAP(filp, val)	\
+	 { /* nothing */ }
+
+static int get_offset(unsigned int last_mmap)
 {
-	struct vm_unmapped_area_info info;
+	return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+}

-	info.flags = 0;
-	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = 0;
-	info.align_offset = 0;
-	return vm_unmapped_area(&info);
+static unsigned long shared_align_offset(unsigned int last_mmap,
+					 unsigned long pgoff)
+{
+	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
 }

@@ -49,15 +57,10 @@
-/*
- * We need to know the offset to use.  Old scheme was to look for
- * existing mapping and use the same offset.  New scheme is to use the
- * address of the kernel data structure as the seed for the offset.
- * We'll see how that works...
- *
- * The mapping is cacheline aligned, so there's no information in the bottom
- * few bits of the address.  We're looking for 10 bits (4MB / 4k), so let's
- * drop the bottom 8 bits and use bits 8-17.
- */
-static int get_offset(struct address_space *mapping)
+static inline unsigned long COLOR_ALIGN(unsigned long addr,
+			  unsigned int last_mmap, unsigned long pgoff)
 {
-	return (unsigned long) mapping >> 8;
+	unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
+	unsigned long off  = (SHMLBA-1) &
+		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
+
+	return base + off;
 }

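Taken together, the helpers above turn the kernel address of a file's f_mapping into a stable, per-file cache color: bits 8 and up of the pointer seed last_mmap, and get_offset() keeps only the bits that select a page slot within one SHMLBA stride. A minimal user-space sketch of that arithmetic, assuming 4 KB pages and a 4 MB SHMLBA (the values implied by the old comment's "10 bits (4MB / 4k)"); the f_mapping value is made up for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		(4UL << 20)	/* assumed: 4 MB, as the old comment implies */

/* stand-in for GET_LAST_MMAP(): drop the low 8 bits of the pointer */
static unsigned int last_mmap_of(unsigned long f_mapping)
{
	return f_mapping >> 8;
}

static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHMLBA - 1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long f_mapping = 0x10203456UL;	/* made-up kernel pointer */
	unsigned int last_mmap = last_mmap_of(f_mapping);
	unsigned long pgoff;

	/* successive pages of one file get successive colors */
	for (pgoff = 0; pgoff < 3; pgoff++)
		printf("pgoff %lu -> color offset 0x%06lx\n", pgoff,
		       shared_align_offset(last_mmap, pgoff) & (SHMLBA - 1));
	return 0;
}

Because consecutive pgoff values map to consecutive page colors, all pages of one shared file keep the same SHMLBA-relative layout as the kernel's own mapping of it.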
@@ -64,7 +67,16 @@
-static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
+/*
+ * Top of mmap area (just below the process stack).
+ */
+
+static unsigned long mmap_upper_limit(void)
 {
-	struct address_space *mapping = filp ? filp->f_mapping : NULL;
+	unsigned long stack_base;

-	return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+	/* Limit stack size to 1GB - see setup_arg_pages() in fs/exec.c */
+	stack_base = rlimit_max(RLIMIT_STACK);
+	if (stack_base > (1 << 30))
+		stack_base = 1 << 30;
+
+	return PAGE_ALIGN(STACK_TOP - stack_base);
 }

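mmap_upper_limit() simply reserves room for the stack under STACK_TOP and clamps that reservation at 1 GB. A quick stand-alone check of the arithmetic; the STACK_TOP value is assumed for illustration only, and rlimit_max() is replaced by a plain parameter:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_TOP	0xf8000000UL	/* assumed value, illustration only */

static unsigned long mmap_upper_limit(unsigned long stack_rlim_max)
{
	unsigned long stack_base = stack_rlim_max;

	/* same clamp as the patch: at most 1 GB reserved for the stack */
	if (stack_base > (1UL << 30))
		stack_base = 1UL << 30;

	return PAGE_ALIGN(STACK_TOP - stack_base);
}

int main(void)
{
	printf("8 MB stack limit -> mmap top 0x%08lx\n",
	       mmap_upper_limit(8UL << 20));
	printf("unlimited stack  -> mmap top 0x%08lx\n",
	       mmap_upper_limit(~0UL));
	return 0;
}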
@@ -71,14 +83,53 @@
-static unsigned long get_shared_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff)
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;

+	if (len > task_size)
+		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
+	if (flags & MAP_FIXED) {
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+				& (SHMLBA - 1))
+			return -EINVAL;
+		goto found_addr;
+	}
+
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
 	info.flags = 0;
 	info.length = len;
-	info.low_limit = PAGE_ALIGN(addr);
-	info.high_limit = TASK_SIZE;
-	info.align_mask = PAGE_MASK & (SHMLBA - 1);
-	info.align_offset = shared_align_offset(filp, pgoff);
-	return vm_unmapped_area(&info);
+	info.low_limit = mm->mmap_legacy_base;
+	info.high_limit = mmap_upper_limit();
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));
+
+	return addr;
 }

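The coloring itself is delegated to vm_unmapped_area(): with align_mask = PAGE_MASK & (SHMLBA - 1), the allocator only returns addresses that agree with align_offset in the bits between the page offset and the SHMLBA boundary, which is the same congruence the MAP_FIXED branch checks by hand. A sketch of that test, reusing the assumed 4 KB/4 MB constants; the sample align_offset is the hypothetical value from the earlier sketch:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define SHMLBA		(4UL << 20)	/* assumed 4 MB */

/* the invariant the allocator is asked to enforce for the chosen address */
static int color_matches(unsigned long addr, unsigned long align_offset,
			 unsigned long align_mask)
{
	return ((addr - align_offset) & align_mask) == 0;
}

int main(void)
{
	unsigned long align_mask = PAGE_MASK & (SHMLBA - 1);
	unsigned long align_offset = 0x102000UL;	/* hypothetical shared_align_offset() result */

	printf("0x40102000: %s\n",
	       color_matches(0x40102000UL, align_offset, align_mask) ? "ok" : "wrong color");
	printf("0x40103000: %s\n",
	       color_matches(0x40103000UL, align_offset, align_mask) ? "ok" : "wrong color");
	return 0;
}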
@@ -85,22 +136,71 @@
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			  const unsigned long len, const unsigned long pgoff,
+			  const unsigned long flags)
 {
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_color_align, last_mmap;
+	struct vm_unmapped_area_info info;
+
+#ifdef CONFIG_64BIT
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+#endif
+
+	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
 		return -ENOMEM;
+
+	do_color_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_color_align = 1;
+	last_mmap = GET_LAST_MMAP(filp);
+
 	if (flags & MAP_FIXED) {
-		if ((flags & MAP_SHARED) &&
-		    (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
+		if ((flags & MAP_SHARED) && last_mmap &&
+		    (addr - shared_align_offset(last_mmap, pgoff))
+				& (SHMLBA - 1))
 			return -EINVAL;
-		return addr;
+		goto found_addr;
 	}
-	if (!addr)
-		addr = TASK_UNMAPPED_BASE;

-	if (filp || (flags & MAP_SHARED))
-		addr = get_shared_area(filp, addr, len, pgoff);
-	else
-		addr = get_unshared_area(addr, len);
+	/* requesting a specific address */
+	if (addr) {
+		if (do_color_align && last_mmap)
+			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			goto found_addr;
+	}
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = shared_align_offset(last_mmap, pgoff);
+	addr = vm_unmapped_area(&info);
+	if (!(addr & ~PAGE_MASK))
+		goto found_addr;
+	VM_BUG_ON(addr != -ENOMEM);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+found_addr:
+	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
+		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

 	return addr;
 }

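The !(addr & ~PAGE_MASK) tests above rely on vm_unmapped_area() returning either a page-aligned address or a negative errno cast to unsigned long; an errno always has non-zero bits below PAGE_SHIFT, so page alignment doubles as a success check. A small demonstration, assuming 4 KB pages:

#include <errno.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

/* page-aligned value -> real address; anything else -> negative errno */
static int is_error_addr(unsigned long addr)
{
	return (addr & ~PAGE_MASK) != 0;
}

int main(void)
{
	unsigned long ok  = 0xb7000000UL;		/* sample mapping address */
	unsigned long err = (unsigned long)-ENOMEM;	/* errno in disguise */

	printf("0x%lx -> %s\n", ok, is_error_addr(ok) ? "errno" : "address");
	printf("-ENOMEM -> %s\n", is_error_addr(err) ? "errno" : "address");
	return 0;
}

The same fact explains the VM_BUG_ON(addr != -ENOMEM) line: once the low bits are non-zero, -ENOMEM is the only error this allocator is expected to produce.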
@@ -107,3 +207,57 @@
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	/* parisc stack always grows up - so an unlimited stack should
+	 * not be an indicator to use the legacy memory layout.
+	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+	 *	return 1;
+	 */
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_rnd(void)
+{
+	unsigned long rnd = 0;
+
+	/*
+	 * 8 bits of randomness in 32bit mmaps, 20 address space bits
+	 * 28 bits of randomness in 64bit mmaps, 40 address space bits
+	 */
+	if (current->flags & PF_RANDOMIZE) {
+		if (is_32bit_task())
+			rnd = get_random_int() % (1<<8);
+		else
+			rnd = get_random_int() % (1<<28);
+	}
+	return rnd << PAGE_SHIFT;
+}
+
+static unsigned long mmap_legacy_base(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	mm->mmap_legacy_base = mmap_legacy_base();
+	mm->mmap_base = mmap_upper_limit();
+
+	if (mmap_is_legacy()) {
+		mm->mmap_base = mm->mmap_legacy_base;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+	} else {
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+	}
+}
+
+
 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
 		unsigned long prot, unsigned long flags, unsigned long fd,
 		unsigned long pgoff)
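The ranges quoted in the mmap_rnd() comment follow directly from the shift: 2^8 page-sized slots give 1 MB of randomization on 32-bit tasks, and 2^28 slots give 1 TB on 64-bit ones. A user-space sketch of the same computation (rand() stands in for get_random_int(), and the PF_RANDOMIZE check is dropped):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12

static unsigned long mmap_rnd(int is_32bit_task)
{
	unsigned long rnd;

	/* 8 random page-slot bits for 32-bit tasks, 28 for 64-bit */
	if (is_32bit_task)
		rnd = rand() % (1 << 8);
	else
		rnd = (unsigned long)rand() % (1UL << 28);
	return rnd << PAGE_SHIFT;
}

int main(void)
{
	/* span of possible offsets = slot count << PAGE_SHIFT */
	printf("32-bit span: %lu MB\n", ((1UL << 8) << PAGE_SHIFT) >> 20);
	printf("64-bit span: %lu GB\n", ((1UL << 28) << PAGE_SHIFT) >> 30);
	printf("sample 32-bit offset: 0x%lx\n", mmap_rnd(1));
	return 0;
}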