Print this page
rev 7151 : 8043224: -Xcheck:jni improvements to exception checking and excessive local refs
Summary: Warn when exceptions are not checked after calling functions that require it, and when local refs expand beyond capacity.
Reviewed-by: dsimms
Split |
Split |
Close |
Expand all |
Collapse all |
--- old/hotspot/src/share/vm/memory/guardedMemory.hpp
+++ new/hotspot/src/share/vm/memory/guardedMemory.hpp
1 1 /*
2 - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
2 + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #ifndef SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
26 26 #define SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
27 27
28 28 #include "memory/allocation.hpp"
29 29 #include "utilities/globalDefinitions.hpp"
30 30
31 31 /**
32 32 * Guarded memory for detecting buffer overrun.
33 33 *
34 34 * Allows allocations to be wrapped with padded bytes of a known byte pattern,
35 35 * that is a "guard". Guard patterns may be verified to detect buffer overruns.
36 36 *
37 37 * Primarily used by "debug malloc" and "checked JNI".
38 38 *
39 39 * Memory layout:
40 40 *
41 41 * |Offset | Content | Description |
42 42 * |------------------------------------------------------------
43 43 * |base_addr | 0xABABABABABABABAB | Head guard |
44 44 * |+16 | <size_t:user_size> | User data size |
45 45 * |+sizeof(uintptr_t) | <tag> | Tag word |
46 46 * |+sizeof(void*) | 0xF1 <user_data> | User data |
47 47 * |+user_size | 0xABABABABABABABAB | Tail guard |
48 48 * -------------------------------------------------------------
49 49 *
50 50 * Where:
51 51 * - guard padding uses "badResourceValue" (0xAB)
52 52 * - tag word is general purpose
53 53 * - user data
54 54 * -- initially padded with "uninitBlockPad" (0xF1),
55 55 * -- to "freeBlockPad" (0xBA), when freed
56 56 *
57 57 * Usage:
58 58 *
59 59 * * Allocations: one may wrap allocations with guard memory:
60 60 * <code>
61 61 * Thing* alloc_thing() {
62 62 * void* mem = user_alloc_fn(GuardedMemory::get_total_size(sizeof(thing)));
63 63 * GuardedMemory guarded(mem, sizeof(thing));
64 64 * return (Thing*) guarded.get_user_ptr();
65 65 * }
66 66 * </code>
67 67 * * Verify: memory guards are still intact
68 68 * <code>
69 69 * bool verify_thing(Thing* thing) {
70 70 * GuardedMemory guarded((void*)thing);
71 71 * return guarded.verify_guards();
72 72 * }
73 73 * </code>
74 74 * * Free: one may mark bytes as freed (further debugging support)
75 75 * <code>
76 76 * void free_thing(Thing* thing) {
77 77 * GuardedMemory guarded((void*)thing);
78 78 * assert(guarded.verify_guards(), "Corrupt thing");
79 79 * user_free_fn(guarded.release_for_freeing());
80 80 * }
81 81 * </code>
82 82 */
83 83 class GuardedMemory : StackObj { // Wrapper on stack
84 84
85 85 // Private inner classes for memory layout...
86 86
87 87 protected:
88 88
89 89 /**
90 90 * Guard class for header and trailer known pattern to test for overwrites.
91 91 */
92 92 class Guard { // Class for raw memory (no vtbl allowed)
93 93 friend class GuardedMemory;
94 94 protected:
95 95 enum {
96 96 GUARD_SIZE = 16
97 97 };
98 98
99 99 u_char _guard[GUARD_SIZE];
100 100
101 101 public:
102 102
103 103 void build() {
104 104 u_char* c = _guard; // Possibly unaligned if tail guard
105 105 u_char* end = c + GUARD_SIZE;
106 106 while (c < end) {
107 107 *c = badResourceValue;
108 108 c++;
109 109 }
110 110 }
111 111
112 112 bool verify() const {
113 113 u_char* c = (u_char*) _guard;
114 114 u_char* end = c + GUARD_SIZE;
115 115 while (c < end) {
116 116 if (*c != badResourceValue) {
117 117 return false;
118 118 }
119 119 c++;
120 120 }
121 121 return true;
122 122 }
123 123
124 124 }; // GuardedMemory::Guard
125 125
126 126 /**
127 127 * Header guard and size
128 128 */
129 129 class GuardHeader : Guard {
130 130 friend class GuardedMemory;
131 131 protected:
132 132 // Take care in modifying fields here, will effect alignment
133 133 // e.g. x86 ABI 16 byte stack alignment
134 134 union {
135 135 uintptr_t __unused_full_word1;
136 136 size_t _user_size;
137 137 };
138 138 void* _tag;
139 139 public:
140 140 void set_user_size(const size_t usz) { _user_size = usz; }
141 141 size_t get_user_size() const { return _user_size; }
142 142
143 143 void set_tag(const void* tag) { _tag = (void*) tag; }
144 144 void* get_tag() const { return _tag; }
145 145
146 146 }; // GuardedMemory::GuardHeader
147 147
148 148 // Guarded Memory...
149 149
150 150 protected:
151 151 u_char* _base_addr;
152 152
153 153 public:
154 154
155 155 /**
156 156 * Create new guarded memory.
157 157 *
158 158 * Wraps, starting at the given "base_ptr" with guards. Use "get_user_ptr()"
159 159 * to return a pointer suitable for user data.
160 160 *
161 161 * @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
162 162 * @param user_size the size of the user data to be wrapped.
163 163 * @param tag optional general purpose tag.
164 164 */
165 165 GuardedMemory(void* base_ptr, const size_t user_size, const void* tag = NULL) {
166 166 wrap_with_guards(base_ptr, user_size, tag);
167 167 }
168 168
169 169 /**
170 170 * Wrap existing guarded memory.
171 171 *
172 172 * To use this constructor, one must have created guarded memory with
173 173 * "GuardedMemory(void*, size_t, void*)" (or indirectly via helper, e.g. "wrap_copy()").
174 174 *
175 175 * @param user_p existing wrapped memory.
176 176 */
177 177 GuardedMemory(void* userp) {
178 178 u_char* user_ptr = (u_char*) userp;
179 179 assert((uintptr_t)user_ptr > (sizeof(GuardHeader) + 0x1000), "Invalid pointer");
180 180 _base_addr = (user_ptr - sizeof(GuardHeader));
181 181 }
182 182
183 183 /**
184 184 * Create new guarded memory.
185 185 *
186 186 * Wraps, starting at the given "base_ptr" with guards. Allows reuse of stack allocated helper.
187 187 *
188 188 * @param base_ptr allocation wishing to be wrapped, must be at least "GuardedMemory::get_total_size()" bytes.
189 189 * @param user_size the size of the user data to be wrapped.
190 190 * @param tag optional general purpose tag.
191 191 *
192 192 * @return user data pointer (inner pointer to supplied "base_ptr").
193 193 */
194 194 void* wrap_with_guards(void* base_ptr, size_t user_size, const void* tag = NULL) {
195 195 assert(base_ptr != NULL, "Attempt to wrap NULL with memory guard");
196 196 _base_addr = (u_char*)base_ptr;
197 197 get_head_guard()->build();
198 198 get_head_guard()->set_user_size(user_size);
199 199 get_tail_guard()->build();
200 200 set_tag(tag);
201 201 set_user_bytes(uninitBlockPad);
202 202 assert(verify_guards(), "Expected valid memory guards");
203 203 return get_user_ptr();
204 204 }
205 205
206 206 /**
207 207 * Verify head and tail guards.
208 208 *
209 209 * @return true if guards are intact, false would indicate a buffer overrun.
210 210 */
211 211 bool verify_guards() const {
212 212 if (_base_addr != NULL) {
213 213 return (get_head_guard()->verify() && get_tail_guard()->verify());
214 214 }
215 215 return false;
216 216 }
217 217
218 218 /**
219 219 * Set the general purpose tag.
220 220 *
221 221 * @param tag general purpose tag.
222 222 */
223 223 void set_tag(const void* tag) { get_head_guard()->set_tag(tag); }
224 224
225 225 /**
226 226 * Return the general purpose tag.
227 227 *
↓ open down ↓ |
215 lines elided |
↑ open up ↑ |
228 228 * @return the general purpose tag, defaults to NULL.
229 229 */
230 230 void* get_tag() const { return get_head_guard()->get_tag(); }
231 231
232 232 /**
233 233 * Return the size of the user data.
234 234 *
235 235 * @return the size of the user data.
236 236 */
237 237 size_t get_user_size() const {
238 - assert(_base_addr, "Not wrapping any memory");
238 + assert(_base_addr != NULL, "Not wrapping any memory");
239 239 return get_head_guard()->get_user_size();
240 240 }
241 241
242 242 /**
243 243 * Return the user data pointer.
244 244 *
245 245 * @return the user data pointer.
246 246 */
247 247 u_char* get_user_ptr() const {
248 - assert(_base_addr, "Not wrapping any memory");
248 + assert(_base_addr != NULL, "Not wrapping any memory");
249 249 return _base_addr + sizeof(GuardHeader);
250 250 }
251 251
252 252 /**
253 253 * Release the wrapped pointer for resource freeing.
254 254 *
255 255 * Pads the user data with "freeBlockPad", and dis-associates the helper.
256 256 *
257 257 * @return the original base pointer used to wrap the data.
258 258 */
259 259 void* release_for_freeing() {
260 260 set_user_bytes(freeBlockPad);
261 261 return release();
262 262 }
263 263
264 264 /**
265 265 * Dis-associate the help from the original base address.
266 266 *
267 267 * @return the original base pointer used to wrap the data.
268 268 */
269 269 void* release() {
270 270 void* p = (void*) _base_addr;
271 271 _base_addr = NULL;
272 272 return p;
273 273 }
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
274 274
275 275 virtual void print_on(outputStream* st) const;
276 276
277 277 protected:
278 278 GuardHeader* get_head_guard() const { return (GuardHeader*) _base_addr; }
279 279 Guard* get_tail_guard() const { return (Guard*) (get_user_ptr() + get_user_size()); };
280 280 void set_user_bytes(u_char ch) {
281 281 memset(get_user_ptr(), ch, get_user_size());
282 282 }
283 283
284 -public:
284 + public:
285 285 /**
286 286 * Return the total size required for wrapping the given user size.
287 287 *
288 288 * @return the total size required for wrapping the given user size.
289 289 */
290 290 static size_t get_total_size(size_t user_size) {
291 291 size_t total_size = sizeof(GuardHeader) + user_size + sizeof(Guard);
292 292 assert(total_size > user_size, "Unexpected wrap-around");
293 293 return total_size;
294 294 }
295 295
296 296 // Helper functions...
297 297
298 298 /**
299 299 * Wrap a copy of size "len" of "ptr".
300 300 *
301 301 * @param ptr the memory to be copied
302 302 * @param len the length of the copy
303 303 * @param tag optional general purpose tag (see GuardedMemory::get_tag())
304 304 *
305 305 * @return guarded wrapped memory pointer to the user area, or NULL if OOM.
306 306 */
307 307 static void* wrap_copy(const void* p, const size_t len, const void* tag = NULL);
308 308
309 309 /**
310 310 * Free wrapped copy.
311 311 *
312 312 * Frees memory copied with "wrap_copy()".
313 313 *
314 314 * @param p memory returned by "wrap_copy()".
315 315 *
316 316 * @return true if guards were verified as intact. false indicates a buffer overrun.
317 317 */
318 318 static bool free_copy(void* p);
319 319
320 320 // Testing...
321 321 #ifndef PRODUCT
322 322 static void test_guarded_memory(void);
323 323 #endif
324 324 }; // GuardedMemory
325 325
326 326 #endif // SHARE_VM_MEMORY_GUARDED_MEMORY_HPP
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX