rev 6911 : 8065305: Make it possible to extend the G1CollectorPolicy
Summary: Added a G1CollectorPolicyExt class that serves as an extension point for G1CollectorPolicy.
Reviewed-by: sjohanss, tschatzl
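
The change to universe.cpp is two lines: the G1 include switches from
g1CollectorPolicy.hpp to g1CollectorPolicy_ext.hpp, and
Universe::initialize_heap() instantiates G1CollectorPolicyExt instead of
G1CollectorPolicy. The new header itself is outside this file's diff; the
sketch below is a minimal illustration, assuming it follows the usual HotSpot
_ext pattern in which the open tree ships an empty subclass and a downstream
source drop substitutes its own version of the header.

    // g1CollectorPolicy_ext.hpp -- minimal sketch, assuming the standard
    // HotSpot _ext pattern; the real header is not shown in this webrev.
    #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
    #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP

    #include "gc_implementation/g1/g1CollectorPolicy.hpp"

    // Empty by default, so behavior is identical to G1CollectorPolicy.
    // A downstream build can replace this header and have the subclass
    // override selected virtual methods of G1CollectorPolicy.
    class G1CollectorPolicyExt : public G1CollectorPolicy { };

    #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP

Because universe.cpp now names only G1CollectorPolicyExt, such a fork needs no
further change to this file; for example, a hypothetical subclass overriding
initialize_flags() would be picked up automatically at heap setup.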
--- old/hotspot/src/share/vm/memory/universe.cpp
+++ new/hotspot/src/share/vm/memory/universe.cpp
1 1 /*
2 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "classfile/classLoader.hpp"
27 27 #include "classfile/classLoaderData.hpp"
28 28 #include "classfile/javaClasses.hpp"
29 29 #if INCLUDE_CDS
30 30 #include "classfile/sharedClassUtil.hpp"
31 31 #endif
32 32 #include "classfile/symbolTable.hpp"
33 33 #include "classfile/systemDictionary.hpp"
34 34 #include "classfile/vmSymbols.hpp"
35 35 #include "code/codeCache.hpp"
36 36 #include "code/dependencies.hpp"
37 37 #include "gc_interface/collectedHeap.inline.hpp"
38 38 #include "interpreter/interpreter.hpp"
39 39 #include "memory/cardTableModRefBS.hpp"
40 40 #include "memory/filemap.hpp"
41 41 #include "memory/gcLocker.inline.hpp"
42 42 #include "memory/genCollectedHeap.hpp"
43 43 #include "memory/genRemSet.hpp"
44 44 #include "memory/generation.hpp"
45 45 #include "memory/metadataFactory.hpp"
46 46 #include "memory/metaspaceShared.hpp"
47 47 #include "memory/oopFactory.hpp"
48 48 #include "memory/space.hpp"
49 49 #include "memory/universe.hpp"
50 50 #include "memory/universe.inline.hpp"
51 51 #include "oops/constantPool.hpp"
52 52 #include "oops/instanceClassLoaderKlass.hpp"
53 53 #include "oops/instanceKlass.hpp"
54 54 #include "oops/instanceMirrorKlass.hpp"
55 55 #include "oops/instanceRefKlass.hpp"
56 56 #include "oops/oop.inline.hpp"
57 57 #include "oops/typeArrayKlass.hpp"
58 58 #include "prims/jvmtiRedefineClassesTrace.hpp"
59 59 #include "runtime/arguments.hpp"
60 60 #include "runtime/deoptimization.hpp"
61 61 #include "runtime/fprofiler.hpp"
62 62 #include "runtime/handles.inline.hpp"
63 63 #include "runtime/init.hpp"
64 64 #include "runtime/java.hpp"
65 65 #include "runtime/javaCalls.hpp"
66 66 #include "runtime/sharedRuntime.hpp"
67 67 #include "runtime/synchronizer.hpp"
68 68 #include "runtime/thread.inline.hpp"
69 69 #include "runtime/timer.hpp"
70 70 #include "runtime/vm_operations.hpp"
71 71 #include "services/memoryService.hpp"
72 72 #include "utilities/copy.hpp"
73 73 #include "utilities/events.hpp"
74 74 #include "utilities/hashtable.inline.hpp"
75 75 #include "utilities/preserveException.hpp"
76 76 #include "utilities/macros.hpp"
77 77 #if INCLUDE_ALL_GCS
78 78 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
79 79 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
80 80 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
81 -#include "gc_implementation/g1/g1CollectorPolicy.hpp"
81 +#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
82 82 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
83 83 #endif // INCLUDE_ALL_GCS
84 84
85 85 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
86 86
87 87 // Known objects
88 88 Klass* Universe::_boolArrayKlassObj = NULL;
89 89 Klass* Universe::_byteArrayKlassObj = NULL;
90 90 Klass* Universe::_charArrayKlassObj = NULL;
91 91 Klass* Universe::_intArrayKlassObj = NULL;
92 92 Klass* Universe::_shortArrayKlassObj = NULL;
93 93 Klass* Universe::_longArrayKlassObj = NULL;
94 94 Klass* Universe::_singleArrayKlassObj = NULL;
95 95 Klass* Universe::_doubleArrayKlassObj = NULL;
96 96 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
97 97 Klass* Universe::_objectArrayKlassObj = NULL;
98 98 oop Universe::_int_mirror = NULL;
99 99 oop Universe::_float_mirror = NULL;
100 100 oop Universe::_double_mirror = NULL;
101 101 oop Universe::_byte_mirror = NULL;
102 102 oop Universe::_bool_mirror = NULL;
103 103 oop Universe::_char_mirror = NULL;
104 104 oop Universe::_long_mirror = NULL;
105 105 oop Universe::_short_mirror = NULL;
106 106 oop Universe::_void_mirror = NULL;
107 107 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
108 108 oop Universe::_main_thread_group = NULL;
109 109 oop Universe::_system_thread_group = NULL;
110 110 objArrayOop Universe::_the_empty_class_klass_array = NULL;
111 111 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
112 112 oop Universe::_the_null_string = NULL;
113 113 oop Universe::_the_min_jint_string = NULL;
114 114 LatestMethodCache* Universe::_finalizer_register_cache = NULL;
115 115 LatestMethodCache* Universe::_loader_addClass_cache = NULL;
116 116 LatestMethodCache* Universe::_pd_implies_cache = NULL;
117 117 oop Universe::_out_of_memory_error_java_heap = NULL;
118 118 oop Universe::_out_of_memory_error_metaspace = NULL;
119 119 oop Universe::_out_of_memory_error_class_metaspace = NULL;
120 120 oop Universe::_out_of_memory_error_array_size = NULL;
121 121 oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
122 122 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
123 123 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
124 124 bool Universe::_verify_in_progress = false;
125 125 oop Universe::_null_ptr_exception_instance = NULL;
126 126 oop Universe::_arithmetic_exception_instance = NULL;
127 127 oop Universe::_virtual_machine_error_instance = NULL;
128 128 oop Universe::_vm_exception = NULL;
129 129 oop Universe::_allocation_context_notification_obj = NULL;
130 130
131 131 Method* Universe::_throw_illegal_access_error = NULL;
132 132 Array<int>* Universe::_the_empty_int_array = NULL;
133 133 Array<u2>* Universe::_the_empty_short_array = NULL;
134 134 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
135 135 Array<Method*>* Universe::_the_empty_method_array = NULL;
136 136
137 137 // These variables are guarded by FullGCALot_lock.
138 138 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
139 139 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
140 140
141 141 // Heap
142 142 int Universe::_verify_count = 0;
143 143
144 144 int Universe::_base_vtable_size = 0;
145 145 bool Universe::_bootstrapping = false;
146 146 bool Universe::_fully_initialized = false;
147 147
148 148 size_t Universe::_heap_capacity_at_last_gc;
149 149 size_t Universe::_heap_used_at_last_gc = 0;
150 150
151 151 CollectedHeap* Universe::_collectedHeap = NULL;
152 152
153 153 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
154 154 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
155 155 address Universe::_narrow_ptrs_base;
156 156
157 157 void Universe::basic_type_classes_do(void f(Klass*)) {
158 158 f(boolArrayKlassObj());
159 159 f(byteArrayKlassObj());
160 160 f(charArrayKlassObj());
161 161 f(intArrayKlassObj());
162 162 f(shortArrayKlassObj());
163 163 f(longArrayKlassObj());
164 164 f(singleArrayKlassObj());
165 165 f(doubleArrayKlassObj());
166 166 }
167 167
168 168 void Universe::oops_do(OopClosure* f, bool do_all) {
169 169
170 170 f->do_oop((oop*) &_int_mirror);
171 171 f->do_oop((oop*) &_float_mirror);
172 172 f->do_oop((oop*) &_double_mirror);
173 173 f->do_oop((oop*) &_byte_mirror);
174 174 f->do_oop((oop*) &_bool_mirror);
175 175 f->do_oop((oop*) &_char_mirror);
176 176 f->do_oop((oop*) &_long_mirror);
177 177 f->do_oop((oop*) &_short_mirror);
178 178 f->do_oop((oop*) &_void_mirror);
179 179
180 180 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
181 181 f->do_oop((oop*) &_mirrors[i]);
182 182 }
183 183 assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
184 184
185 185 f->do_oop((oop*)&_the_empty_class_klass_array);
186 186 f->do_oop((oop*)&_the_null_string);
187 187 f->do_oop((oop*)&_the_min_jint_string);
188 188 f->do_oop((oop*)&_out_of_memory_error_java_heap);
189 189 f->do_oop((oop*)&_out_of_memory_error_metaspace);
190 190 f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
191 191 f->do_oop((oop*)&_out_of_memory_error_array_size);
192 192 f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
193 193 f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
194 194 f->do_oop((oop*)&_null_ptr_exception_instance);
195 195 f->do_oop((oop*)&_arithmetic_exception_instance);
196 196 f->do_oop((oop*)&_virtual_machine_error_instance);
197 197 f->do_oop((oop*)&_main_thread_group);
198 198 f->do_oop((oop*)&_system_thread_group);
199 199 f->do_oop((oop*)&_vm_exception);
200 200 f->do_oop((oop*)&_allocation_context_notification_obj);
201 201 debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
202 202 }
203 203
204 204 // Serialize metadata in and out of CDS archive, not oops.
205 205 void Universe::serialize(SerializeClosure* f, bool do_all) {
206 206
207 207 f->do_ptr((void**)&_boolArrayKlassObj);
208 208 f->do_ptr((void**)&_byteArrayKlassObj);
209 209 f->do_ptr((void**)&_charArrayKlassObj);
210 210 f->do_ptr((void**)&_intArrayKlassObj);
211 211 f->do_ptr((void**)&_shortArrayKlassObj);
212 212 f->do_ptr((void**)&_longArrayKlassObj);
213 213 f->do_ptr((void**)&_singleArrayKlassObj);
214 214 f->do_ptr((void**)&_doubleArrayKlassObj);
215 215 f->do_ptr((void**)&_objectArrayKlassObj);
216 216
217 217 {
218 218 for (int i = 0; i < T_VOID+1; i++) {
219 219 if (_typeArrayKlassObjs[i] != NULL) {
220 220 assert(i >= T_BOOLEAN, "checking");
221 221 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
222 222 } else if (do_all) {
223 223 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
224 224 }
225 225 }
226 226 }
227 227
228 228 f->do_ptr((void**)&_the_array_interfaces_array);
229 229 f->do_ptr((void**)&_the_empty_int_array);
230 230 f->do_ptr((void**)&_the_empty_short_array);
231 231 f->do_ptr((void**)&_the_empty_method_array);
232 232 f->do_ptr((void**)&_the_empty_klass_array);
233 233 _finalizer_register_cache->serialize(f);
234 234 _loader_addClass_cache->serialize(f);
235 235 _pd_implies_cache->serialize(f);
236 236 }
237 237
238 238 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
239 239 if (size < alignment || size % alignment != 0) {
240 240 vm_exit_during_initialization(
241 241 err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
242 242 }
243 243 }
244 244
245 245 void initialize_basic_type_klass(Klass* k, TRAPS) {
246 246 Klass* ok = SystemDictionary::Object_klass();
247 247 if (UseSharedSpaces) {
248 248 ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
249 249 assert(k->super() == ok, "u3");
250 250 k->restore_unshareable_info(loader_data, Handle(), CHECK);
251 251 } else {
252 252 k->initialize_supers(ok, CHECK);
253 253 }
254 254 k->append_to_sibling_list();
255 255 }
256 256
257 257 void Universe::genesis(TRAPS) {
258 258 ResourceMark rm;
259 259
260 260 { FlagSetting fs(_bootstrapping, true);
261 261
262 262 { MutexLocker mc(Compile_lock);
263 263
264 264 // determine base vtable size; without that we cannot create the array klasses
265 265 compute_base_vtable_size();
266 266
267 267 if (!UseSharedSpaces) {
268 268 _boolArrayKlassObj = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
269 269 _charArrayKlassObj = TypeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
270 270 _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
271 271 _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
272 272 _byteArrayKlassObj = TypeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
273 273 _shortArrayKlassObj = TypeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
274 274 _intArrayKlassObj = TypeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
275 275 _longArrayKlassObj = TypeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);
276 276
277 277 _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
278 278 _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
279 279 _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
280 280 _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
281 281 _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
282 282 _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
283 283 _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
284 284 _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
285 285
286 286 ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
287 287
288 288 _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
289 289 _the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
290 290 _the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
291 291 _the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
292 292 _the_empty_klass_array = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
293 293 }
294 294 }
295 295
296 296 vmSymbols::initialize(CHECK);
297 297
298 298 SystemDictionary::initialize(CHECK);
299 299
300 300 Klass* ok = SystemDictionary::Object_klass();
301 301
302 302 _the_null_string = StringTable::intern("null", CHECK);
303 303 _the_min_jint_string = StringTable::intern("-2147483648", CHECK);
304 304
305 305 if (UseSharedSpaces) {
306 306 // Verify shared interfaces array.
307 307 assert(_the_array_interfaces_array->at(0) ==
308 308 SystemDictionary::Cloneable_klass(), "u3");
309 309 assert(_the_array_interfaces_array->at(1) ==
310 310 SystemDictionary::Serializable_klass(), "u3");
311 311 } else {
312 312 // Set up shared interfaces array. (Do this before supers are set up.)
313 313 _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
314 314 _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
315 315 }
316 316
317 317 initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
318 318 initialize_basic_type_klass(charArrayKlassObj(), CHECK);
319 319 initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
320 320 initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
321 321 initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
322 322 initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
323 323 initialize_basic_type_klass(intArrayKlassObj(), CHECK);
324 324 initialize_basic_type_klass(longArrayKlassObj(), CHECK);
325 325 } // end of core bootstrapping
326 326
327 327 // Maybe this could be lifted up now that object arrays can be initialized
328 328 // during bootstrapping.
329 329
330 330 // OLD
331 331 // Initialize _objectArrayKlass after core bootstrapping to make
332 332 // sure the super class is set up properly for _objectArrayKlass.
333 333 // ---
334 334 // NEW
335 335 // Since some of the old system object arrays have been converted to
336 336 // ordinary object arrays, _objectArrayKlass will be loaded when
337 337 // SystemDictionary::initialize(CHECK); is run. See the extra check
338 338 // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
339 339 _objectArrayKlassObj = InstanceKlass::
340 340 cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
341 341 // OLD
342 342 // Add the class to the class hierarchy manually to make sure that
343 343 // its vtable is initialized after core bootstrapping is completed.
344 344 // ---
345 345 // NEW
346 346 // Have already been initialized.
347 347 _objectArrayKlassObj->append_to_sibling_list();
348 348
349 349 // Compute is_jdk version flags.
350 350 // Only 1.3 or later has the java.lang.Shutdown class.
351 351 // Only 1.4 or later has the java.lang.CharSequence interface.
352 352 // Only 1.5 or later has the java.lang.management.MemoryUsage class.
353 353 if (JDK_Version::is_partially_initialized()) {
354 354 uint8_t jdk_version;
355 355 Klass* k = SystemDictionary::resolve_or_null(
356 356 vmSymbols::java_lang_management_MemoryUsage(), THREAD);
357 357 CLEAR_PENDING_EXCEPTION; // ignore exceptions
358 358 if (k == NULL) {
359 359 k = SystemDictionary::resolve_or_null(
360 360 vmSymbols::java_lang_CharSequence(), THREAD);
361 361 CLEAR_PENDING_EXCEPTION; // ignore exceptions
362 362 if (k == NULL) {
363 363 k = SystemDictionary::resolve_or_null(
364 364 vmSymbols::java_lang_Shutdown(), THREAD);
365 365 CLEAR_PENDING_EXCEPTION; // ignore exceptions
366 366 if (k == NULL) {
367 367 jdk_version = 2;
368 368 } else {
369 369 jdk_version = 3;
370 370 }
371 371 } else {
372 372 jdk_version = 4;
373 373 }
374 374 } else {
375 375 jdk_version = 5;
376 376 }
377 377 JDK_Version::fully_initialize(jdk_version);
378 378 }
379 379
380 380 #ifdef ASSERT
381 381 if (FullGCALot) {
382 382 // Allocate an array of dummy objects.
383 383 // We'd like these to be at the bottom of the old generation,
384 384 // so that when we free one and then collect,
385 385 // (almost) the whole heap moves
386 386 // and we find out if we actually update all the oops correctly.
387 387 // But we can't allocate directly in the old generation,
388 388 // so we allocate wherever, and hope that the first collection
389 389 // moves these objects to the bottom of the old generation.
390 390 // We can allocate directly in the permanent generation, so we do.
391 391 int size;
392 392 if (UseConcMarkSweepGC) {
393 393 warning("Using +FullGCALot with concurrent mark sweep gc "
394 394 "will not force all objects to relocate");
395 395 size = FullGCALotDummies;
396 396 } else {
397 397 size = FullGCALotDummies * 2;
398 398 }
399 399 objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
400 400 objArrayHandle dummy_array(THREAD, naked_array);
401 401 int i = 0;
402 402 while (i < size) {
403 403 // Allocate dummy in old generation
404 404 oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
405 405 dummy_array->obj_at_put(i++, dummy);
406 406 }
407 407 {
408 408 // Only modify the global variable inside the mutex.
409 409 // If we had a race to here, the other dummy_array instances
410 410 // and their elements just get dropped on the floor, which is fine.
411 411 MutexLocker ml(FullGCALot_lock);
412 412 if (_fullgc_alot_dummy_array == NULL) {
413 413 _fullgc_alot_dummy_array = dummy_array();
414 414 }
415 415 }
416 416 assert(i == _fullgc_alot_dummy_array->length(), "just checking");
417 417 }
418 418 #endif
419 419
420 420 // Initialize dependency array for null class loader
421 421 ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
422 422
423 423 }
424 424
425 425 // CDS support for patching vtables in metadata in the shared archive.
426 426 // All types inherited from Metadata have vtables, but not types inherited
427 427 // from MetaspaceObj, because the latter does not have virtual functions.
428 428 // If the metadata type has a vtable, it cannot be shared in the read-only
429 429 // section of the CDS archive, because the vtable pointer is patched.
430 430 static inline void add_vtable(void** list, int* n, void* o, int count) {
431 431 guarantee((*n) < count, "vtable list too small");
432 432 void* vtable = dereference_vptr(o);
433 433 assert(*(void**)(vtable) != NULL, "invalid vtable");
434 434 list[(*n)++] = vtable;
435 435 }
436 436
437 437 void Universe::init_self_patching_vtbl_list(void** list, int count) {
438 438 int n = 0;
439 439 { InstanceKlass o; add_vtable(list, &n, &o, count); }
440 440 { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
441 441 { InstanceMirrorKlass o; add_vtable(list, &n, &o, count); }
442 442 { InstanceRefKlass o; add_vtable(list, &n, &o, count); }
443 443 { TypeArrayKlass o; add_vtable(list, &n, &o, count); }
444 444 { ObjArrayKlass o; add_vtable(list, &n, &o, count); }
445 445 { Method o; add_vtable(list, &n, &o, count); }
446 446 { ConstantPool o; add_vtable(list, &n, &o, count); }
447 447 }
448 448
449 449 void Universe::initialize_basic_type_mirrors(TRAPS) {
450 450 assert(_int_mirror==NULL, "basic type mirrors already initialized");
451 451 _int_mirror =
452 452 java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
453 453 _float_mirror =
454 454 java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
455 455 _double_mirror =
456 456 java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
457 457 _byte_mirror =
458 458 java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
459 459 _bool_mirror =
460 460 java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
461 461 _char_mirror =
462 462 java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
463 463 _long_mirror =
464 464 java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
465 465 _short_mirror =
466 466 java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
467 467 _void_mirror =
468 468 java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
469 469
470 470 _mirrors[T_INT] = _int_mirror;
471 471 _mirrors[T_FLOAT] = _float_mirror;
472 472 _mirrors[T_DOUBLE] = _double_mirror;
473 473 _mirrors[T_BYTE] = _byte_mirror;
474 474 _mirrors[T_BOOLEAN] = _bool_mirror;
475 475 _mirrors[T_CHAR] = _char_mirror;
476 476 _mirrors[T_LONG] = _long_mirror;
477 477 _mirrors[T_SHORT] = _short_mirror;
478 478 _mirrors[T_VOID] = _void_mirror;
479 479 //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
480 480 //_mirrors[T_ARRAY] = InstanceKlass::cast(_object_klass)->java_mirror();
481 481 }
482 482
483 483 void Universe::fixup_mirrors(TRAPS) {
484 484 // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
485 485 // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
486 486 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
487 487 // that the number of objects allocated at this point is very small.
488 488 assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
489 489 HandleMark hm(THREAD);
490 490 // Cache the start of the static fields
491 491 InstanceMirrorKlass::init_offset_of_static_fields();
492 492
493 493 GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
494 494 int list_length = list->length();
495 495 for (int i = 0; i < list_length; i++) {
496 496 Klass* k = list->at(i);
497 497 assert(k->is_klass(), "List should only hold classes");
498 498 EXCEPTION_MARK;
499 499 KlassHandle kh(THREAD, k);
500 500 java_lang_Class::fixup_mirror(kh, CATCH);
501 501 }
502 502 delete java_lang_Class::fixup_mirror_list();
503 503 java_lang_Class::set_fixup_mirror_list(NULL);
504 504 }
505 505
506 506 static bool has_run_finalizers_on_exit = false;
507 507
508 508 void Universe::run_finalizers_on_exit() {
509 509 if (has_run_finalizers_on_exit) return;
510 510 has_run_finalizers_on_exit = true;
511 511
512 512 // Called on VM exit. This ought to be run in a separate thread.
513 513 if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
514 514 {
515 515 PRESERVE_EXCEPTION_MARK;
516 516 KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
517 517 JavaValue result(T_VOID);
518 518 JavaCalls::call_static(
519 519 &result,
520 520 finalizer_klass,
521 521 vmSymbols::run_finalizers_on_exit_name(),
522 522 vmSymbols::void_method_signature(),
523 523 THREAD
524 524 );
525 525 // Ignore any pending exceptions
526 526 CLEAR_PENDING_EXCEPTION;
527 527 }
528 528 }
529 529
530 530
531 531 // initialize_vtable could cause gc if
532 532 // 1) we specified true to initialize_vtable and
533 533 // 2) this ran after gc was enabled
534 534 // In case those ever change we use handles for oops
535 535 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
536 536 // init vtable of k and all subclasses
537 537 Klass* ko = k_h();
538 538 klassVtable* vt = ko->vtable();
539 539 if (vt) vt->initialize_vtable(false, CHECK);
540 540 if (ko->oop_is_instance()) {
541 541 InstanceKlass* ik = (InstanceKlass*)ko;
542 542 for (KlassHandle s_h(THREAD, ik->subklass());
543 543 s_h() != NULL;
544 544 s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
545 545 reinitialize_vtable_of(s_h, CHECK);
546 546 }
547 547 }
548 548 }
549 549
550 550
551 551 void initialize_itable_for_klass(Klass* k, TRAPS) {
552 552 InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
553 553 }
554 554
555 555
556 556 void Universe::reinitialize_itables(TRAPS) {
557 557 SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
558 558
559 559 }
560 560
561 561
562 562 bool Universe::on_page_boundary(void* addr) {
563 563 return ((uintptr_t) addr) % os::vm_page_size() == 0;
564 564 }
565 565
566 566
567 567 bool Universe::should_fill_in_stack_trace(Handle throwable) {
568 568 // never attempt to fill in the stack trace of preallocated errors that do not have
569 569 // backtrace. These errors are kept alive forever and may be "re-used" when all
570 570 // preallocated errors with backtrace have been consumed. Also need to avoid
571 571 // a potential loop which could happen if an out of memory occurs when attempting
572 572 // to allocate the backtrace.
573 573 return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
574 574 (throwable() != Universe::_out_of_memory_error_metaspace) &&
575 575 (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
576 576 (throwable() != Universe::_out_of_memory_error_array_size) &&
577 577 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
578 578 }
579 579
580 580
581 581 oop Universe::gen_out_of_memory_error(oop default_err) {
582 582 // generate an out of memory error:
583 583 // - if there is a preallocated error with backtrace available then return it with
584 584 // a filled in stack trace.
585 585 // - if there are no preallocated errors with backtrace available then return
586 586 // an error without backtrace.
587 587 int next;
588 588 if (_preallocated_out_of_memory_error_avail_count > 0) {
589 589 next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
590 590 assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
591 591 } else {
592 592 next = -1;
593 593 }
594 594 if (next < 0) {
595 595 // all preallocated errors have been used.
596 596 // return default
597 597 return default_err;
598 598 } else {
599 599 // get the error object at the slot and set it to NULL so that the
600 600 // array isn't keeping it alive anymore.
601 601 oop exc = preallocated_out_of_memory_errors()->obj_at(next);
602 602 assert(exc != NULL, "slot has been used already");
603 603 preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
604 604
605 605 // use the message from the default error
606 606 oop msg = java_lang_Throwable::message(default_err);
607 607 assert(msg != NULL, "no message");
608 608 java_lang_Throwable::set_message(exc, msg);
609 609
610 610 // populate the stack trace and return it.
611 611 java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
612 612 return exc;
613 613 }
614 614 }
615 615
616 616 intptr_t Universe::_non_oop_bits = 0;
617 617
618 618 void* Universe::non_oop_word() {
619 619 // Neither the high bits nor the low bits of this value are allowed
620 620 // to look like (respectively) the high or low bits of a real oop.
621 621 //
622 622 // High and low are CPU-specific notions, but low always includes
623 623 // the low-order bit. Since oops are always aligned at least mod 4,
624 624 // setting the low-order bit will ensure that the low half of the
625 625 // word will never look like that of a real oop.
626 626 //
627 627 // Using the OS-supplied non-memory-address word (usually 0 or -1)
628 628 // will take care of the high bits, however many there are.
629 629
630 630 if (_non_oop_bits == 0) {
631 631 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
632 632 }
633 633
634 634 return (void*)_non_oop_bits;
635 635 }
636 636
637 637 jint universe_init() {
638 638 assert(!Universe::_fully_initialized, "called after initialize_vtables");
639 639 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
640 640 "LogHeapWordSize is incorrect.");
641 641 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
642 642 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
643 643 "oop size is not a multiple of HeapWord size");
644 644 TraceTime timer("Genesis", TraceStartupTime);
645 645 JavaClasses::compute_hard_coded_offsets();
646 646
647 647 jint status = Universe::initialize_heap();
648 648 if (status != JNI_OK) {
649 649 return status;
650 650 }
651 651
652 652 Metaspace::global_initialize();
653 653
654 654 // Create memory for metadata. Must be after initializing heap for
655 655 // DumpSharedSpaces.
656 656 ClassLoaderData::init_null_class_loader_data();
657 657
658 658 // We have a heap so create the Method* caches before
659 659 // Metaspace::initialize_shared_spaces() tries to populate them.
660 660 Universe::_finalizer_register_cache = new LatestMethodCache();
661 661 Universe::_loader_addClass_cache = new LatestMethodCache();
662 662 Universe::_pd_implies_cache = new LatestMethodCache();
663 663
664 664 if (UseSharedSpaces) {
665 665 // Read the data structures supporting the shared spaces (shared
666 666 // system dictionary, symbol table, etc.). After that, access to
667 667 // the file (other than the mapped regions) is no longer needed, and
668 668 // the file is closed. Closing the file does not affect the
669 669 // currently mapped regions.
670 670 MetaspaceShared::initialize_shared_spaces();
671 671 StringTable::create_table();
672 672 } else {
673 673 SymbolTable::create_table();
674 674 StringTable::create_table();
675 675 ClassLoader::create_package_info_table();
676 676
677 677 if (DumpSharedSpaces) {
678 678 MetaspaceShared::prepare_for_dumping();
679 679 }
680 680 }
681 681
682 682 return JNI_OK;
683 683 }
684 684
685 685 // Choose the heap base address and oop encoding mode
686 686 // when compressed oops are used:
687 687 // Unscaled - Use 32-bit oops without encoding when
688 688 // NarrowOopHeapBaseMin + heap_size < 4Gb
689 689 // ZeroBased - Use zero based compressed oops with encoding when
690 690 // NarrowOopHeapBaseMin + heap_size < 32Gb
691 691 // HeapBased - Use compressed oops with heap base + encoding.
692 692
693 693 // 4Gb
694 694 static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
695 695 // 32Gb
696 696 // OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
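// Worked example (assuming the default ObjectAlignmentInBytes of 8):
// LogMinObjAlignmentInBytes == log2(8) == 3, so
// OopEncodingHeapMax == 4Gb << 3 == 32Gb, the largest heap a shifted
// 32-bit narrow oop can span, matching the mode summary above.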
697 697
698 698 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
699 699 assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
700 700 assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
701 701 assert(is_size_aligned(heap_size, alignment), "Must be");
702 702
703 703 uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
704 704
705 705 size_t base = 0;
706 706 #ifdef _LP64
707 707 if (UseCompressedOops) {
708 708 assert(mode == UnscaledNarrowOop ||
709 709 mode == ZeroBasedNarrowOop ||
710 710 mode == HeapBasedNarrowOop, "mode is invalid");
711 711 const size_t total_size = heap_size + heap_base_min_address_aligned;
712 712 // Return specified base for the first request.
713 713 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
714 714 base = heap_base_min_address_aligned;
715 715
716 716 // If the total size is small enough to allow UnscaledNarrowOop then
717 717 // just use UnscaledNarrowOop.
718 718 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
719 719 if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
720 720 (Universe::narrow_oop_shift() == 0)) {
721 721 // Use 32-bit oops without encoding and
722 722 // place heap's top on the 4Gb boundary
723 723 base = (UnscaledOopHeapMax - heap_size);
724 724 } else {
725 725 // Can't reserve with NarrowOopShift == 0
726 726 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
727 727
728 728 if (mode == UnscaledNarrowOop ||
729 729 (mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax)) {
730 730
731 731 // Use zero based compressed oops with encoding and
732 732 // place heap's top on the 32Gb boundary in case
733 733 // total_size > 4Gb or reservation below 4Gb failed.
734 734 uint64_t heap_top = OopEncodingHeapMax;
735 735
736 736 // For small heaps, save some space for compressed class pointer
737 737 // space so it can be decoded with no base.
738 738 if (UseCompressedClassPointers && !UseSharedSpaces &&
739 739 OopEncodingHeapMax <= 32*G) {
740 740
741 741 uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
742 742 assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
743 743 alignment), "difference must be aligned too");
744 744 uint64_t new_top = OopEncodingHeapMax-class_space;
745 745
746 746 if (total_size <= new_top) {
747 747 heap_top = new_top;
748 748 }
749 749 }
750 750
751 751 // Align base to the adjusted top of the heap
752 752 base = heap_top - heap_size;
753 753 }
754 754 }
755 755 } else {
756 756 // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops, or
757 757 // HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
758 758 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
759 759 }
760 760
761 761 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
762 762 // used in ReservedHeapSpace() constructors.
763 763 // The final values will be set in initialize_heap() below.
764 764 if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
765 765 // Use zero based compressed oops
766 766 Universe::set_narrow_oop_base(NULL);
767 767 // Don't need guard page for implicit checks in indexed
768 768 // addressing mode with zero based Compressed Oops.
769 769 Universe::set_narrow_oop_use_implicit_null_checks(true);
770 770 } else {
771 771 // Set to a non-NULL value so the ReservedSpace ctor computes
772 772 // the correct no-access prefix.
773 773 // The final value will be set in initialize_heap() below.
774 774 Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
775 775 #if defined(_WIN64) || defined(AIX)
776 776 if (UseLargePages) {
777 777 // Cannot allocate guard pages for implicit checks in indexed
778 778 // addressing mode when large pages are specified on windows.
779 779 Universe::set_narrow_oop_use_implicit_null_checks(false);
780 780 }
781 781 #endif // _WIN64 || AIX
782 782 }
783 783 }
784 784 #endif
785 785
786 786 assert(is_ptr_aligned((char*)base, alignment), "Must be");
787 787 return (char*)base; // also return NULL (don't care) for 32-bit VM
788 788 }
789 789
790 790 jint Universe::initialize_heap() {
791 791
792 792 if (UseParallelGC) {
793 793 #if INCLUDE_ALL_GCS
794 794 Universe::_collectedHeap = new ParallelScavengeHeap();
795 795 #else // INCLUDE_ALL_GCS
796 796 fatal("UseParallelGC not supported in this VM.");
797 797 #endif // INCLUDE_ALL_GCS
798 798
799 799 } else if (UseG1GC) {
800 800 #if INCLUDE_ALL_GCS
801 - G1CollectorPolicy* g1p = new G1CollectorPolicy();
801 + G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
802 802 g1p->initialize_all();
803 803 G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
804 804 Universe::_collectedHeap = g1h;
805 805 #else // INCLUDE_ALL_GCS
806 806 fatal("UseG1GC not supported in java kernel vm.");
807 807 #endif // INCLUDE_ALL_GCS
808 808
809 809 } else {
810 810 GenCollectorPolicy *gc_policy;
811 811
812 812 if (UseSerialGC) {
813 813 gc_policy = new MarkSweepPolicy();
814 814 } else if (UseConcMarkSweepGC) {
815 815 #if INCLUDE_ALL_GCS
816 816 if (UseAdaptiveSizePolicy) {
817 817 gc_policy = new ASConcurrentMarkSweepPolicy();
818 818 } else {
819 819 gc_policy = new ConcurrentMarkSweepPolicy();
820 820 }
821 821 #else // INCLUDE_ALL_GCS
822 822 fatal("UseConcMarkSweepGC not supported in this VM.");
823 823 #endif // INCLUDE_ALL_GCS
824 824 } else { // default old generation
825 825 gc_policy = new MarkSweepPolicy();
826 826 }
827 827 gc_policy->initialize_all();
828 828
829 829 Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
830 830 }
831 831
832 832 ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
833 833
834 834 jint status = Universe::heap()->initialize();
835 835 if (status != JNI_OK) {
836 836 return status;
837 837 }
838 838
839 839 #ifdef _LP64
840 840 if (UseCompressedOops) {
841 841 // Subtract a page because something can get allocated at heap base.
842 842 // This also makes implicit null checking work, because the
843 843 // memory+1 page below heap_base needs to cause a signal.
844 844 // See needs_explicit_null_check.
845 845 // Only set the heap base for compressed oops because it indicates
846 846 // compressed oops for pstack code.
847 847 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
848 848 if (verbose) {
849 849 tty->cr();
850 850 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
851 851 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
852 852 }
853 853 if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
854 854 // Can't reserve heap below 32Gb.
855 855 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
856 856 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
857 857 #ifdef AIX
858 858 // There is no protected page before the heap. This assures all oops
859 859 // are decoded so that NULL is preserved, so this page will not be accessed.
860 860 Universe::set_narrow_oop_use_implicit_null_checks(false);
861 861 #endif
862 862 if (verbose) {
863 863 tty->print(", %s: "PTR_FORMAT,
864 864 narrow_oop_mode_to_string(HeapBasedNarrowOop),
865 865 Universe::narrow_oop_base());
866 866 }
867 867 } else {
868 868 Universe::set_narrow_oop_base(0);
869 869 if (verbose) {
870 870 tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
871 871 }
872 872 #ifdef _WIN64
873 873 if (!Universe::narrow_oop_use_implicit_null_checks()) {
874 874 // Don't need guard page for implicit checks in indexed addressing
875 875 // mode with zero based Compressed Oops.
876 876 Universe::set_narrow_oop_use_implicit_null_checks(true);
877 877 }
878 878 #endif // _WIN64
879 879 if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
880 880 // Can't reserve heap below 4Gb.
881 881 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
882 882 } else {
883 883 Universe::set_narrow_oop_shift(0);
884 884 if (verbose) {
885 885 tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
886 886 }
887 887 }
888 888 }
889 889
890 890 if (verbose) {
891 891 tty->cr();
892 892 tty->cr();
893 893 }
894 894 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
895 895 }
896 896 // Universe::narrow_oop_base() is one page below the heap.
897 897 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
898 898 os::vm_page_size()) ||
899 899 Universe::narrow_oop_base() == NULL, "invalid value");
900 900 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
901 901 Universe::narrow_oop_shift() == 0, "invalid value");
902 902 #endif
903 903
904 904 // We will never reach the CATCH below since Exceptions::_throw will cause
905 905 // the VM to exit if an exception is thrown during initialization
906 906
907 907 if (UseTLAB) {
908 908 assert(Universe::heap()->supports_tlab_allocation(),
909 909 "Should support thread-local allocation buffers");
910 910 ThreadLocalAllocBuffer::startup_initialization();
911 911 }
912 912 return JNI_OK;
913 913 }
914 914
915 915
916 916 // Reserve the Java heap, which is now the same for all GCs.
917 917 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
918 918 assert(alignment <= Arguments::conservative_max_heap_alignment(),
919 919 err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
920 920 alignment, Arguments::conservative_max_heap_alignment()));
921 921 size_t total_reserved = align_size_up(heap_size, alignment);
922 922 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
923 923 "heap size is too big for compressed oops");
924 924
925 925 bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
926 926 assert(!UseLargePages
927 927 || UseParallelGC
928 928 || use_large_pages, "Wrong alignment to use large pages");
929 929
930 930 char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
931 931
932 932 ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
933 933
934 934 if (UseCompressedOops) {
935 935 if (addr != NULL && !total_rs.is_reserved()) {
936 936 // Failed to reserve at specified address - the requested memory
937 937 // region is already taken, for example, by the 'java' launcher.
938 938 // Try again to reserve the heap higher.
939 939 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
940 940
941 941 ReservedHeapSpace total_rs0(total_reserved, alignment,
942 942 use_large_pages, addr);
943 943
944 944 if (addr != NULL && !total_rs0.is_reserved()) {
945 945 // Failed to reserve at specified address again - give up.
946 946 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
947 947 assert(addr == NULL, "");
948 948
949 949 ReservedHeapSpace total_rs1(total_reserved, alignment,
950 950 use_large_pages, addr);
951 951 total_rs = total_rs1;
952 952 } else {
953 953 total_rs = total_rs0;
954 954 }
955 955 }
956 956 }
957 957
958 958 if (!total_rs.is_reserved()) {
959 959 vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
960 960 return total_rs;
961 961 }
962 962
963 963 if (UseCompressedOops) {
964 964 // Universe::initialize_heap() will reset this to NULL if unscaled
965 965 // or zero-based narrow oops are actually used.
966 966 address base = (address)(total_rs.base() - os::vm_page_size());
967 967 Universe::set_narrow_oop_base(base);
968 968 }
969 969 return total_rs;
970 970 }
971 971
972 972
973 973 // It's the caller's responsibility to ensure glitch-freedom
974 974 // (if required).
975 975 void Universe::update_heap_info_at_gc() {
976 976 _heap_capacity_at_last_gc = heap()->capacity();
977 977 _heap_used_at_last_gc = heap()->used();
978 978 }
979 979
980 980
981 981 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
982 982 switch (mode) {
983 983 case UnscaledNarrowOop:
984 984 return "32-bits Oops";
985 985 case ZeroBasedNarrowOop:
986 986 return "zero based Compressed Oops";
987 987 case HeapBasedNarrowOop:
988 988 return "Compressed Oops with base";
989 989 }
990 990
991 991 ShouldNotReachHere();
992 992 return "";
993 993 }
994 994
995 995
996 996 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
997 997 if (narrow_oop_base() != 0) {
998 998 return HeapBasedNarrowOop;
999 999 }
1000 1000
1001 1001 if (narrow_oop_shift() != 0) {
1002 1002 return ZeroBasedNarrowOop;
1003 1003 }
1004 1004
1005 1005 return UnscaledNarrowOop;
1006 1006 }
1007 1007
1008 1008
1009 1009 void universe2_init() {
1010 1010 EXCEPTION_MARK;
1011 1011 Universe::genesis(CATCH);
1012 1012 }
1013 1013
1014 1014
1015 1015 // This function is defined in JVM.cpp
1016 1016 extern void initialize_converter_functions();
1017 1017
1018 1018 bool universe_post_init() {
1019 1019 assert(!is_init_completed(), "Error: initialization not yet completed!");
1020 1020 Universe::_fully_initialized = true;
1021 1021 EXCEPTION_MARK;
1022 1022 { ResourceMark rm;
1023 1023 Interpreter::initialize(); // needed for interpreter entry points
1024 1024 if (!UseSharedSpaces) {
1025 1025 HandleMark hm(THREAD);
1026 1026 KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
1027 1027 Universe::reinitialize_vtable_of(ok_h, CHECK_false);
1028 1028 Universe::reinitialize_itables(CHECK_false);
1029 1029 }
1030 1030 }
1031 1031
1032 1032 HandleMark hm(THREAD);
1033 1033 Klass* k;
1034 1034 instanceKlassHandle k_h;
1035 1035 // Setup preallocated empty java.lang.Class array
1036 1036 Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
1037 1037
1038 1038 // Setup preallocated OutOfMemoryError errors
1039 1039 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
1040 1040 k_h = instanceKlassHandle(THREAD, k);
1041 1041 Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
1042 1042 Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
1043 1043 Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
1044 1044 Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
1045 1045 Universe::_out_of_memory_error_gc_overhead_limit =
1046 1046 k_h->allocate_instance(CHECK_false);
1047 1047
1048 1048 // Setup preallocated NullPointerException
1049 1049 // (this is currently used for a cheap & dirty solution in compiler exception handling)
1050 1050 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
1051 1051 Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1052 1052 // Setup preallocated ArithmeticException
1053 1053 // (this is currently used for a cheap & dirty solution in compiler exception handling)
1054 1054 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
1055 1055 Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1056 1056 // Virtual Machine Error for when we get into a situation we can't resolve
1057 1057 k = SystemDictionary::resolve_or_fail(
1058 1058 vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
1059 1059 bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
1060 1060 if (!linked) {
1061 1061 tty->print_cr("Unable to link/verify VirtualMachineError class");
1062 1062 return false; // initialization failed
1063 1063 }
1064 1064 Universe::_virtual_machine_error_instance =
1065 1065 InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1066 1066
1067 1067 Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1068 1068
1069 1069 if (!DumpSharedSpaces) {
1070 1070 // These are the only Java fields that are currently set during shared space dumping.
1071 1071 // We prefer to not handle this generally, so we always reinitialize these detail messages.
1072 1072 Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
1073 1073 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
1074 1074
1075 1075 msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
1076 1076 java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
1077 1077 msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
1078 1078 java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
1079 1079
1080 1080 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
1081 1081 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
1082 1082
1083 1083 msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
1084 1084 java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
1085 1085
1086 1086 msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
1087 1087 java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
1088 1088
1089 1089 // Setup the array of errors that have preallocated backtrace
1090 1090 k = Universe::_out_of_memory_error_java_heap->klass();
1091 1091 assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
1092 1092 k_h = instanceKlassHandle(THREAD, k);
1093 1093
1094 1094 int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
1095 1095 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1096 1096 for (int i=0; i<len; i++) {
1097 1097 oop err = k_h->allocate_instance(CHECK_false);
1098 1098 Handle err_h = Handle(THREAD, err);
1099 1099 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1100 1100 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1101 1101 }
1102 1102 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1103 1103 }
1104 1104
1105 1105
1106 1106 // Setup static method for registering finalizers
1107 1107 // The finalizer klass must be linked before looking up the method, in
1108 1108 // case it needs to get rewritten.
1109 1109 InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
1110 1110 Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
1111 1111 vmSymbols::register_method_name(),
1112 1112 vmSymbols::register_method_signature());
1113 1113 if (m == NULL || !m->is_static()) {
1114 1114 tty->print_cr("Unable to link/verify Finalizer.register method");
1115 1115 return false; // initialization failed (cannot throw exception yet)
1116 1116 }
1117 1117 Universe::_finalizer_register_cache->init(
1118 1118 SystemDictionary::Finalizer_klass(), m);
1119 1119
1120 1120 InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->link_class(CHECK_false);
1121 1121 m = InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->find_method(
1122 1122 vmSymbols::throwIllegalAccessError_name(),
1123 1123 vmSymbols::void_method_signature());
1124 1124 if (m != NULL && !m->is_static()) {
1125 1125 // Note null is okay; this method is used in itables, and if it is null,
1126 1126 // then AbstractMethodError is thrown instead.
1127 1127 tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method");
1128 1128 return false; // initialization failed (cannot throw exception yet)
1129 1129 }
1130 1130 Universe::_throw_illegal_access_error = m;
1131 1131
1132 1132 // Setup method for registering loaded classes in class loader vector
1133 1133 InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
1134 1134 m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
1135 1135 if (m == NULL || m->is_static()) {
1136 1136 tty->print_cr("Unable to link/verify ClassLoader.addClass method");
1137 1137 return false; // initialization failed (cannot throw exception yet)
1138 1138 }
1139 1139 Universe::_loader_addClass_cache->init(
1140 1140 SystemDictionary::ClassLoader_klass(), m);
1141 1141
1142 1142 // Setup method for checking protection domain
1143 1143 InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
1144 1144 m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
1145 1145 find_method(vmSymbols::impliesCreateAccessControlContext_name(),
1146 1146 vmSymbols::void_boolean_signature());
1147 1147 // Allow NULL which should only happen with bootstrapping.
1148 1148 if (m != NULL) {
1149 1149 if (m->is_static()) {
1150 1150 // NoSuchMethodException doesn't actually work because it tries to run the
1151 1151 // <init> function before java_lang_Class is linked. Print error and exit.
1152 1152 tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
1153 1153 return false; // initialization failed
1154 1154 }
1155 1155 Universe::_pd_implies_cache->init(
1156 1156 SystemDictionary::ProtectionDomain_klass(), m);
1157 1157 }
1158 1158
1159 1159 // The following initializes converter functions for serialization in
1160 1160 // JVM.cpp. If we clean up the StrictMath code above we may want to find
1161 1161 // a better solution for this as well.
1162 1162 initialize_converter_functions();
1163 1163
1164 1164 // This needs to be done before the first scavenge/gc, since
1165 1165 // it's an input to soft ref clearing policy.
1166 1166 {
1167 1167 MutexLocker x(Heap_lock);
1168 1168 Universe::update_heap_info_at_gc();
1169 1169 }
1170 1170
1171 1171 // ("weak") refs processing infrastructure initialization
1172 1172 Universe::heap()->post_initialize();
1173 1173
1174 1174 // Initialize performance counters for metaspaces
1175 1175 MetaspaceCounters::initialize_performance_counters();
1176 1176 CompressedClassSpaceCounters::initialize_performance_counters();
1177 1177
1178 1178 MemoryService::add_metaspace_memory_pools();
1179 1179
1180 1180 MemoryService::set_universe_heap(Universe::_collectedHeap);
1181 1181 #if INCLUDE_CDS
1182 1182 if (UseSharedSpaces) {
1183 1183 SharedClassUtil::initialize(CHECK_false);
1184 1184 }
1185 1185 #endif
1186 1186 return true;
1187 1187 }
1188 1188
1189 1189
1190 1190 void Universe::compute_base_vtable_size() {
1191 1191 _base_vtable_size = ClassLoader::compute_Object_vtable();
1192 1192 }
1193 1193
1194 1194
1195 1195 // %%% The Universe::flush_foo methods belong in CodeCache.
1196 1196
1197 1197 // Flushes compiled methods dependent on dependee.
1198 1198 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
1199 1199 assert_lock_strong(Compile_lock);
1200 1200
1201 1201 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1202 1202
1203 1203 // CodeCache can only be updated by a thread_in_VM and they will all be
1204 1204 // stopped during the safepoint so CodeCache will be safe to update without
1205 1205 // holding the CodeCache_lock.
1206 1206
1207 1207 KlassDepChange changes(dependee);
1208 1208
1209 1209 // Compute the dependent nmethods
1210 1210 if (CodeCache::mark_for_deoptimization(changes) > 0) {
1211 1211 // At least one nmethod has been marked for deoptimization
1212 1212 VM_Deoptimize op;
1213 1213 VMThread::execute(&op);
1214 1214 }
1215 1215 }
1216 1216
1217 1217 // Flushes compiled methods dependent on a particular CallSite
1218 1218 // instance when its target is different than the given MethodHandle.
1219 1219 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
1220 1220 assert_lock_strong(Compile_lock);
1221 1221
1222 1222 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1223 1223
1224 1224 // CodeCache can only be updated by a thread_in_VM and they will all be
1225 1225 // stopped during the safepoint so CodeCache will be safe to update without
1226 1226 // holding the CodeCache_lock.
1227 1227
1228 1228 CallSiteDepChange changes(call_site(), method_handle());
1229 1229
1230 1230 // Compute the dependent nmethods that have a reference to a
1231 1231 // CallSite object. We use InstanceKlass::mark_dependent_nmethod
1232 1232 // directly instead of CodeCache::mark_for_deoptimization because we
1233 1233 // want dependents on the call site class only not all classes in
1234 1234 // want dependents on the call site class only, not all classes in
1235 1235 int marked = 0;
1236 1236 {
1237 1237 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1238 1238 InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1239 1239 marked = call_site_klass->mark_dependent_nmethods(changes);
1240 1240 }
1241 1241 if (marked > 0) {
1242 1242 // At least one nmethod has been marked for deoptimization
1243 1243 VM_Deoptimize op;
1244 1244 VMThread::execute(&op);
1245 1245 }
1246 1246 }
1247 1247
1248 1248 #ifdef HOTSWAP
1249 1249 // Flushes compiled methods dependent on dependee in the evolutionary sense
1250 1250 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1251 1251 // --- Compile_lock is not held. However we are at a safepoint.
1252 1252 assert_locked_or_safepoint(Compile_lock);
1253 1253 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1254 1254
1255 1255 // CodeCache can only be updated by a thread_in_VM and they will all be
1256 1256   // stopped during the safepoint so CodeCache will be safe to update without
1257 1257 // holding the CodeCache_lock.
1258 1258
1259 1259 // Compute the dependent nmethods
1260 1260 if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
1261 1261 // At least one nmethod has been marked for deoptimization
1262 1262
1263 1263 // All this already happens inside a VM_Operation, so we'll do all the work here.
1264 1264 // Stuff copied from VM_Deoptimize and modified slightly.
1265 1265
1266 1266 // We do not want any GCs to happen while we are in the middle of this VM operation
1267 1267 ResourceMark rm;
1268 1268 DeoptimizationMarker dm;
1269 1269
1270 1270 // Deoptimize all activations depending on marked nmethods
1271 1271 Deoptimization::deoptimize_dependents();
1272 1272
1273 1273 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1274 1274 CodeCache::make_marked_nmethods_not_entrant();
1275 1275 }
1276 1276 }
1277 1277 #endif // HOTSWAP
1278 1278
1279 1279
1280 1280 // Flushes compiled methods dependent on dependee
1281 1281 void Universe::flush_dependents_on_method(methodHandle m_h) {
1282 1282 // --- Compile_lock is not held. However we are at a safepoint.
1283 1283 assert_locked_or_safepoint(Compile_lock);
1284 1284
1285 1285 // CodeCache can only be updated by a thread_in_VM and they will all be
1286 1286   // stopped during the safepoint so CodeCache will be safe to update without
1287 1287 // holding the CodeCache_lock.
1288 1288
1289 1289 // Compute the dependent nmethods
1290 1290 if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1291 1291 // At least one nmethod has been marked for deoptimization
1292 1292
1293 1293 // All this already happens inside a VM_Operation, so we'll do all the work here.
1294 1294 // Stuff copied from VM_Deoptimize and modified slightly.
1295 1295
1296 1296 // We do not want any GCs to happen while we are in the middle of this VM operation
1297 1297 ResourceMark rm;
1298 1298 DeoptimizationMarker dm;
1299 1299
1300 1300 // Deoptimize all activations depending on marked nmethods
1301 1301 Deoptimization::deoptimize_dependents();
1302 1302
1303 1303 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1304 1304 CodeCache::make_marked_nmethods_not_entrant();
1305 1305 }
1306 1306 }
1307 1307
1308 1308 void Universe::print() {
1309 1309 print_on(gclog_or_tty);
1310 1310 }
1311 1311
1312 1312 void Universe::print_on(outputStream* st, bool extended) {
1313 1313 st->print_cr("Heap");
1314 1314 if (!extended) {
1315 1315 heap()->print_on(st);
1316 1316 } else {
1317 1317 heap()->print_extended_on(st);
1318 1318 }
1319 1319 }
1320 1320
1321 1321 void Universe::print_heap_at_SIGBREAK() {
1322 1322 if (PrintHeapAtSIGBREAK) {
1323 1323 MutexLocker hl(Heap_lock);
1324 1324 print_on(tty);
1325 1325 tty->cr();
1326 1326 tty->flush();
1327 1327 }
1328 1328 }
1329 1329
1330 1330 void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
1331 1331 st->print_cr("{Heap before GC invocations=%u (full %u):",
1332 1332 heap()->total_collections(),
1333 1333 heap()->total_full_collections());
1334 1334 if (!PrintHeapAtGCExtended || ignore_extended) {
1335 1335 heap()->print_on(st);
1336 1336 } else {
1337 1337 heap()->print_extended_on(st);
1338 1338 }
1339 1339 }
1340 1340
1341 1341 void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
1342 1342 st->print_cr("Heap after GC invocations=%u (full %u):",
1343 1343 heap()->total_collections(),
1344 1344 heap()->total_full_collections());
1345 1345 if (!PrintHeapAtGCExtended || ignore_extended) {
1346 1346 heap()->print_on(st);
1347 1347 } else {
1348 1348 heap()->print_extended_on(st);
1349 1349 }
1350 1350 st->print_cr("}");
1351 1351 }
1352 1352
1353 1353 void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
1354 1354   // The use of _verify_in_progress is a temporary workaround for
1355 1355   // 6320749. Don't bother creating a class to set and clear
1356 1356   // it since it is only used in this method and the control flow is
1357 1357   // straightforward.
1358 1358 _verify_in_progress = true;
1359 1359
1360 1360 COMPILER2_PRESENT(
1361 1361 assert(!DerivedPointerTable::is_active(),
1362 1362 "DPT should not be active during verification "
1363 1363 "(of thread stacks below)");
1364 1364 )
1365 1365
1366 1366 ResourceMark rm;
1367 1367 HandleMark hm; // Handles created during verification can be zapped
1368 1368 _verify_count++;
1369 1369
1370 1370 if (!silent) gclog_or_tty->print("%s", prefix);
1371 1371 if (!silent) gclog_or_tty->print("[Verifying ");
1372 1372 if (!silent) gclog_or_tty->print("threads ");
1373 1373 Threads::verify();
1374 1374 if (!silent) gclog_or_tty->print("heap ");
1375 1375 heap()->verify(silent, option);
1376 1376 if (!silent) gclog_or_tty->print("syms ");
1377 1377 SymbolTable::verify();
1378 1378 if (!silent) gclog_or_tty->print("strs ");
1379 1379 StringTable::verify();
1380 1380 {
1381 1381 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1382 1382 if (!silent) gclog_or_tty->print("zone ");
1383 1383 CodeCache::verify();
1384 1384 }
1385 1385 if (!silent) gclog_or_tty->print("dict ");
1386 1386 SystemDictionary::verify();
1387 1387 #ifndef PRODUCT
1388 1388 if (!silent) gclog_or_tty->print("cldg ");
1389 1389 ClassLoaderDataGraph::verify();
1390 1390 #endif
1391 1391 if (!silent) gclog_or_tty->print("metaspace chunks ");
1392 1392 MetaspaceAux::verify_free_chunks();
1393 1393 if (!silent) gclog_or_tty->print("hand ");
1394 1394 JNIHandles::verify();
1395 1395 if (!silent) gclog_or_tty->print("C-heap ");
1396 1396 os::check_heap();
1397 1397 if (!silent) gclog_or_tty->print("code cache ");
1398 1398 CodeCache::verify_oops();
1399 1399 if (!silent) gclog_or_tty->print_cr("]");
1400 1400
1401 1401 _verify_in_progress = false;
1402 1402 }
1403 1403
1404 1404 // Oop verification (see MacroAssembler::verify_oop)
1405 1405
1406 1406 static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
1407 1407 static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1408 1408
1409 1409
1410 1410 #ifndef PRODUCT
1411 1411
1412 1412 static void calculate_verify_data(uintptr_t verify_data[2],
1413 1413 HeapWord* low_boundary,
1414 1414 HeapWord* high_boundary) {
1415 1415 assert(low_boundary < high_boundary, "bad interval");
1416 1416
1417 1417 // decide which low-order bits we require to be clear:
1418 1418 size_t alignSize = MinObjAlignmentInBytes;
1419 1419 size_t min_object_size = CollectedHeap::min_fill_size();
1420 1420
1421 1421 // make an inclusive limit:
1422 1422 uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
1423 1423 uintptr_t min = (uintptr_t)low_boundary;
1424 1424 assert(min < max, "bad interval");
1425 1425 uintptr_t diff = max ^ min;
1426 1426
1427 1427 // throw away enough low-order bits to make the diff vanish
1428 1428 uintptr_t mask = (uintptr_t)(-1);
1429 1429 while ((mask & diff) != 0)
1430 1430 mask <<= 1;
1431 1431 uintptr_t bits = (min & mask);
1432 1432 assert(bits == (max & mask), "correct mask");
1433 1433 // check an intermediate value between min and max, just to make sure:
1434 1434 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1435 1435
1436 1436 // require address alignment, too:
1437 1437 mask |= (alignSize - 1);
1438 1438
1439 1439 if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
1440 1440 assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
1441 1441 }
1442 1442 verify_data[0] = mask;
1443 1443 verify_data[1] = bits;
1444 1444 }
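
The loop above shifts the all-ones mask left until it no longer overlaps any bit in which min and max differ, so mask ends up covering exactly the high-order bits common to every address in the range; OR-ing in (alignSize - 1) afterwards additionally forces the alignment bits to match the zeros in bits. This is what lets the plausibility check referenced by the MacroAssembler::verify_oop comment above reject most non-oop values with a single and-and-compare. A standalone sketch (toy boundaries, not VM code) of the same trick:

  #include <cassert>
  #include <cstdint>

  int main() {
    // toy "heap" boundaries; any values with min < max behave the same way
    uintptr_t min = 0x4000, max = 0x7ff0;
    uintptr_t diff = max ^ min;
    uintptr_t mask = (uintptr_t)(-1);
    while ((mask & diff) != 0)
      mask <<= 1;                     // discard bits that can differ in-range
    uintptr_t bits = min & mask;      // high-order bits shared by the whole range
    assert(bits == (max & mask));
    mask |= (8 - 1);                  // additionally require 8-byte alignment
    // an aligned in-range address passes the one-compare membership test ...
    assert(((min + 0x100) & mask) == bits);
    // ... while a misaligned one fails
    assert(((min + 3) & mask) != bits);
    return 0;
  }
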
1445 1445
1446 1446 // Oop verification (see MacroAssembler::verify_oop)
1447 1447
1448 1448 uintptr_t Universe::verify_oop_mask() {
1449 1449 MemRegion m = heap()->reserved_region();
1450 1450 calculate_verify_data(_verify_oop_data,
1451 1451 m.start(),
1452 1452 m.end());
1453 1453 return _verify_oop_data[0];
1454 1454 }
1455 1455
1456 1456
1457 1457
1458 1458 uintptr_t Universe::verify_oop_bits() {
1459 1459 verify_oop_mask();
1460 1460 return _verify_oop_data[1];
1461 1461 }
1462 1462
1463 1463 uintptr_t Universe::verify_mark_mask() {
1464 1464 return markOopDesc::lock_mask_in_place;
1465 1465 }
1466 1466
1467 1467 uintptr_t Universe::verify_mark_bits() {
1468 1468 intptr_t mask = verify_mark_mask();
1469 1469 intptr_t bits = (intptr_t)markOopDesc::prototype();
1470 1470 assert((bits & ~mask) == 0, "no stray header bits");
1471 1471 return bits;
1472 1472 }
1473 1473 #endif // PRODUCT
1474 1474
1475 1475
1476 1476 void Universe::compute_verify_oop_data() {
1477 1477 verify_oop_mask();
1478 1478 verify_oop_bits();
1479 1479 verify_mark_mask();
1480 1480 verify_mark_bits();
1481 1481 }
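
For reference, the consumer side of these values reduces to one masked comparison per candidate oop. A hedged C++ rendering of that check (illustrative only: the function name is hypothetical, and the real test is emitted as generated code by MacroAssembler::verify_oop, guarded by the same non-PRODUCT build conditions as the accessors above):

  // Illustrative sketch, not a real VM helper.
  static bool plausible_oop_address(uintptr_t p) {
    return (p & Universe::verify_oop_mask()) == Universe::verify_oop_bits();
  }
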
1482 1482
1483 1483
1484 1484 void LatestMethodCache::init(Klass* k, Method* m) {
1485 1485 if (!UseSharedSpaces) {
1486 1486 _klass = k;
1487 1487 }
1488 1488 #ifndef PRODUCT
1489 1489 else {
1490 1490     // sharing initialization should have already set up _klass
1491 1491 assert(_klass != NULL, "just checking");
1492 1492 }
1493 1493 #endif
1494 1494
1495 1495 _method_idnum = m->method_idnum();
1496 1496 assert(_method_idnum >= 0, "sanity check");
1497 1497 }
1498 1498
1499 1499
1500 1500 Method* LatestMethodCache::get_method() {
1501 1501 if (klass() == NULL) return NULL;
1502 1502 InstanceKlass* ik = InstanceKlass::cast(klass());
1503 1503 Method* m = ik->method_with_idnum(method_idnum());
1504 1504 assert(m != NULL, "sanity check");
1505 1505 return m;
1506 1506 }
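
Caching the holder Klass* plus a method idnum, rather than a raw Method*, means the cache stays valid across events that replace Method objects (such as class redefinition): get_method() re-resolves the current Method on every call. A hypothetical usage sketch, where ik and m stand in for a resolved holder klass and method:

  LatestMethodCache* cache = new LatestMethodCache();
  cache->init(ik, m);                    // remember the holder and the method's idnum once
  Method* current = cache->get_method(); // re-resolved each time, so it tracks redefinition
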
1507 1507
1508 1508
1509 1509 #ifdef ASSERT
1510 1510 // Release dummy object(s) at bottom of heap
1511 1511 bool Universe::release_fullgc_alot_dummy() {
1512 1512 MutexLocker ml(FullGCALot_lock);
1513 1513 if (_fullgc_alot_dummy_array != NULL) {
1514 1514 if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
1515 1515 // No more dummies to release, release entire array instead
1516 1516 _fullgc_alot_dummy_array = NULL;
1517 1517 return false;
1518 1518 }
1519 1519 if (!UseConcMarkSweepGC) {
1520 1520 // Release dummy at bottom of old generation
1521 1521 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1522 1522 }
1523 1523 // Release dummy at bottom of permanent generation
1524 1524 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1525 1525 }
1526 1526 return true;
1527 1527 }
1528 1528
1529 1529 #endif // ASSERT
718 lines elided