/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len * elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
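  // Deferral is only worthwhile when compiled code can actually elide the
  // TLAB store barriers, and only needed when deferral was requested
  // explicitly or the collector requires card marks to follow the stores.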
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
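  // (Until post_allocation_setup_common() below installs the klass, a
  // concurrent GC thread cannot size this filler; publishing the length
  // first ensures that once the int[] klass becomes visible, the array is
  // immediately parsable.)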
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  if (JavaObjectsInPerm) {
    obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  } else {
    assert(ScavengeRootsInCode > 0, "must be");
    obj = common_mem_allocate_init(size, CHECK_NULL);
  }
  post_allocation_setup_common(klass, obj, size);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
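  // The sanity assert below also guards the pointer arithmetic that follows:
  // reserved.start() must be at least MinObjAlignment so the subtraction
  // cannot wrap below address zero.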
  assert(heap->_reserved.start() >= (void*)MinObjAlignment, "sanity");
  void* before_heap = (void*)((intptr_t)heap->_reserved.start() - MinObjAlignment);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap->_reserved.end() <= (void*)(uintptr_t(-1) - (uint)MinObjAlignment), "sanity");
  void* after_heap = (void*)((intptr_t)heap->_reserved.end() + MinObjAlignment);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif