/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len * elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                       80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                        80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
{
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain the tlab and allocate the object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard the tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
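
// Note: an illustrative walk-through of the retain-vs-discard decision
// above. The numbers are examples only; the actual limit is controlled
// by TLAB sizing flags such as TLABRefillWasteFraction.
//
//   TLAB size              ~ 256 KB
//   refill_waste_limit     ~ TLAB size / TLABRefillWasteFraction
//
//   free() above the limit -> too much space to waste: keep the TLAB
//                             and satisfy this one request from shared
//                             space (we return NULL above, so the caller
//                             takes the shared-space allocation path).
//   free() at or below it  -> cheap to discard: retire the TLAB (its
//                             unused tail is filled so the heap stays
//                             parsable) and request a fresh one via
//                             allocate_new_tlab().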
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in the heap.
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field.
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
// Thus, serendipitously, the protocol for maintaining the intended
// invariants turns out to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark.
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark immediately.
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
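
// Illustrative timeline for the deferred card-mark protocol above
// (a sketch only, for the COMPILER2 + ReduceInitialCardMarks case):
//
//   1. Compiled code slow-path-allocates new_obj in the old gen and
//      elides the card marks for new_obj's initializing stores.
//   2. new_store_pre_barrier(thread, new_obj) first flushes any region
//      previously recorded for this thread, then either card-marks
//      [new_obj, new_obj + size) immediately (case (a) collectors) or
//      records it in thread->deferred_card_mark() (cases (b) and (c)).
//   3. A recorded region is card-marked by flush_deferred_store_barrier(),
//      either at this thread's next slow-path allocation or from
//      ensure_parsability() before the next collection -- in both cases
//      strictly after the initializing stores have completed.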
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
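
// Note on the filler-array loop above: if carving off a full-sized
// array (cur == max) would leave a remainder smaller than min_fill_size(),
// the loop carves off max - min words instead, enlarging the remainder
// by min words. The space left when the loop exits is therefore always
// at least min_fill_size() (the entry assert guarantees this for the
// loop-free case too), so the trailing fill_with_object_impl() call can
// always cover it.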
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be certain that mutators are not going
  // to interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started allocating
  // (nothing much to verify) or have started allocating but are now
  // a full-fledged JavaThread (and have thus made our TLABs available
  // for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up;"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
    // We are about to do a "major" collection, and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}