/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len * elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark =    ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
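// Debug-only checks on the badHeapWordVal zap pattern: after an allocation
// the newly allocated words must no longer contain it, and before an
// allocation the words about to be used still must. Both checks are no-ops
// unless CheckMemoryInitialization and ZapUnusedHeapArea are set.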
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

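// TLAB slow path: the current TLAB could not satisfy an allocation of
// 'size' words. Either keep the TLAB (returning NULL so the caller
// allocates directly in the shared heap) or retire it and refill a new one.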
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

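// Apply a card mark that was deferred by new_store_pre_barrier() below:
// card-mark the remembered region and clear the per-thread
// deferred_card_mark field.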
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

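// Filler objects keep the heap parsable: a gap is formatted as an int array
// or, when too small for one, as a plain java.lang.Object. The helpers below
// return the filler-array header size and the minimum and maximum sizes, in
// words, that a single filler array can cover.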
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

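// Fill a range with a single object: an int array when the range is large
// enough to hold one, otherwise a plain java.lang.Object covering the
// minimum fill size.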
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

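// Fill a range that may be too large for a single filler object by laying
// down maximally-sized filler arrays first, then covering the remainder
// with one final object.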
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only on
  // 64-bit platforms. First fill with arrays, ensuring that any remaining
  // space is big enough to fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

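// Default implementation for heaps without TLAB support; heaps that
// support TLABs override this.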
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

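// Make the heap parsable for GC or heap iteration: retire (fill) all TLABs
// and flush any deferred card marks so that a linear walk of the heap sees
// only well-formed objects.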
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers be careful that you know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

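// Fold per-thread TLAB statistics into the global statistics before a GC.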
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

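// Recompute the desired TLAB size for all threads, based on the
// statistics accumulated before the GC.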
void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

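// Optional diagnostics before a full collection: a heap dump
// (-XX:+HeapDumpBeforeFullGC) and/or a class histogram
// (-XX:+PrintClassHistogramBeforeFullGC).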
void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

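// As above, but after the full collection completes
// (-XX:+HeapDumpAfterFullGC, -XX:+PrintClassHistogramAfterFullGC).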
void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}