/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

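// Record a before/after-GC snapshot of the heap in the event-log ring buffer;
// the EventLogBase<GCMessage>::print() specialization above renders it later.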
void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());

  st.print_cr("{Heap %s GC invocations=%u (full %u):",
                 before ? "before" : "after",
                 heap->total_collections(),
                 heap->total_full_collections());

  heap->print_on(&st);
  st.print_cr("}");
}

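// Summarize the heap's reserved region for GC tracing; the committed portion
// is approximated as [start, start + capacity()).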
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

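// Gather committed/used/reserved sizes for metaspace as a whole as well as for
// its non-class and class parts, plus the chunk free list summaries, for GC tracing.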
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  Universe::print_heap_before_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before(this);
  }
}

void CollectedHeap::print_heap_after_gc() {
  Universe::print_heap_after_gc();
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after(this);
  }
}

void CollectedHeap::print_on_error(outputStream* st) const {
  st->print_cr("Heap:");
  print_extended_on(st);
  st->cr();

  _barrier_set->print_on(st);
}

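// nmethod registration hooks. The default implementations only assert proper
// locking; collectors that need to track nmethods override them.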
void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// WhiteBox API support for concurrent collectors.  These are the
// default implementations, for collectors which don't support this
// feature.
bool CollectedHeap::supports_concurrent_phase_control() const {
  return false;
}

const char* const* CollectedHeap::concurrent_phases() const {
  static const char* const result[] = { NULL };
  return result;
}

bool CollectedHeap::request_concurrent_phase(const char* phase) {
  return false;
}

// Memory state functions.


CollectedHeap::CollectedHeap() :
  _barrier_set(NULL),
  _is_gc_active(false),
  _total_collections(0),
  _total_full_collections(0),
  _gc_cause(GCCause::_no_gc),
  _gc_lastcause(GCCause::_no_gc),
  _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below.
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }

  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_metadata_GC_clear_soft_refs: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

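// Install the heap's barrier set and publish it to oopDesc so that oop update
// code uses it for write barriers.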
void CollectedHeap::set_barrier_set(BarrierSet* barrier_set) {
  _barrier_set = barrier_set;
  oopDesc::set_bs(_barrier_set);
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#if defined(COMPILER2) || INCLUDE_JVMCI
  _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks &&
                             can_elide_tlab_store_barriers() &&
                             (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

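// TLAB slow path: taken when allocation inside the current TLAB fails, either
// because the TLAB is genuinely full or because heap sampling artificially
// lowered its end to force a sample.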
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  // We can get here for three reasons:
  //  - We really did fill the TLAB.
  //  - We only pretended it was full so that this allocation would be sampled.
  //  - Both of the above are true at the same time.
  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      // The tlab could still have space after this sample.
      thread->tlab().set_back_actual_end();

      // Try to allocate again: it could work now.
      HeapWord* obj = thread->tlab().allocate(size);
      if (obj != NULL) {
        // Object got allocated, sample it now.
        HeapMonitoring::object_alloc_do_sample(thread,
                                               reinterpret_cast<oopDesc*>(obj),
                                               size);
        thread->tlab().pick_next_sample();
        return obj;
      }
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);

  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      HeapMonitoring::object_alloc_do_sample(thread,
                                             reinterpret_cast<oopDesc*>(obj),
                                             size);
    }
    // Always pick a next sample here.
    thread->tlab().pick_next_sample();
  }
  return obj;
}

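// If this thread has a deferred card mark pending (see new_store_pre_barrier),
// card-mark the remembered region now and clear the deferral.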
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the obvious way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj) ||
      new_obj->is_typeArray()) {
    // Arrays of non-references don't need a pre-barrier.
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

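// Filler objects: unused ranges of the heap are filled with dummy int arrays
// (or a bare java.lang.Object for the smallest gaps) so the heap stays parsable.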
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

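// Fill [start, start + words) with a single filler int array; the caller
// guarantees the size fits in one array object.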
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, "size too large " SIZE_FORMAT " becomes %d", words, (int)len);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

  // Multiple objects may be required depending on the filler array maximum size. Fill
  // the range up to that with objects that are filler_array_max_size sized. The
  // remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = (words - max) >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }

  fill_with_object_impl(start, words, zap);
}

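// Default implementation for heaps without TLAB support; heaps that support
// TLABs override this.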
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

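// Retire or fill all TLABs and flush any deferred card marks so that the heap
// can be iterated safely (e.g. by GC or heap inspection).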
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to ensure that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#if defined(COMPILER2) || INCLUDE_JVMCI
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

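// Dump the heap and/or log a class histogram before/after a full GC, depending
// on HeapDumpBeforeFullGC/HeapDumpAfterFullGC and the gc+classhisto log level.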
void CollectedHeap::full_gc_dump(GCTimer* timer, bool before) {
  assert(timer != NULL, "timer is null");
  if ((HeapDumpBeforeFullGC && before) || (HeapDumpAfterFullGC && !before)) {
    GCTraceTime(Info, gc) tm(before ? "Heap Dump (before full gc)" : "Heap Dump (after full gc)", timer);
    HeapDumper::dump_heap();
  }

  Log(gc, classhisto) log;
  if (log.is_trace()) {
    GCTraceTime(Trace, gc, classhisto) tm(before ? "Class Histogram (before full gc)" : "Class Histogram (after full gc)", timer);
    ResourceMark rm;
    VM_GC_HeapInspection inspector(log.trace_stream(), false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, true);
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  full_gc_dump(timer, false);
}

void CollectedHeap::initialize_reserved_region(HeapWord *start, HeapWord *end) {
  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap.  (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start(start);
  _reserved.set_end(end);
}