
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 47223 : [mq]: heapz8
rev 47224 : [mq]: heap9a


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/heapDumper.hpp"
  44 #include "utilities/align.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  56   st->print_raw(m);
  57 }
  58 
  59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  60   if (!should_log()) {


 279 
 280 #ifdef ASSERT
 281 void CollectedHeap::check_for_valid_allocation_state() {
 282   Thread *thread = Thread::current();
 283   // How to choose between a pending exception and a potential
 284   // OutOfMemoryError?  Don't allow pending exceptions.
 285   // This is a VM policy failure, so how do we exhaustively test it?
 286   assert(!thread->has_pending_exception(),
 287          "shouldn't be allocating with pending exception");
 288   if (StrictSafepointChecks) {
 289     assert(thread->allow_allocation(),
 290            "Allocation done by thread for which allocation is blocked "
 291            "by No_Allocation_Verifier!");
 292     // Allocation of an oop can always invoke a safepoint,
 293     // hence, the true argument
 294     thread->check_for_valid_safepoint_state(true);
 295   }
 296 }
 297 #endif
 298 
 299 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 300 
 301   // Retain tlab and allocate object in shared space if
 302   // the amount free in the tlab is too large to discard.
 303   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 304     thread->tlab().record_slow_allocation(size);
 305     return NULL;
 306   }
 307 
 308   // Discard tlab and allocate a new one.
 309   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 310   size_t new_tlab_size = thread->tlab().compute_size(size);
 311 
 312   thread->tlab().clear_before_allocation();
 313 
 314   if (new_tlab_size == 0) {
 315     return NULL;
 316   }
 317 
 318   // Allocate a new TLAB...
 319   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 320   if (obj == NULL) {
 321     return NULL;
 322   }
 323 
 324   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 325 
 326   if (ZeroTLAB) {
  327     // ...and clear it.
 328     Copy::zero_to_words(obj, new_tlab_size);
 329   } else {
  330     // ...and zap the just-allocated object.
 331 #ifdef ASSERT
 332     // Skip mangling the space corresponding to the object header to
 333     // ensure that the returned space is not considered parsable by
 334     // any concurrent GC thread.
 335     size_t hdr_size = oopDesc::header_size();
 336     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 337 #endif // ASSERT
 338   }
 339   thread->tlab().fill(obj, obj + size, new_tlab_size);
 340   return obj;
 341 }
 342 
 343 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 344   MemRegion deferred = thread->deferred_card_mark();
 345   if (!deferred.is_empty()) {
 346     assert(_defer_initial_card_mark, "Otherwise should be empty");
 347     {
 348       // Verify that the storage points to a parsable object in heap
 349       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 350       assert(is_in(old_obj), "Not in allocated heap");
 351       assert(!can_elide_initializing_store_barrier(old_obj),
 352              "Else should have been filtered in new_store_pre_barrier()");
 353       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
 354       assert(deferred.word_size() == (size_t)(old_obj->size()),
 355              "Mismatch: multiple objects?");
 356     }
 357     BarrierSet* bs = barrier_set();
 358     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 359     bs->write_region(deferred);
 360     // "Clear" the deferred_card_mark field


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/heapMonitoring.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "services/heapDumper.hpp"
  45 #include "utilities/align.hpp"
  46 
  47 
  48 #ifdef ASSERT
  49 int CollectedHeap::_fire_out_of_memory_count = 0;
  50 #endif
  51 
  52 size_t CollectedHeap::_filler_array_max_size = 0;
  53 
  54 template <>
  55 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  56   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  57   st->print_raw(m);
  58 }
  59 
  60 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  61   if (!should_log()) {


 280 
 281 #ifdef ASSERT
 282 void CollectedHeap::check_for_valid_allocation_state() {
 283   Thread *thread = Thread::current();
 284   // How to choose between a pending exception and a potential
 285   // OutOfMemoryError?  Don't allow pending exceptions.
 286   // This is a VM policy failure, so how do we exhaustively test it?
 287   assert(!thread->has_pending_exception(),
 288          "shouldn't be allocating with pending exception");
 289   if (StrictSafepointChecks) {
 290     assert(thread->allow_allocation(),
 291            "Allocation done by thread for which allocation is blocked "
 292            "by No_Allocation_Verifier!");
 293     // Allocation of an oop can always invoke a safepoint,
 294     // hence, the true argument
 295     thread->check_for_valid_safepoint_state(true);
 296   }
 297 }
 298 #endif
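
The allow_allocation() check above is the debug-build half of an RAII convention: code that must not allocate brackets itself in a guard (the No_Allocation_Verifier named in the assert message), and any allocation attempted inside that scope trips the assert. A minimal self-contained sketch of the pattern, using toy names rather than the real Thread API:

    #include <cassert>

    struct ToyThread {
      int no_alloc_depth = 0;                  // > 0 means allocation is blocked
      bool allow_allocation() const { return no_alloc_depth == 0; }
    };

    class NoAllocationScope {                  // hypothetical guard, standing in
      ToyThread& _t;                           // for No_Allocation_Verifier
     public:
      explicit NoAllocationScope(ToyThread& t) : _t(t) { _t.no_alloc_depth++; }
      ~NoAllocationScope() { _t.no_alloc_depth--; }
    };

    void toy_allocate(ToyThread& t) {
      // Mirrors the assert in check_for_valid_allocation_state().
      assert(t.allow_allocation() && "allocation blocked by an active guard");
      // ... allocate ...
    }
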
 299 
 300 HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
  301   // We can come here for three reasons:
  302   //  - We really did fill the TLAB.
  303   //  - We pretended to have filled it so that we could take a sample.
  304   //  - Both of the above.
 305   if (HeapMonitoring::enabled()) {
 306     if (thread->tlab().should_sample()) {
  307       HeapWord* end = thread->tlab().end();
 308       thread->tlab().set_back_actual_end();
 309 
 310       // If we don't have an object yet, try to allocate it.
 311       if (obj == NULL) {
 312         // The tlab could still have space after this sample.
 313         obj = thread->tlab().allocate(size);
 314       }
 315 
  316       // Is the object allocated now?
  317       // If not, we have to wait for a new TLAB; the subsequent call to
  318       // handle_heap_sampling will pick the next sample.
 319       if (obj != NULL) {
 320         // Object is allocated, sample it now.
 321         HeapMonitoring::object_alloc_do_sample(thread,
 322                                                reinterpret_cast<oopDesc*>(obj),
 323                                                size * HeapWordSize);
  324         // The allocation succeeded, so pick the next sample point.
 325         thread->tlab().pick_next_sample(thread->tlab().top() - end);
 326       }
 327     }
 328   }
 329 
 330   return obj;
 331 }
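
handle_heap_sampling() depends on the TLAB advertising an artificially early end, so that an allocation reaching the sample point falls into the slow path; set_back_actual_end() then restores the real end, and pick_next_sample() installs the next fake end. A standalone model of that trick, with illustrative names rather than the real ThreadLocalAllocBuffer API:

    #include <cstddef>

    struct ToyTlab {
      char*  top;                 // current allocation pointer
      char*  actual_end;          // real end of the buffer
      char*  end;                 // advertised end; pulled in to force sampling
      size_t bytes_until_sample;  // distance from top to the next sample point

      void pick_next_sample() {
        char* sample_point = top + bytes_until_sample;
        end = (sample_point < actual_end) ? sample_point : actual_end;
      }
      // Sampling is pending exactly when the advertised end is the fake one.
      bool should_sample() const { return end != actual_end; }
      void set_back_actual_end() { end = actual_end; }
    };
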
 332 
 333 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 334   HeapWord* obj = handle_heap_sampling(thread, NULL, size);
 335   bool should_sample = thread->tlab().should_sample();
 336 
 337   if (obj != NULL) {
 338     return obj;
 339   }
 340 
 341   // Retain tlab and allocate object in shared space if
 342   // the amount free in the tlab is too large to discard.
 343   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 344     thread->tlab().record_slow_allocation(size);
 345     return NULL;
 346   }
 347 
 348   // Discard tlab and allocate a new one.
 349   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 350   size_t new_tlab_size = thread->tlab().compute_size(size);
 351 
 352   thread->tlab().clear_before_allocation();
 353 
 354   if (new_tlab_size == 0) {
 355     return NULL;
 356   }
 357 
 358   // Allocate a new TLAB...
 359   obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 360   if (obj == NULL) {
 361     return NULL;
 362   }
 363 
 364   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 365 
 366   if (ZeroTLAB) {
  367     // ...and clear it.
 368     Copy::zero_to_words(obj, new_tlab_size);
 369   } else {
  370     // ...and zap the just-allocated object.
 371 #ifdef ASSERT
 372     // Skip mangling the space corresponding to the object header to
 373     // ensure that the returned space is not considered parsable by
 374     // any concurrent GC thread.
 375     size_t hdr_size = oopDesc::header_size();
 376     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 377 #endif // ASSERT
 378   }
 379   thread->tlab().fill(obj, obj + size, new_tlab_size);
 380 
 381   if (should_sample) {
 382     return handle_heap_sampling(thread, obj, size);
 383   } else {
 384     return obj;
 385   }
 386 }
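
The retain-vs-discard decision at the top of allocate_from_tlab_slow() reduces to a single comparison; a sketch with free-standing parameters in place of the ThreadLocalAllocBuffer accessors:

    #include <cstddef>

    // Keep the current TLAB (and serve this request from the shared heap)
    // when its remaining free space exceeds the waste we would accept on a
    // refill; otherwise retire it and allocate a fresh TLAB.
    bool should_retain_tlab(size_t tlab_free_words,
                            size_t refill_waste_limit_words) {
      return tlab_free_words > refill_waste_limit_words;
    }

Note that record_slow_allocation(size) in the retained case also raises the waste limit, so a TLAB that repeatedly sends allocations down the slow path eventually becomes eligible for replacement.
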
 387 
 388 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 389   MemRegion deferred = thread->deferred_card_mark();
 390   if (!deferred.is_empty()) {
 391     assert(_defer_initial_card_mark, "Otherwise should be empty");
 392     {
 393       // Verify that the storage points to a parsable object in heap
 394       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 395       assert(is_in(old_obj), "Not in allocated heap");
 396       assert(!can_elide_initializing_store_barrier(old_obj),
 397              "Else should have been filtered in new_store_pre_barrier()");
 398       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
 399       assert(deferred.word_size() == (size_t)(old_obj->size()),
 400              "Mismatch: multiple objects?");
 401     }
 402     BarrierSet* bs = barrier_set();
 403     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 404     bs->write_region(deferred);
 405     // "Clear" the deferred_card_mark field
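
For reference, bs->write_region(deferred) dirties every card that the deferred region overlaps. A simplified byte-per-card model (illustrative layout, not the real CardTableModRefBS), assuming the default 512-byte cards where a value of 0 marks a card dirty:

    #include <cstddef>
    #include <cstdint>

    struct ToyCardTable {
      static const size_t card_shift = 9;  // 512-byte cards
      uint8_t*  cards;                     // one byte per card, 0 == dirty
      uintptr_t covered_base;              // start of the covered heap range

      void write_region(uintptr_t start, uintptr_t end) {
        // Dirty every card spanned by the half-open region [start, end).
        size_t first = (start - covered_base) >> card_shift;
        size_t last  = (end - 1 - covered_base) >> card_shift;
        for (size_t i = first; i <= last; i++) {
          cards[i] = 0;
        }
      }
    };
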