
src/hotspot/share/gc/shared/collectedHeap.cpp

rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48557 : [mq]: heap17


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "runtime/threadSMR.hpp"
  44 #include "services/heapDumper.hpp"
  45 #include "utilities/align.hpp"
  46 
  47 
  48 #ifdef ASSERT
  49 int CollectedHeap::_fire_out_of_memory_count = 0;
  50 #endif
  51 
  52 size_t CollectedHeap::_filler_array_max_size = 0;
  53 
  54 template <>
  55 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  56   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  57   st->print_raw(m);
  58 }
  59 
  60 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {


 272 
 273 #ifdef ASSERT
 274 void CollectedHeap::check_for_valid_allocation_state() {
 275   Thread *thread = Thread::current();
 276   // How to choose between a pending exception and a potential
 277   // OutOfMemoryError?  Don't allow pending exceptions.
 278   // This is a VM policy failure, so how do we exhaustively test it?
 279   assert(!thread->has_pending_exception(),
 280          "shouldn't be allocating with pending exception");
 281   if (StrictSafepointChecks) {
 282     assert(thread->allow_allocation(),
 283            "Allocation done by thread for which allocation is blocked "
 284            "by No_Allocation_Verifier!");
 285     // Allocation of an oop can always invoke a safepoint,
 286     // hence, the true argument
 287     thread->check_for_valid_safepoint_state(true);
 288   }
 289 }
 290 #endif
 291 
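The assert above is enforced through a scoped verifier object. As an aside, here is a minimal standalone sketch of the RAII pattern behind it: a guard flips a per-thread flag for the duration of a scope, and the allocation path asserts on that flag. All names are illustrative, not HotSpot's actual No_Allocation_Verifier.

#include <cassert>

struct ThreadSketch {
  bool allocation_allowed = true;
};

class NoAllocationScope {
  ThreadSketch* _thread;
  bool          _saved;
 public:
  explicit NoAllocationScope(ThreadSketch* t)
      : _thread(t), _saved(t->allocation_allowed) {
    _thread->allocation_allowed = false;   // block allocation within this scope
  }
  ~NoAllocationScope() {
    _thread->allocation_allowed = _saved;  // restore on scope exit
  }
};

int main() {
  ThreadSketch t;
  {
    NoAllocationScope guard(&t);
    assert(!t.allocation_allowed);         // an allocation here would assert
  }
  assert(t.allocation_allowed);            // allowed again after the scope
  return 0;
}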
 292 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 293 
 294   // Retain tlab and allocate object in shared space if
 295   // the amount free in the tlab is too large to discard.
 296   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 297     thread->tlab().record_slow_allocation(size);
 298     return NULL;
 299   }
 300 
 301   // Discard tlab and allocate a new one.
 302   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 303   size_t new_tlab_size = thread->tlab().compute_size(size);
 304 
 305   thread->tlab().clear_before_allocation();
 306 
 307   if (new_tlab_size == 0) {
 308     return NULL;
 309   }
 310 
 311   // Allocate a new TLAB...
 312   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 313   if (obj == NULL) {
 314     return NULL;
 315   }
 316 
 317   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 318 
 319   if (ZeroTLAB) {
  320     // ...and clear it.
 321     Copy::zero_to_words(obj, new_tlab_size);
 322   } else {
 323     // ...and zap just allocated object.
 324 #ifdef ASSERT
 325     // Skip mangling the space corresponding to the object header to
 326     // ensure that the returned space is not considered parsable by
 327     // any concurrent GC thread.
 328     size_t hdr_size = oopDesc::header_size();
 329     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 330 #endif // ASSERT
 331   }
 332   thread->tlab().fill(obj, obj + size, new_tlab_size);
 333   return obj;
 334 }
 335 
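The first early return above encodes the retain-vs-discard heuristic: a tlab with more free space than its refill waste limit is kept, and the object is allocated in the shared heap instead. A minimal standalone sketch of that decision (should_retain_tlab is a hypothetical helper, not HotSpot code; values are in heap words):

#include <cstddef>
#include <cstdio>

static bool should_retain_tlab(size_t tlab_free_words,
                               size_t refill_waste_limit_words) {
  // Keep the tlab when discarding it would waste more free space than the
  // per-tlab refill waste limit allows.
  return tlab_free_words > refill_waste_limit_words;
}

int main() {
  printf("%d\n", should_retain_tlab(200, 64)); // 1: retain, allocate in shared heap
  printf("%d\n", should_retain_tlab(16, 64));  // 0: discard and refill the tlab
  return 0;
}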
 336 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 337   MemRegion deferred = thread->deferred_card_mark();
 338   if (!deferred.is_empty()) {
 339     assert(_defer_initial_card_mark, "Otherwise should be empty");
 340     {
 341       // Verify that the storage points to a parsable object in heap
 342       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 343       assert(is_in(old_obj), "Not in allocated heap");
 344       assert(!can_elide_initializing_store_barrier(old_obj),
 345              "Else should have been filtered in new_store_pre_barrier()");
 346       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
 347       assert(deferred.word_size() == (size_t)(old_obj->size()),
 348              "Mismatch: multiple objects?");
 349     }
 350     BarrierSet* bs = barrier_set();
 351     bs->write_region(deferred);
 352     // "Clear" the deferred_card_mark field
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/heapMonitoring.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "runtime/threadSMR.hpp"
  45 #include "services/heapDumper.hpp"
  46 #include "utilities/align.hpp"
  47 
  48 
  49 #ifdef ASSERT
  50 int CollectedHeap::_fire_out_of_memory_count = 0;
  51 #endif
  52 
  53 size_t CollectedHeap::_filler_array_max_size = 0;
  54 
  55 template <>
  56 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  57   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  58   st->print_raw(m);
  59 }
  60 
  61 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {


 273 
 274 #ifdef ASSERT
 275 void CollectedHeap::check_for_valid_allocation_state() {
 276   Thread *thread = Thread::current();
 277   // How to choose between a pending exception and a potential
 278   // OutOfMemoryError?  Don't allow pending exceptions.
 279   // This is a VM policy failure, so how do we exhaustively test it?
 280   assert(!thread->has_pending_exception(),
 281          "shouldn't be allocating with pending exception");
 282   if (StrictSafepointChecks) {
 283     assert(thread->allow_allocation(),
 284            "Allocation done by thread for which allocation is blocked "
 285            "by No_Allocation_Verifier!");
 286     // Allocation of an oop can always invoke a safepoint,
 287     // hence, the true argument
 288     thread->check_for_valid_safepoint_state(true);
 289   }
 290 }
 291 #endif
 292 
 293 
 294 void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj,
 295                                       size_t size, size_t overflowed_words) {
 296   // Object is allocated, sample it now.
 297   HeapMonitoring::object_alloc_do_sample(thread,
 298                                          reinterpret_cast<oopDesc*>(obj),
 299                                          size * HeapWordSize);
  300   // Pick the next sample point; this allocation succeeded.
 301   thread->tlab().pick_next_sample(overflowed_words);
 302 }
 303 
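pick_next_sample(overflowed_words) charges the words allocated past the previous sample point against the next sampling interval, keeping the long-run average interval stable. A standalone sketch of that bookkeeping (SamplerSketch and its fixed interval are illustrative; the real implementation may randomize the interval):

#include <cstddef>
#include <cstdio>

struct SamplerSketch {
  size_t words_until_sample;      // countdown to the next sample point
  size_t sample_interval_words;   // nominal sampling interval

  // Deduct the overshoot so the average interval stays at
  // sample_interval_words.
  void pick_next_sample(size_t overflowed_words) {
    words_until_sample = sample_interval_words > overflowed_words
                             ? sample_interval_words - overflowed_words
                             : 0; // overshot a whole interval: sample now
  }
};

int main() {
  SamplerSketch s = { 0, 512 };
  s.pick_next_sample(8);          // last allocation ran 8 words past the mark
  printf("next sample in %zu words\n", s.words_until_sample); // 504
  return 0;
}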
 304 HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) {
 305   thread->tlab().set_back_actual_end();
 306 
 307   // The tlab could still have space after this sample.
 308   return thread->tlab().allocate(size);
 309 }
 310 
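set_back_actual_end() only makes sense if the tlab advertises a lowered end at the sample point, as the surrounding code suggests. A standalone sketch of that lowered-end trick (TlabSketch is illustrative, not ThreadLocalAllocBuffer): allocation first fails at the lowered end, the slow path samples and restores the real end, then retries in the same tlab.

#include <cstddef>

struct TlabSketch {
  char* top;
  char* end;         // advertised end; may be lowered to a sample point
  char* actual_end;  // the tlab's real end

  // Restore the real end so the remaining tlab space is usable again.
  void set_back_actual_end() { end = actual_end; }

  void* allocate(size_t bytes) {
    if (top + bytes <= end) {    // fast path: bump-pointer allocation
      void* p = top;
      top += bytes;
      return p;
    }
    return nullptr;              // out of space: caller falls through to refill
  }
};

int main() {
  char buf[128];
  TlabSketch t = { buf, buf + 64, buf + 128 };  // end lowered to a sample point
  if (t.allocate(96) != nullptr) return 1;      // trips the lowered end
  t.set_back_actual_end();                      // slow path: restore the real end
  return t.allocate(96) != nullptr ? 0 : 1;     // now fits in the same tlab
}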
 311 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  312   // The tlab may change below, so remember whether this one wanted a sample.
 313   bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample();
 314 
 315   HeapWord* obj = NULL;
 316   if (should_sample) {
 317     // Remember the tlab end to fix up the sampling rate.
 318     HeapWord* tlab_old_end = thread->tlab().end();
 319     obj = allocate_sampled_object(thread, size);
 320 
 321     // If we did allocate in this tlab, sample it. Otherwise, we wait for the
 322     // new tlab's first allocation at the end of this method.
 323     if (obj != NULL) {
  324     // Fix the sample rate by deducting the words allocated past the old
  325     // tlab end from the next sample interval.
 326       size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end);
 327       sample_allocation(thread, obj, size, overflowed_words);
 328       return obj;
 329     }
 330   }
 331 
 332   // Retain tlab and allocate object in shared space if
 333   // the amount free in the tlab is too large to discard.
 334   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 335     thread->tlab().record_slow_allocation(size);
 336     return NULL;
 337   }
 338 
 339   // Discard tlab and allocate a new one.
 340   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 341   size_t new_tlab_size = thread->tlab().compute_size(size);
 342 
 343   thread->tlab().clear_before_allocation();
 344 
 345   if (new_tlab_size == 0) {
 346     return NULL;
 347   }
 348 
 349   // Allocate a new TLAB...
 350   obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 351   if (obj == NULL) {
 352     return NULL;
 353   }
 354 
 355   AllocTracer::send_allocation_in_new_tlab(klass, obj, new_tlab_size * HeapWordSize, size * HeapWordSize, thread);
 356 
 357   if (ZeroTLAB) {
  358     // ...and clear it.
 359     Copy::zero_to_words(obj, new_tlab_size);
 360   } else {
 361     // ...and zap just allocated object.
 362 #ifdef ASSERT
 363     // Skip mangling the space corresponding to the object header to
 364     // ensure that the returned space is not considered parsable by
 365     // any concurrent GC thread.
 366     size_t hdr_size = oopDesc::header_size();
 367     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 368 #endif // ASSERT
 369   }
 370   thread->tlab().fill(obj, obj + size, new_tlab_size);
 371 
 372   // Did we initially want to sample?
 373   if (should_sample) {
  374     sample_allocation(thread, obj, size, 0 /* fresh tlab: no overflowed words */);
 375   }
 376   return obj;
 377 }
 378 
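In the ASSERT branch above, the fresh tlab is mangled with badHeapWordVal while the leading header words are skipped, so concurrent readers never see what looks like a parsable object in unparsable space. A standalone sketch of that zap (constant and names illustrative, not HotSpot's):

#include <cstddef>
#include <cstdint>

static void zap_new_tlab(uintptr_t* words, size_t count, size_t hdr_words) {
  const uintptr_t kBadHeapWord = (uintptr_t)0xBAADBABEBAADBABEull; // stand-in value
  // Leave the leading header words untouched; mangle everything after them.
  for (size_t i = hdr_words; i < count; i++) {
    words[i] = kBadHeapWord;
  }
}

int main() {
  uintptr_t tlab[16] = { 0 };
  zap_new_tlab(tlab, 16, 2);    // skip a 2-word header, mangle the rest
  return tlab[0] == 0 ? 0 : 1;  // header words are still clean
}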
 379 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 380   MemRegion deferred = thread->deferred_card_mark();
 381   if (!deferred.is_empty()) {
 382     assert(_defer_initial_card_mark, "Otherwise should be empty");
 383     {
 384       // Verify that the storage points to a parsable object in heap
 385       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 386       assert(is_in(old_obj), "Not in allocated heap");
 387       assert(!can_elide_initializing_store_barrier(old_obj),
 388              "Else should have been filtered in new_store_pre_barrier()");
 389       assert(oopDesc::is_oop(old_obj, true), "Not an oop");
 390       assert(deferred.word_size() == (size_t)(old_obj->size()),
 391              "Mismatch: multiple objects?");
 392     }
 393     BarrierSet* bs = barrier_set();
 394     bs->write_region(deferred);
 395     // "Clear" the deferred_card_mark field
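bs->write_region(deferred) dirties every card overlapping the deferred region so the next scan revisits it. A standalone sketch of region-based card marking over a toy 1 MiB heap (the 512-byte card size matches HotSpot's traditional default; everything else is illustrative):

#include <cstddef>
#include <cstdint>

const int    kCardShift = 9;                      // 512-byte cards
const size_t kHeapBytes = 1u << 20;               // toy 1 MiB heap
static uint8_t card_table[kHeapBytes >> kCardShift];

// Dirty every card that overlaps [start, end), given as byte offsets
// into the toy heap.
static void write_region(size_t start, size_t end) {
  for (size_t c = start >> kCardShift; c <= (end - 1) >> kCardShift; c++) {
    card_table[c] = 1;                            // mark card dirty
  }
}

int main() {
  write_region(1000, 1600);                       // spans cards 1 through 3
  return card_table[2] == 1 ? 0 : 1;
}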