< prev index next >

src/share/vm/gc/shared/collectedHeap.cpp

Print this page




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"

  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/heapDumper.hpp"
  44 
  45 
  46 #ifdef ASSERT
  47 int CollectedHeap::_fire_out_of_memory_count = 0;
  48 #endif
  49 
  50 size_t CollectedHeap::_filler_array_max_size = 0;
  51 
  52 template <>
  53 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  54   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  55   st->print_raw(m);
  56 }
  57 
  58 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  59   if (!should_log()) {
  60     return;


 278 
#ifdef ASSERT
// Debug-only sanity check performed before heap allocation: the current
// thread must not have a pending exception, and — when StrictSafepointChecks
// is on — must be in a state where both allocation and a safepoint are
// permitted.  Compiled out in product builds.
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
 297 




























// TLAB allocation slow path: reached when the thread's current TLAB cannot
// satisfy a request of 'size' words.  Either keeps the TLAB (returning NULL
// so the caller allocates directly in the shared heap) or retires it and
// installs a freshly allocated one, placing the new object at its start.
//
// Returns the newly allocated (uninitialized) object, or NULL when the
// caller should allocate outside a TLAB or a new TLAB could not be obtained.
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  // Retire the old TLAB before attempting to obtain a new one.
  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    // compute_size() declined to size a TLAB for this request.
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  // Report the refill (and the triggering allocation) to the tracer.
  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  // Install the new TLAB; the new object occupies its first 'size' words.
  thread->tlab().fill(obj, obj + size, new_tlab_size);

  return obj;
}
 341 
 342 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 343   MemRegion deferred = thread->deferred_card_mark();
 344   if (!deferred.is_empty()) {
 345     assert(_defer_initial_card_mark, "Otherwise should be empty");
 346     {
 347       // Verify that the storage points to a parsable object in heap
 348       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 349       assert(is_in(old_obj), "Not in allocated heap");
 350       assert(!can_elide_initializing_store_barrier(old_obj),
 351              "Else should have been filtered in new_store_pre_barrier()");
 352       assert(old_obj->is_oop(true), "Not an oop");
 353       assert(deferred.word_size() == (size_t)(old_obj->size()),
 354              "Mismatch: multiple objects?");
 355     }
 356     BarrierSet* bs = barrier_set();
 357     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 358     bs->write_region(deferred);




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/heapMonitoring.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "services/heapDumper.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  56   st->print_raw(m);
  57 }
  58 
  59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  60   if (!should_log()) {
  61     return;


 279 
#ifdef ASSERT
// Debug-only sanity check performed before heap allocation: the current
// thread must not have a pending exception, and — when StrictSafepointChecks
// is on — must be in a state where both allocation and a safepoint are
// permitted.  Compiled out in product builds.
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
 298 
// Heap-sampling hook for the TLAB slow path.  Invoked twice by
// allocate_from_tlab_slow: first with obj == NULL on entry (the TLAB looked
// full — possibly only because sampling artificially lowered its end), and
// again with the freshly allocated object after a TLAB refill.
//
// Returns obj unchanged, or an object newly carved out of the current TLAB
// when the "full" TLAB actually still had room past the sampling watermark.
HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
  // We can come here for three reasons:
  //  - We either really did fill the tlab.
  //  - We pretended to everyone we did and we want to sample.
  //  - Both of the above reasons are true at the same time.
  if (HeapMonitoring::enabled()) {
    if (thread->tlab().should_sample()) {
      // If we don't have an object yet, try to allocate it.
      if (obj == NULL) {
        // The tlab could still have space after this sample.
        thread->tlab().set_back_actual_end();
        obj = thread->tlab().allocate(size);
      }

      // Is the object allocated now?
      if (obj != NULL) {
        // Object is allocated, sample it now.
        HeapMonitoring::object_alloc_do_sample(thread,
                                               reinterpret_cast<oopDesc*>(obj),
                                               size);
      }
    }
  }

  // NOTE(review): pick_next_sample() runs even when HeapMonitoring is
  // disabled — confirm this is intended (i.e. it is a no-op in that case);
  // otherwise it belongs inside the enabled() guard above.
  thread->tlab().pick_next_sample();
  return obj;
}
 326 
// TLAB allocation slow path: reached when the thread's current TLAB cannot
// satisfy a request of 'size' words.  First gives the heap sampler a chance
// to serve the request from the existing TLAB (its end may have been lowered
// only for sampling); otherwise either keeps the TLAB (returning NULL so the
// caller allocates in the shared heap) or retires it and installs a freshly
// allocated one, placing the new object at its start.
//
// Returns the newly allocated (uninitialized) object, or NULL when the
// caller should allocate outside a TLAB or a new TLAB could not be obtained.
HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
  // Sampling may satisfy the allocation from the current TLAB's real end.
  HeapWord* obj = handle_heap_sampling(thread, NULL, size);

  if (obj != NULL) {
    return obj;
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  // Retire the old TLAB before attempting to obtain a new one.
  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    // compute_size() declined to size a TLAB for this request.
    return NULL;
  }

  // Allocate a new TLAB...
  obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  // Report the refill (and the triggering allocation) to the tracer.
  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  // Install the new TLAB; the new object occupies its first 'size' words.
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  // Second sampling hook: obj is non-NULL here, so this only records the
  // sample (and picks the next sampling point); the return value is obj.
  handle_heap_sampling(thread, obj, size);
  return obj;
}
 376 
 377 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 378   MemRegion deferred = thread->deferred_card_mark();
 379   if (!deferred.is_empty()) {
 380     assert(_defer_initial_card_mark, "Otherwise should be empty");
 381     {
 382       // Verify that the storage points to a parsable object in heap
 383       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 384       assert(is_in(old_obj), "Not in allocated heap");
 385       assert(!can_elide_initializing_store_barrier(old_obj),
 386              "Else should have been filtered in new_store_pre_barrier()");
 387       assert(old_obj->is_oop(true), "Not an oop");
 388       assert(deferred.word_size() == (size_t)(old_obj->size()),
 389              "Mismatch: multiple objects?");
 390     }
 391     BarrierSet* bs = barrier_set();
 392     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 393     bs->write_region(deferred);


< prev index next >