
src/share/vm/gc/shared/collectedHeap.cpp


--- old/src/share/vm/gc/shared/collectedHeap.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/heapDumper.hpp"
  44 
  45 
  46 #ifdef ASSERT
  47 int CollectedHeap::_fire_out_of_memory_count = 0;
  48 #endif
  49 
  50 size_t CollectedHeap::_filler_array_max_size = 0;
  51 
  52 template <>
  53 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  54   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  55   st->print_raw(m);
  56 }
  57 
  58 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  59   if (!should_log()) {
  60     return;


 279 #ifdef ASSERT
 280 void CollectedHeap::check_for_valid_allocation_state() {
 281   Thread *thread = Thread::current();
 282   // How to choose between a pending exception and a potential
 283   // OutOfMemoryError?  Don't allow pending exceptions.
 284   // This is a VM policy failure, so how do we exhaustively test it?
 285   assert(!thread->has_pending_exception(),
 286          "shouldn't be allocating with pending exception");
 287   if (StrictSafepointChecks) {
 288     assert(thread->allow_allocation(),
 289            "Allocation done by thread for which allocation is blocked "
 290            "by No_Allocation_Verifier!");
 291     // Allocation of an oop can always invoke a safepoint,
 292     // hence the true argument
 293     thread->check_for_valid_safepoint_state(true);
 294   }
 295 }
 296 #endif
 297 
 298 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 299 
 300   // Retain tlab and allocate object in shared space if
 301   // the amount free in the tlab is too large to discard.
 302   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 303     thread->tlab().record_slow_allocation(size);
 304     return NULL;
 305   }
 306 
 307   // Discard tlab and allocate a new one.
 308   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 309   size_t new_tlab_size = thread->tlab().compute_size(size);
 310 
 311   thread->tlab().clear_before_allocation();
 312 
 313   if (new_tlab_size == 0) {
 314     return NULL;
 315   }
 316 
 317   // Allocate a new TLAB...
 318   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 319   if (obj == NULL) {
 320     return NULL;
 321   }
 322 
 323   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 324 
 325   if (ZeroTLAB) {
 326     // ...and clear it.
 327     Copy::zero_to_words(obj, new_tlab_size);
 328   } else {
 329     // ...and zap the just-allocated object.
 330 #ifdef ASSERT
 331     // Skip mangling the space corresponding to the object header to
 332     // ensure that the returned space is not considered parsable by
 333     // any concurrent GC thread.
 334     size_t hdr_size = oopDesc::header_size();
 335     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 336 #endif // ASSERT
 337   }
 338   thread->tlab().fill(obj, obj + size, new_tlab_size);
 339   return obj;
 340 }
 341 
 342 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 343   MemRegion deferred = thread->deferred_card_mark();
 344   if (!deferred.is_empty()) {
 345     assert(_defer_initial_card_mark, "Otherwise should be empty");
 346     {
 347       // Verify that the storage points to a parsable object in heap
 348       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 349       assert(is_in(old_obj), "Not in allocated heap");
 350       assert(!can_elide_initializing_store_barrier(old_obj),
 351              "Else should have been filtered in new_store_pre_barrier()");
 352       assert(old_obj->is_oop(true), "Not an oop");
 353       assert(deferred.word_size() == (size_t)(old_obj->size()),
 354              "Mismatch: multiple objects?");
 355     }
 356     BarrierSet* bs = barrier_set();
 357     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 358     bs->write_region(deferred);

+++ new/src/share/vm/gc/shared/collectedHeap.cpp

  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/heapMonitoring.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "services/heapDumper.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  56   st->print_raw(m);
  57 }
  58 
  59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  60   if (!should_log()) {
  61     return;


 280 #ifdef ASSERT
 281 void CollectedHeap::check_for_valid_allocation_state() {
 282   Thread *thread = Thread::current();
 283   // How to choose between a pending exception and a potential
 284   // OutOfMemoryError?  Don't allow pending exceptions.
 285   // This is a VM policy failure, so how do we exhaustively test it?
 286   assert(!thread->has_pending_exception(),
 287          "shouldn't be allocating with pending exception");
 288   if (StrictSafepointChecks) {
 289     assert(thread->allow_allocation(),
 290            "Allocation done by thread for which allocation is blocked "
 291            "by No_Allocation_Verifier!");
 292     // Allocation of an oop can always invoke a safepoint,
 293     // hence the true argument
 294     thread->check_for_valid_safepoint_state(true);
 295   }
 296 }
 297 #endif
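
// [Editor's note] check_for_valid_allocation_state() is a debug-only guard at
// the top of the common allocation path. A minimal sketch of how it is
// reached, assuming the usual shape of the shared allocation entry point in
// collectedHeap.inline.hpp (not shown in this webrev):
//
//   HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass,
//                                                       size_t size, TRAPS) {
//     debug_only(check_for_valid_allocation_state());
//     ...  // TLAB fast path, then shared-heap allocation
//   }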
 298 
 299 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 300   // We can come here for three reasons:
 301   //  - We really did fill the TLAB.
 302   //  - We only pretended the TLAB was full, to force this slow path and sample.
 303   //  - Both of the above at the same time.
 304   if (thread->tlab().should_sample()) {
 305     // The tlab could still have space after this sample.
 306     thread->tlab().set_back_actual_end();
 307 
 308     // Try to allocate again: it could work now.
 309     HeapWord* obj = thread->tlab().allocate(size);
 310     if (obj != NULL) {
 311       // Object got allocated, sample it now.
 312       HeapMonitoring::object_alloc_do_sample(thread,
 313                                              reinterpret_cast<oopDesc*>(obj),
 314                                              size);
 315       thread->tlab().pick_next_sample();
 316       return obj;
 317     }
 318   }
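
// [Editor's note] Reviewer's reading of the block above: to force a sample,
// the TLAB under-reports its end to the inline fast path, so an allocation
// overflows into this slow path at the chosen sample point even though the
// buffer still has room. set_back_actual_end() restores the real end, which
// is why the immediate tlab().allocate(size) retry can succeed without a
// refill. (The exact bookkeeping lives in threadLocalAllocBuffer.*, which is
// not part of this hunk.)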
 319 
 320   // Retain tlab and allocate object in shared space if
 321   // the amount free in the tlab is too large to discard.
 322   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 323     thread->tlab().record_slow_allocation(size);
 324     return NULL;
 325   }
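
// [Editor's note] A worked example of the retain-vs-discard heuristic above,
// assuming the usual policy of an initial limit of
// desired_size / TLABRefillWasteFraction (default fraction 64): a 256 KiB
// TLAB starts with a refill_waste_limit of 4 KiB, so a slow-path allocation
// that would strand more than 4 KiB keeps the TLAB and allocates this one
// object in the shared heap instead. record_slow_allocation() raises the
// limit on each such allocation, so a repeatedly unlucky TLAB is eventually
// discarded and refilled.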
 326 
 327   // Discard tlab and allocate a new one.
 328   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 329   size_t new_tlab_size = thread->tlab().compute_size(size);
 330 
 331   thread->tlab().clear_before_allocation();
 332 
 333   if (new_tlab_size == 0) {
 334     return NULL;
 335   }
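
// [Editor's note] Returning NULL here does not fail the allocation; the
// caller falls back to a direct allocation in the shared heap.
// compute_size() == 0 is the sizing policy's way of saying that no new TLAB
// of acceptable size can accommodate this request.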
 336 
 337   // Allocate a new TLAB...
 338   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 339   if (obj == NULL) {
 340     return NULL;
 341   }
 342 
 343   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 344 
 345   if (ZeroTLAB) {
 346     // ...and clear it.
 347     Copy::zero_to_words(obj, new_tlab_size);
 348   } else {
 349     // ...and zap the just-allocated object.
 350 #ifdef ASSERT
 351     // Skip mangling the space corresponding to the object header to
 352     // ensure that the returned space is not considered parsable by
 353     // any concurrent GC thread.
 354     size_t hdr_size = oopDesc::header_size();
 355     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 356 #endif // ASSERT
 357   }
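
// [Editor's note] The hdr_size gap in the mangling above leaves the words
// where an object header would go untouched, so that a concurrent GC thread
// does not treat the freshly returned space as a parsable object before
// tlab().fill() below formats the buffer. A sketch of the resulting layout,
// under that reading:
//
//   obj             obj + hdr_size                obj + new_tlab_size
//   | header words  | badHeapWordVal mangling ... |
//   | (untouched)   | (debug builds only)         |
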
 358   thread->tlab().fill(obj, obj + size, new_tlab_size);
 359 
 360   if (thread->tlab().should_sample()) {
 361     HeapMonitoring::object_alloc_do_sample(thread,
 362                                            reinterpret_cast<oopDesc*>(obj),
 363                                            size);
 364   }
 365   // Always pick a new sample point here, whether or not the allocation above was sampled.
 366   thread->tlab().pick_next_sample();
 367   return obj;
 368 }
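
// [Editor's note] For context, this slow path is only entered after the
// inline bump-pointer attempt fails; a minimal sketch of the caller,
// assuming the usual shape of allocate_from_tlab() in
// collectedHeap.inline.hpp:
//
//   HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread,
//                                               size_t size) {
//     assert(UseTLAB, "should use UseTLAB");
//     HeapWord* obj = thread->tlab().allocate(size);  // fast path
//     if (obj != NULL) {
//       return obj;
//     }
//     return allocate_from_tlab_slow(klass, thread, size);
//   }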
 369 
 370 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 371   MemRegion deferred = thread->deferred_card_mark();
 372   if (!deferred.is_empty()) {
 373     assert(_defer_initial_card_mark, "Otherwise should be empty");
 374     {
 375       // Verify that the storage points to a parsable object in heap
 376       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 377       assert(is_in(old_obj), "Not in allocated heap");
 378       assert(!can_elide_initializing_store_barrier(old_obj),
 379              "Else should have been filtered in new_store_pre_barrier()");
 380       assert(old_obj->is_oop(true), "Not an oop");
 381       assert(deferred.word_size() == (size_t)(old_obj->size()),
 382              "Mismatch: multiple objects?");
 383     }
 384     BarrierSet* bs = barrier_set();
 385     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 386     bs->write_region(deferred);
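
// [Editor's note] For card-table based barrier sets, write_region() dirties
// the card entries covering the deferred region. The asserts above check the
// invariant this mechanism relies on: the deferred MemRegion covers exactly
// one parsable, fully initialized object, so its card marks can safely be
// made visible now rather than at allocation time. (The rest of the function
// is elided from this hunk.)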

