
src/share/vm/gc/shared/collectedHeap.cpp

rev 13139 : [mq]: heap7
rev 13140 : [mq]: heapz8


  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/thread.inline.hpp"
  43 #include "services/heapDumper.hpp"
  44 
  45 
  46 #ifdef ASSERT
  47 int CollectedHeap::_fire_out_of_memory_count = 0;
  48 #endif
  49 
  50 size_t CollectedHeap::_filler_array_max_size = 0;
  51 
  52 template <>
  53 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  54   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  55   st->print_raw(m);
  56 }
  57 
  58 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  59   if (!should_log()) {
  60     return;


 278 
 279 #ifdef ASSERT
 280 void CollectedHeap::check_for_valid_allocation_state() {
 281   Thread *thread = Thread::current();
 282   // How to choose between a pending exception and a potential
 283   // OutOfMemoryError?  Don't allow pending exceptions.
 284   // This is a VM policy failure, so how do we exhaustively test it?
 285   assert(!thread->has_pending_exception(),
 286          "shouldn't be allocating with pending exception");
 287   if (StrictSafepointChecks) {
 288     assert(thread->allow_allocation(),
 289            "Allocation done by thread for which allocation is blocked "
 290            "by No_Allocation_Verifier!");
 291     // Allocation of an oop can always invoke a safepoint,
 292     // hence, the true argument
 293     thread->check_for_valid_safepoint_state(true);
 294   }
 295 }
 296 #endif
 297 
 298 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 299 
 300   // Retain tlab and allocate object in shared space if
 301   // the amount free in the tlab is too large to discard.
 302   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 303     thread->tlab().record_slow_allocation(size);
 304     return NULL;
 305   }
 306 
 307   // Discard tlab and allocate a new one.
 308   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 309   size_t new_tlab_size = thread->tlab().compute_size(size);
 310 
 311   thread->tlab().clear_before_allocation();
 312 
 313   if (new_tlab_size == 0) {
 314     return NULL;
 315   }
 316 
 317   // Allocate a new TLAB...
 318   HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 319   if (obj == NULL) {
 320     return NULL;
 321   }
 322 
 323   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 324 
 325   if (ZeroTLAB) {
 326     // ..and clear it.
 327     Copy::zero_to_words(obj, new_tlab_size);
 328   } else {
 329     // ...and zap just allocated object.
 330 #ifdef ASSERT
 331     // Skip mangling the space corresponding to the object header to
 332     // ensure that the returned space is not considered parsable by
 333     // any concurrent GC thread.
 334     size_t hdr_size = oopDesc::header_size();
 335     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 336 #endif // ASSERT
 337   }
 338   thread->tlab().fill(obj, obj + size, new_tlab_size);
 339   return obj;
 340 }
 341 
 342 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 343   MemRegion deferred = thread->deferred_card_mark();
 344   if (!deferred.is_empty()) {
 345     assert(_defer_initial_card_mark, "Otherwise should be empty");
 346     {
 347       // Verify that the storage points to a parsable object in heap
 348       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 349       assert(is_in(old_obj), "Not in allocated heap");
 350       assert(!can_elide_initializing_store_barrier(old_obj),
 351              "Else should have been filtered in new_store_pre_barrier()");
 352       assert(old_obj->is_oop(true), "Not an oop");
 353       assert(deferred.word_size() == (size_t)(old_obj->size()),
 354              "Mismatch: multiple objects?");
 355     }
 356     BarrierSet* bs = barrier_set();
 357     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 358     bs->write_region(deferred);




  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/shared/allocTracer.hpp"
  28 #include "gc/shared/barrierSet.inline.hpp"
  29 #include "gc/shared/collectedHeap.hpp"
  30 #include "gc/shared/collectedHeap.inline.hpp"
  31 #include "gc/shared/gcHeapSummary.hpp"
  32 #include "gc/shared/gcTrace.hpp"
  33 #include "gc/shared/gcTraceTime.inline.hpp"
  34 #include "gc/shared/gcWhen.hpp"
  35 #include "gc/shared/vmGCOperations.hpp"
  36 #include "logging/log.hpp"
  37 #include "memory/metaspace.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "oops/instanceMirrorKlass.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/heapMonitoring.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/thread.inline.hpp"
  44 #include "services/heapDumper.hpp"
  45 
  46 
  47 #ifdef ASSERT
  48 int CollectedHeap::_fire_out_of_memory_count = 0;
  49 #endif
  50 
  51 size_t CollectedHeap::_filler_array_max_size = 0;
  52 
  53 template <>
  54 void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  55   st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  56   st->print_raw(m);
  57 }
  58 
  59 void GCHeapLog::log_heap(CollectedHeap* heap, bool before) {
  60   if (!should_log()) {
  61     return;


 279 
 280 #ifdef ASSERT
 281 void CollectedHeap::check_for_valid_allocation_state() {
 282   Thread *thread = Thread::current();
 283   // How to choose between a pending exception and a potential
 284   // OutOfMemoryError?  Don't allow pending exceptions.
 285   // This is a VM policy failure, so how do we exhaustively test it?
 286   assert(!thread->has_pending_exception(),
 287          "shouldn't be allocating with pending exception");
 288   if (StrictSafepointChecks) {
 289     assert(thread->allow_allocation(),
 290            "Allocation done by thread for which allocation is blocked "
 291            "by No_Allocation_Verifier!");
 292     // Allocation of an oop can always invoke a safepoint,
 293     // hence, the true argument
 294     thread->check_for_valid_safepoint_state(true);
 295   }
 296 }
 297 #endif
 298 
 299 HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) {
 300   // We can come here for three reasons:
 301   //  - We really did fill the TLAB.
 302   //  - We pretended we did, because we want to take a sample.
 303   //  - Both of the above are true at the same time.
 304   if (HeapMonitoring::enabled()) {
 305     if (thread->tlab().should_sample()) {
 306       // If we don't have an object yet, try to allocate it.
 307       if (obj == NULL) {
 308         // The tlab could still have space after this sample.
 309         thread->tlab().set_back_actual_end();
 310         obj = thread->tlab().allocate(size);
 311       }
 312 
 313       // Is the object allocated now?
 314       // If not, we have to wait until a new TLAB is allocated; let the
 315       // subsequent call to handle_heap_sampling pick the next sample.
 316       if (obj != NULL) {
 317         // Object is allocated, sample it now.
 318         HeapMonitoring::object_alloc_do_sample(thread,
 319                                                reinterpret_cast<oopDesc*>(obj),
 320                                                size);
 321         // The allocation succeeded, so pick the next sample point.
 322         thread->tlab().pick_next_sample();
 323       }
 324     }
 325   }
 326 
 327   return obj;
 328 }
 329 
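The sampling hook above only works because the TLAB can report an artificially shortened end: a sampled allocation then misses the inline fast path even though the buffer still has room, set_back_actual_end() restores the real end so the object can usually still be allocated locally, and pick_next_sample() moves the artificial end forward again. The following is a minimal standalone sketch of that idea only; ToyTlab, fast_allocate and slow_allocate are hypothetical names used for illustration, not HotSpot's ThreadLocalAllocBuffer API.

// Illustrative sketch only: a bump-pointer buffer whose end can be
// faked to force a "sample" on the slow path. Names are hypothetical.
#include <algorithm>
#include <cstddef>
#include <cstdio>

struct ToyTlab {
  size_t top;          // next free word
  size_t actual_end;   // real end of the buffer
  size_t sample_end;   // artificial end; crossing it forces the slow path

  // Fast path: fails once the request crosses the (possibly fake) end.
  bool fast_allocate(size_t words, size_t* out) {
    if (top + words > sample_end) return false;
    *out = top;
    top += words;
    return true;
  }

  // Slow path: restore the real end (cf. set_back_actual_end), retry,
  // report the sample, then pick the next sample point.
  bool slow_allocate(size_t words, size_t* out) {
    sample_end = actual_end;
    if (!fast_allocate(words, out)) return false;   // buffer really is full
    printf("sampled allocation of %zu words at offset %zu\n", words, *out);
    sample_end = std::min(top + 8, actual_end);     // cf. pick_next_sample
    return true;
  }
};

int main() {
  ToyTlab tlab{0, 64, 8};   // sample point set well before the real end
  size_t addr;
  for (int i = 0; i < 6; i++) {
    if (!tlab.fast_allocate(4, &addr) && !tlab.slow_allocate(4, &addr)) {
      printf("TLAB exhausted, would refill\n");
      break;
    }
  }
  return 0;
}

In this toy model, the "we pretended the TLAB was full" case from the comment above corresponds to fast_allocate failing against sample_end while actual_end still has room.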
 330 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
 331   HeapWord* obj = handle_heap_sampling(thread, NULL, size);
 332 
 333   if (obj != NULL) {
 334     return obj;
 335   }
 336 
 337   // Retain tlab and allocate object in shared space if
 338   // the amount free in the tlab is too large to discard.
 339   if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
 340     thread->tlab().record_slow_allocation(size);
 341     return NULL;
 342   }
 343 
 344   // Discard tlab and allocate a new one.
 345   // To minimize fragmentation, the last TLAB may be smaller than the rest.
 346   size_t new_tlab_size = thread->tlab().compute_size(size);
 347 
 348   thread->tlab().clear_before_allocation();
 349 
 350   if (new_tlab_size == 0) {
 351     return NULL;
 352   }
 353 
 354   // Allocate a new TLAB...
 355   obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
 356   if (obj == NULL) {
 357     return NULL;
 358   }
 359 
 360   AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 361 
 362   if (ZeroTLAB) {
 363     // ..and clear it.
 364     Copy::zero_to_words(obj, new_tlab_size);
 365   } else {
 366     // ...and zap just allocated object.
 367 #ifdef ASSERT
 368     // Skip mangling the space corresponding to the object header to
 369     // ensure that the returned space is not considered parsable by
 370     // any concurrent GC thread.
 371     size_t hdr_size = oopDesc::header_size();
 372     Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
 373 #endif // ASSERT
 374   }
 375   thread->tlab().fill(obj, obj + size, new_tlab_size);
 376   handle_heap_sampling(thread, obj, size);
 377   return obj;
 378 }
 379 
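The retain-vs-discard check at the top of allocate_from_tlab_slow trades the space still free in the current TLAB against one slower shared-heap allocation. A hedged sketch of just that decision follows; the waste limit and sizes are made-up numbers, not HotSpot's actual TLAB tuning.

// Illustrative sketch of the refill-waste decision only; the limit and
// sizes are made-up numbers, not HotSpot defaults.
#include <cstddef>
#include <cstdio>

// Keep the TLAB (and allocate this object in the shared heap) when
// discarding it would waste more than the refill-waste limit.
static bool retain_tlab(size_t free_words, size_t refill_waste_limit_words) {
  return free_words > refill_waste_limit_words;
}

int main() {
  const size_t waste_limit = 512;   // words; hypothetical value
  const size_t cases[] = {2048, 128};
  for (size_t free_words : cases) {
    printf("free=%zu words -> %s\n", free_words,
           retain_tlab(free_words, waste_limit)
               ? "retain TLAB, allocate object in shared heap"
               : "discard TLAB and allocate a new one");
  }
  return 0;
}

In the first case the code above calls record_slow_allocation(size) and returns NULL, so the caller allocates the object outside the TLAB; in the second it discards the buffer, sizes a replacement with compute_size(size), and allocates the object in the fresh TLAB.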
 380 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
 381   MemRegion deferred = thread->deferred_card_mark();
 382   if (!deferred.is_empty()) {
 383     assert(_defer_initial_card_mark, "Otherwise should be empty");
 384     {
 385       // Verify that the storage points to a parsable object in heap
 386       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
 387       assert(is_in(old_obj), "Not in allocated heap");
 388       assert(!can_elide_initializing_store_barrier(old_obj),
 389              "Else should have been filtered in new_store_pre_barrier()");
 390       assert(old_obj->is_oop(true), "Not an oop");
 391       assert(deferred.word_size() == (size_t)(old_obj->size()),
 392              "Mismatch: multiple objects?");
 393     }
 394     BarrierSet* bs = barrier_set();
 395     assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
 396     bs->write_region(deferred);

