
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 47223 : [mq]: heapz8
rev 47224 : [mq]: heap9a


  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/allocation.hpp"
  75 #include "memory/iterator.hpp"
  76 #include "memory/resourceArea.hpp"
  77 #include "oops/oop.inline.hpp"
  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"

  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  93 // and allocate_new_tlab, which are the "entry" points to the
  94 // allocation code from the rest of the JVM.  (Note that this does not
  95 // apply to TLAB allocation, which is not part of this interface: it
  96 // is done by clients of this interface.)
  97 
  98 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
  99  private:


4121 
4122     // Drain the queue - which may cause stealing
4123     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4124     drain_queue.do_void();
4125     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4126     assert(pss->queue_is_empty(), "should be");
4127   }
4128 };
4129 
4130 void G1CollectedHeap::process_weak_jni_handles() {
4131   double ref_proc_start = os::elapsedTime();
4132 
4133   G1STWIsAliveClosure is_alive(this);
4134   G1KeepAliveClosure keep_alive(this);
4135   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4136 
4137   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4138   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4139 }
4140 







4141 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4142   // Any reference objects, in the collection set, that were 'discovered'
4143   // by the CM ref processor should have already been copied (either by
4144   // applying the external root copy closure to the discovered lists, or
4145   // by following an RSet entry).
4146   //
4147   // But some of the referents, that are in the collection set, that these
4148   // reference objects point to may not have been copied: the STW ref
4149   // processor would have seen that the reference object had already
4150   // been 'discovered' and would have skipped discovering the reference,
4151   // but would not have treated the reference object as a regular oop.
4152   // As a result the copy closure would not have been applied to the
4153   // referent object.
4154   //
4155   // We need to explicitly copy these referent objects - the references
4156   // will be processed at the end of remarking.
4157   //
4158   // We also need to do this copying before we process the reference
4159   // objects discovered by the STW ref processor in case one of these
4160   // referents points to another object which is also referenced by an


4352   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4353   phase_times->record_par_time(par_time_ms);
4354 
4355   double code_root_fixup_time_ms =
4356         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4357   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4358 }
4359 
4360 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4361   // Process any discovered reference objects - we have
4362   // to do this _before_ we retire the GC alloc regions
4363   // as we may have to copy some 'reachable' referent
4364   // objects (and their reachable sub-graphs) that were
4365   // not copied during the pause.
4366   if (g1_policy()->should_process_references()) {
4367     preserve_cm_referents(per_thread_states);
4368     process_discovered_references(per_thread_states);
4369   } else {
4370     ref_processor_stw()->verify_no_references_recorded();
4371     process_weak_jni_handles();

4372   }
4373 
4374   if (G1StringDedup::is_enabled()) {
4375     double fixup_start = os::elapsedTime();
4376 
4377     G1STWIsAliveClosure is_alive(this);
4378     G1KeepAliveClosure keep_alive(this);
4379     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4380 
4381     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4382     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4383   }
4384 
4385   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4386 
4387   if (evacuation_failed()) {
4388     restore_after_evac_failure();
4389 
4390     // Reset the G1EvacuationFailureALot counters and flags
4391     // Note: the values are reset only when an actual




  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/allocation.hpp"
  75 #include "memory/iterator.hpp"
  76 #include "memory/resourceArea.hpp"
  77 #include "oops/oop.inline.hpp"
  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/heapMonitoring.hpp"
  81 #include "runtime/init.hpp"
  82 #include "runtime/orderAccess.inline.hpp"
  83 #include "runtime/vmThread.hpp"
  84 #include "utilities/align.hpp"
  85 #include "utilities/globalDefinitions.hpp"
  86 #include "utilities/stack.inline.hpp"
  87 
  88 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  89 
  90 // INVARIANTS/NOTES
  91 //
  92 // All allocation activity covered by the G1CollectedHeap interface is
  93 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  94 // and allocate_new_tlab, which are the "entry" points to the
  95 // allocation code from the rest of the JVM.  (Note that this does not
  96 // apply to TLAB allocation, which is not part of this interface: it
  97 // is done by clients of this interface.)
  98 
  99 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 100  private:
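
The INVARIANTS/NOTES comment above describes how heap-level allocation is serialized on the Heap_lock while TLAB-local allocation stays outside it. As a minimal, purely illustrative sketch of that pattern (the helper name and lock placement are assumptions for illustration, not this file's actual slow-path code):

// Illustrative only: slow-path allocation entry points serialize on the
// global Heap_lock; TLAB refills and other fast paths deliberately run
// without it.
HeapWord* attempt_allocation_slow_sketch(size_t word_size) {  // hypothetical helper
  MutexLockerEx ml(Heap_lock);   // serialize with other allocators and with GC
  // ... try to allocate, possibly scheduling a collection via a VM operation ...
  return NULL;                   // placeholder result
}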


4122 
4123     // Drain the queue - which may cause stealing
4124     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4125     drain_queue.do_void();
4126     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4127     assert(pss->queue_is_empty(), "should be");
4128   }
4129 };
4130 
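
For context, the "Drain the queue - which may cause stealing" step above is performed by G1ParEvacuateFollowersClosure::do_void(). A minimal sketch of the general drain-then-steal shape, assuming a task-queue set and a parallel terminator (the helper name is hypothetical and the dispatch of a stolen task is elided; this is not the file's actual implementation):

// Illustrative sketch of the drain/steal loop used during evacuation.
void drain_and_steal_sketch(G1ParScanThreadState* pss,
                            RefToScanQueueSet* queues,
                            uint worker_id, int* seed,
                            ParallelTaskTerminator* terminator) {
  pss->trim_queue();                            // drain the worker's own queue first
  do {
    StarTask stolen;
    while (queues->steal(worker_id, seed, stolen)) {
      // ... dispatch 'stolen' through the same copy/scan path as local work (elided) ...
      pss->trim_queue();                        // drain any work the stolen task produced
    }
  } while (!terminator->offer_termination());   // all workers idle => evacuation done
}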
4131 void G1CollectedHeap::process_weak_jni_handles() {
4132   double ref_proc_start = os::elapsedTime();
4133 
4134   G1STWIsAliveClosure is_alive(this);
4135   G1KeepAliveClosure keep_alive(this);
4136   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4137 
4138   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4139   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4140 }
4141 
4142 void G1CollectedHeap::process_heap_monitoring() {
4143   log_develop_trace(gc, ref)("Heap Sampler: heap monitoring processing");
4144   G1STWIsAliveClosure is_alive(this);
4145   G1KeepAliveClosure keep_alive(this);
4146   HeapMonitoring::weak_oops_do(&is_alive, &keep_alive);
4147 }
4148 
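
The new process_heap_monitoring() above follows the same weak-root processing contract as process_weak_jni_handles(): each weak root is tested with the is_alive closure and either kept (and forwarded) via keep_alive or cleared. A minimal sketch of that contract, with a hypothetical function and root array purely for illustration:

// Illustrative only: the generic weak_oops_do contract that JNIHandles,
// G1StringDedup and HeapMonitoring each implement over their own root sets.
void weak_oops_do_sketch(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                         oop* roots, size_t count) {
  for (size_t i = 0; i < count; i++) {
    oop obj = roots[i];
    if (obj == NULL) {
      continue;                       // already cleared
    }
    if (is_alive->do_object_b(obj)) {
      keep_alive->do_oop(&roots[i]);  // live: let the GC update/forward the root
    } else {
      roots[i] = NULL;                // dead: clear the weak root
    }
  }
}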
4149 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4150   // Any reference objects, in the collection set, that were 'discovered'
4151   // by the CM ref processor should have already been copied (either by
4152   // applying the external root copy closure to the discovered lists, or
4153   // by following an RSet entry).
4154   //
4155   // But some of the referents, that are in the collection set, that these
4156   // reference objects point to may not have been copied: the STW ref
4157   // processor would have seen that the reference object had already
4158   // been 'discovered' and would have skipped discovering the reference,
4159   // but would not have treated the reference object as a regular oop.
4160   // As a result the copy closure would not have been applied to the
4161   // referent object.
4162   //
4163   // We need to explicitly copy these referent objects - the references
4164   // will be processed at the end of remarking.
4165   //
4166   // We also need to do this copying before we process the reference
4167   // objects discovered by the STW ref processor in case one of these
4168   // referents points to another object which is also referenced by an


4360   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4361   phase_times->record_par_time(par_time_ms);
4362 
4363   double code_root_fixup_time_ms =
4364         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4365   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4366 }
4367 
4368 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4369   // Process any discovered reference objects - we have
4370   // to do this _before_ we retire the GC alloc regions
4371   // as we may have to copy some 'reachable' referent
4372   // objects (and their reachable sub-graphs) that were
4373   // not copied during the pause.
4374   if (g1_policy()->should_process_references()) {
4375     preserve_cm_referents(per_thread_states);
4376     process_discovered_references(per_thread_states);
4377   } else {
4378     ref_processor_stw()->verify_no_references_recorded();
4379     process_weak_jni_handles();
4380     process_heap_monitoring();
4381   }
4382 
4383   if (G1StringDedup::is_enabled()) {
4384     double fixup_start = os::elapsedTime();
4385 
4386     G1STWIsAliveClosure is_alive(this);
4387     G1KeepAliveClosure keep_alive(this);
4388     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4389 
4390     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4391     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4392   }
4393 
4394   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4395 
4396   if (evacuation_failed()) {
4397     restore_after_evac_failure();
4398 
4399     // Reset the G1EvacuationFailureALot counters and flags
4400     // Note: the values are reset only when an actual

