
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Old version:

  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1SerialFullCollector.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/heapRegion.inline.hpp"
  58 #include "gc/g1/heapRegionRemSet.hpp"
  59 #include "gc/g1/heapRegionSet.inline.hpp"
  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "logging/log.hpp"
  74 #include "memory/allocation.hpp"
  75 #include "memory/iterator.hpp"
  76 #include "memory/resourceArea.hpp"
  77 #include "oops/oop.inline.hpp"
  78 #include "prims/resolvedMethodTable.hpp"
  79 #include "runtime/atomic.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/align.hpp"
  84 #include "utilities/globalDefinitions.hpp"
  85 #include "utilities/stack.inline.hpp"
  86 
  87 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  88 
  89 // INVARIANTS/NOTES
  90 //
  91 // All allocation activity covered by the G1CollectedHeap interface is
  92 // serialized by acquiring the HeapLock.  This happens in mem_allocate


4110         // can see some null referent objects.
4111         iter.load_ptrs(DEBUG_ONLY(true));
4112         oop ref = iter.obj();
4113 
4114         // This will filter nulls.
4115         if (iter.is_referent_alive()) {
4116           iter.make_referent_alive();
4117         }
4118         iter.move_to_next();
4119       }
4120     }
4121 
4122     // Drain the queue - which may cause stealing
4123     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4124     drain_queue.do_void();
4125     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4126     assert(pss->queue_is_empty(), "should be");
4127   }
4128 };
4129 
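For context, the drain step above relies on G1's work-stealing task queues: each worker empties its own queue first, then tries to steal from peers, and only offers termination once no work is visible anywhere. The sketch below is illustrative only and single-threaded; the real G1 queues are lock-free and termination goes through a terminator object, but the control flow is the same shape. Task, Queues and drain_with_stealing are made-up names, not HotSpot APIs.

  // Illustrative sketch (not HotSpot code) of a drain-then-steal loop like the
  // one G1ParEvacuateFollowersClosure runs over the per-worker task queues.
  #include <cstddef>
  #include <deque>
  #include <functional>
  #include <vector>

  using Task = void*;                          // stand-in for "oop slot to scan"
  using Queues = std::vector<std::deque<Task>>;

  void drain_with_stealing(Queues& queues, size_t me,
                           const std::function<void(Task)>& process) {
    for (;;) {
      // 1. Empty the local queue first (the owner works the LIFO end).
      while (!queues[me].empty()) {
        Task t = queues[me].back();
        queues[me].pop_back();
        process(t);                            // in real evacuation this can create more work
      }
      // 2. Try to steal one task from some other worker's queue (thieves take
      //    the opposite, FIFO end, to reduce contention with the owner).
      bool stole = false;
      for (size_t v = 0; v < queues.size() && !stole; v++) {
        if (v == me || queues[v].empty()) continue;
        Task t = queues[v].front();
        queues[v].pop_front();
        process(t);
        stole = true;
      }
      // 3. Nothing local and nothing to steal: give up. The real code instead
      //    offers termination to the terminator and may resume if work appears.
      if (!stole) return;
    }
  }

Owner-LIFO plus thief-FIFO is the usual work-stealing arrangement: the owner keeps good locality on recently pushed work while thieves take the oldest tasks from the far end.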
4130 void G1CollectedHeap::process_weak_jni_handles() {
4131   double ref_proc_start = os::elapsedTime();
4132 
4133   G1STWIsAliveClosure is_alive(this);
4134   G1KeepAliveClosure keep_alive(this);
4135   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4136 
4137   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4138   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4139 }
4140 
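The G1STWIsAliveClosure / G1KeepAliveClosure pair used above follows the standard weak-root protocol: for each weak slot, if the referenced object survived the pause the keep-alive closure fixes the slot up (in G1, to the object's new location); otherwise the slot is cleared. A minimal, generic sketch of that contract is below; the types are simplified stand-ins, not the actual HotSpot closures, and the "forwardee" field is only an illustration of forwarding.

  // Generic illustration of the is_alive / keep_alive protocol that
  // JNIHandles::weak_oops_do(&is_alive, &keep_alive) applies to weak handles.
  #include <vector>

  struct Object { Object* forwardee = nullptr; bool marked_live = false; };
  using oop = Object*;

  struct IsAliveClosure {                  // analogue of G1STWIsAliveClosure
    bool do_object_b(oop obj) const { return obj != nullptr && obj->marked_live; }
  };

  struct KeepAliveClosure {                // analogue of G1KeepAliveClosure
    void do_oop(oop* slot) const {
      // G1 would copy/evacuate the object here; this sketch just follows an
      // already-installed forwarding pointer.
      if (*slot != nullptr && (*slot)->forwardee != nullptr) {
        *slot = (*slot)->forwardee;
      }
    }
  };

  // Walk a table of weak slots: live referents are kept (and updated to their
  // new location), dead referents are cleared.
  void weak_oops_do(std::vector<oop>& weak_slots,
                    const IsAliveClosure& is_alive,
                    const KeepAliveClosure& keep_alive) {
    for (oop& slot : weak_slots) {
      if (slot == nullptr) continue;
      if (is_alive.do_object_b(slot)) {
        keep_alive.do_oop(&slot);          // keep and fix up the slot
      } else {
        slot = nullptr;                    // clear the dead weak reference
      }
    }
  }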
4141 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4142   // Any reference objects, in the collection set, that were 'discovered'
4143   // by the CM ref processor should have already been copied (either by
4144   // applying the external root copy closure to the discovered lists, or
4145   // by following an RSet entry).
4146   //
4147   // But some of the referents, that are in the collection set, that these
4148   // reference objects point to may not have been copied: the STW ref
4149   // processor would have seen that the reference object had already
4150   // been 'discovered' and would have skipped discovering the reference,
4151   // but would not have treated the reference object as a regular oop.
4152   // As a result the copy closure would not have been applied to the
4153   // referent object.
4154   //
4155   // We need to explicitly copy these referent objects - the references
4156   // will be processed at the end of remarking.
4157   //
4158   // We also need to do this copying before we process the reference
4159   // objects discovered by the STW ref processor in case one of these
4160   // referents points to another object which is also referenced by an


4351 
4352   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4353   phase_times->record_par_time(par_time_ms);
4354 
4355   double code_root_fixup_time_ms =
4356         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4357   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4358 }
4359 
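The two record_* calls above follow a pattern used throughout this file: sample os::elapsedTime() before and after a phase and record the delta, converted from seconds to milliseconds, into G1GCPhaseTimes. A small RAII sketch of that idea is shown below; TimedPhase and its callback are hypothetical, purely to illustrate the pattern (HotSpot has its own GCTraceTime and phase-times machinery and does not use std::chrono).

  // Illustrative RAII timer for the "(os::elapsedTime() - start) * 1000.0" pattern.
  #include <chrono>
  #include <functional>
  #include <utility>

  class TimedPhase {
    std::chrono::steady_clock::time_point _start;
    std::function<void(double /* ms */)> _record;
   public:
    explicit TimedPhase(std::function<void(double)> record)
        : _start(std::chrono::steady_clock::now()), _record(std::move(record)) {}
    ~TimedPhase() {
      double ms = std::chrono::duration<double, std::milli>(
                      std::chrono::steady_clock::now() - _start).count();
      _record(ms);   // e.g. hand the value to phase_times->record_..._time(ms)
    }
  };

  // Usage: the destructor records the elapsed milliseconds when the scope ends.
  // {
  //   TimedPhase t([&](double ms) { phase_times->record_code_root_fixup_time(ms); });
  //   ... do the phase's work ...
  // }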
4360 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4361   // Process any discovered reference objects - we have
4362   // to do this _before_ we retire the GC alloc regions
4363   // as we may have to copy some 'reachable' referent
4364   // objects (and their reachable sub-graphs) that were
4365   // not copied during the pause.
4366   if (g1_policy()->should_process_references()) {
4367     preserve_cm_referents(per_thread_states);
4368     process_discovered_references(per_thread_states);
4369   } else {
4370     ref_processor_stw()->verify_no_references_recorded();
4371     process_weak_jni_handles();
4372   }
4373 
4374   if (G1StringDedup::is_enabled()) {
4375     double fixup_start = os::elapsedTime();
4376 
4377     G1STWIsAliveClosure is_alive(this);
4378     G1KeepAliveClosure keep_alive(this);
4379     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4380 
4381     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4382     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4383   }
4384 
4385   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4386 
4387   if (evacuation_failed()) {
4388     restore_after_evac_failure();
4389 
4390     // Reset the G1EvacuationFailureALot counters and flags
4391     // Note: the values are reset only when an actual
4392     // evacuation failure occurs.
4393     NOT_PRODUCT(reset_evacuation_should_fail();)
4394   }
4395 
4396   _preserved_marks_set.assert_empty();
4397 
4398   // Enqueue any references remaining on the STW

New version:

  53 #include "gc/g1/g1RootProcessor.hpp"
  54 #include "gc/g1/g1SerialFullCollector.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/heapRegion.inline.hpp"
  58 #include "gc/g1/heapRegionRemSet.hpp"
  59 #include "gc/g1/heapRegionSet.inline.hpp"
  60 #include "gc/g1/suspendibleThreadSet.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/referenceProcessor.inline.hpp"
  72 #include "gc/shared/taskqueue.inline.hpp"
  73 #include "gc/shared/weakProcessor.hpp"
  74 #include "logging/log.hpp"
  75 #include "memory/allocation.hpp"
  76 #include "memory/iterator.hpp"
  77 #include "memory/resourceArea.hpp"
  78 #include "oops/oop.inline.hpp"
  79 #include "prims/resolvedMethodTable.hpp"
  80 #include "runtime/atomic.hpp"
  81 #include "runtime/init.hpp"
  82 #include "runtime/orderAccess.inline.hpp"
  83 #include "runtime/vmThread.hpp"
  84 #include "utilities/align.hpp"
  85 #include "utilities/globalDefinitions.hpp"
  86 #include "utilities/stack.inline.hpp"
  87 
  88 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  89 
  90 // INVARIANTS/NOTES
  91 //
  92 // All allocation activity covered by the G1CollectedHeap interface is
  93 // serialized by acquiring the HeapLock.  This happens in mem_allocate


4111         // can see some null referent objects.
4112         iter.load_ptrs(DEBUG_ONLY(true));
4113         oop ref = iter.obj();
4114 
4115         // This will filter nulls.
4116         if (iter.is_referent_alive()) {
4117           iter.make_referent_alive();
4118         }
4119         iter.move_to_next();
4120       }
4121     }
4122 
4123     // Drain the queue - which may cause stealing
4124     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4125     drain_queue.do_void();
4126     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4127     assert(pss->queue_is_empty(), "should be");
4128   }
4129 };
4130
4131 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4132   // Any reference objects, in the collection set, that were 'discovered'
4133   // by the CM ref processor should have already been copied (either by
4134   // applying the external root copy closure to the discovered lists, or
4135   // by following an RSet entry).
4136   //
4137   // But some of the referents, that are in the collection set, that these
4138   // reference objects point to may not have been copied: the STW ref
4139   // processor would have seen that the reference object had already
4140   // been 'discovered' and would have skipped discovering the reference,
4141   // but would not have treated the reference object as a regular oop.
4142   // As a result the copy closure would not have been applied to the
4143   // referent object.
4144   //
4145   // We need to explicitly copy these referent objects - the references
4146   // will be processed at the end of remarking.
4147   //
4148   // We also need to do this copying before we process the reference
4149   // objects discovered by the STW ref processor in case one of these
4150   // referents points to another object which is also referenced by an


4341 
4342   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4343   phase_times->record_par_time(par_time_ms);
4344 
4345   double code_root_fixup_time_ms =
4346         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4347   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4348 }
4349 
4350 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4351   // Process any discovered reference objects - we have
4352   // to do this _before_ we retire the GC alloc regions
4353   // as we may have to copy some 'reachable' referent
4354   // objects (and their reachable sub-graphs) that were
4355   // not copied during the pause.
4356   if (g1_policy()->should_process_references()) {
4357     preserve_cm_referents(per_thread_states);
4358     process_discovered_references(per_thread_states);
4359   } else {
4360     ref_processor_stw()->verify_no_references_recorded();
4361   }
4362 
4363   G1STWIsAliveClosure is_alive(this);
4364   G1KeepAliveClosure keep_alive(this);
4365 
4366   {
4367     double start = os::elapsedTime();
4368 
4369     WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
4370 
4371     double time_ms = (os::elapsedTime() - start) * 1000.0;
4372     g1_policy()->phase_times()->record_ref_proc_time(time_ms);
4373   }
4374 
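This block is the core of the change: where the old code only walked the JNI weak handles (and only in the branch that skipped reference processing), the new code always calls WeakProcessor::weak_oops_do, which applies the same is_alive / keep_alive pair to the VM's internal weak root sets in one place. The exact set of root sets is defined by WeakProcessor itself; the sketch below only illustrates the "one entry point, many weak root sets" shape, with placeholder subsystem names and simplified closure types.

  // Conceptual sketch of the consolidation a WeakProcessor-style entry point
  // provides. Subsystem names are placeholders, not the list HotSpot iterates.
  #include <vector>

  struct BoolObjectClosure { virtual bool do_object_b(void* obj) = 0; virtual ~BoolObjectClosure() = default; };
  struct OopClosure        { virtual void do_oop(void** slot) = 0;    virtual ~OopClosure() = default; };

  using WeakRootsDoFn = void (*)(BoolObjectClosure* is_alive, OopClosure* keep_alive);

  // Hypothetical per-subsystem hooks (stand-ins for JNI weak handles, etc.).
  void subsystem_a_weak_oops_do(BoolObjectClosure*, OopClosure*) { /* ... */ }
  void subsystem_b_weak_oops_do(BoolObjectClosure*, OopClosure*) { /* ... */ }

  struct WeakProcessorSketch {
    static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) {
      // Every registered weak root set gets the same treatment, so a caller
      // like post_evacuate_collection_set() needs only this one call.
      static const std::vector<WeakRootsDoFn> subsystems = {
        &subsystem_a_weak_oops_do,
        &subsystem_b_weak_oops_do,
      };
      for (WeakRootsDoFn fn : subsystems) {
        fn(is_alive, keep_alive);
      }
    }
  };

Consolidating the weak-root walks behind one call keeps the closure pair and the timing in a single place in the GC, rather than scattering per-subsystem calls across each collector's pause code.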
4375   if (G1StringDedup::is_enabled()) {
4376     double fixup_start = os::elapsedTime();
4377
4378     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4379 
4380     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4381     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4382   }
4383 
4384   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4385 
4386   if (evacuation_failed()) {
4387     restore_after_evac_failure();
4388 
4389     // Reset the G1EvacuationFailureALot counters and flags
4390     // Note: the values are reset only when an actual
4391     // evacuation failure occurs.
4392     NOT_PRODUCT(reset_evacuation_should_fail();)
4393   }
4394 
4395   _preserved_marks_set.assert_empty();
4396 
4397   // Enqueue any references remaining on the STW

