
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13139 : [mq]: heap7


  59 #include "gc/g1/suspendibleThreadSet.hpp"
  60 #include "gc/g1/vm_operations_g1.hpp"
  61 #include "gc/shared/gcHeapSummary.hpp"
  62 #include "gc/shared/gcId.hpp"
  63 #include "gc/shared/gcLocker.inline.hpp"
  64 #include "gc/shared/gcTimer.hpp"
  65 #include "gc/shared/gcTrace.hpp"
  66 #include "gc/shared/gcTraceTime.inline.hpp"
  67 #include "gc/shared/generationSpec.hpp"
  68 #include "gc/shared/isGCActiveMark.hpp"
  69 #include "gc/shared/preservedMarks.inline.hpp"
  70 #include "gc/shared/referenceProcessor.inline.hpp"
  71 #include "gc/shared/taskqueue.inline.hpp"
  72 #include "logging/log.hpp"
  73 #include "memory/allocation.hpp"
  74 #include "memory/iterator.hpp"
  75 #include "memory/resourceArea.hpp"
  76 #include "oops/oop.inline.hpp"
  77 #include "prims/resolvedMethodTable.hpp"
  78 #include "runtime/atomic.hpp"

  79 #include "runtime/init.hpp"
  80 #include "runtime/orderAccess.inline.hpp"
  81 #include "runtime/vmThread.hpp"
  82 #include "utilities/globalDefinitions.hpp"
  83 #include "utilities/stack.inline.hpp"
  84 
  85 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  86 
  87 // INVARIANTS/NOTES
  88 //
  89 // All allocation activity covered by the G1CollectedHeap interface is
  90 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  91 // and allocate_new_tlab, which are the "entry" points to the
  92 // allocation code from the rest of the JVM.  (Note that this does not
  93 // apply to TLAB allocation, which is not part of this interface: it
  94 // is done by clients of this interface.)
  95 
  96 // Local to this file.
  97 
  98 class RefineCardTableEntryClosure: public CardTableEntryClosure {


4290 
4291     // Drain the queue - which may cause stealing
4292     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4293     drain_queue.do_void();
4294     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4295     assert(pss->queue_is_empty(), "should be");
4296   }
4297 };
4298 
4299 void G1CollectedHeap::process_weak_jni_handles() {
4300   double ref_proc_start = os::elapsedTime();
4301 
4302   G1STWIsAliveClosure is_alive(this);
4303   G1KeepAliveClosure keep_alive(this);
4304   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4305 
4306   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4307   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4308 }
4309 







4310 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4311   // Any reference objects, in the collection set, that were 'discovered'
4312   // by the CM ref processor should have already been copied (either by
4313   // applying the external root copy closure to the discovered lists, or
4314   // by following an RSet entry).
4315   //
4316   // But some of the referents, that are in the collection set, that these
4317   // reference objects point to may not have been copied: the STW ref
4318   // processor would have seen that the reference object had already
4319   // been 'discovered' and would have skipped discovering the reference,
4320   // but would not have treated the reference object as a regular oop.
4321   // As a result the copy closure would not have been applied to the
4322   // referent object.
4323   //
4324   // We need to explicitly copy these referent objects - the references
4325   // will be processed at the end of remarking.
4326   //
4327   // We also need to do this copying before we process the reference
4328   // objects discovered by the STW ref processor in case one of these
4329   // referents points to another object which is also referenced by an


4508   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4509   phase_times->record_par_time(par_time_ms);
4510 
4511   double code_root_fixup_time_ms =
4512         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4513   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4514 }
4515 
4516 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4517   // Process any discovered reference objects - we have
4518   // to do this _before_ we retire the GC alloc regions
4519   // as we may have to copy some 'reachable' referent
4520   // objects (and their reachable sub-graphs) that were
4521   // not copied during the pause.
4522   if (g1_policy()->should_process_references()) {
4523     preserve_cm_referents(per_thread_states);
4524     process_discovered_references(per_thread_states);
4525   } else {
4526     ref_processor_stw()->verify_no_references_recorded();
4527     process_weak_jni_handles();

4528   }
4529 
4530   if (G1StringDedup::is_enabled()) {
4531     double fixup_start = os::elapsedTime();
4532 
4533     G1STWIsAliveClosure is_alive(this);
4534     G1KeepAliveClosure keep_alive(this);
4535     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4536 
4537     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4538     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4539   }
4540 
4541   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4542 
4543   if (evacuation_failed()) {
4544     restore_after_evac_failure();
4545 
4546     // Reset the G1EvacuationFailureALot counters and flags
4547     // Note: the values are reset only when an actual




  59 #include "gc/g1/suspendibleThreadSet.hpp"
  60 #include "gc/g1/vm_operations_g1.hpp"
  61 #include "gc/shared/gcHeapSummary.hpp"
  62 #include "gc/shared/gcId.hpp"
  63 #include "gc/shared/gcLocker.inline.hpp"
  64 #include "gc/shared/gcTimer.hpp"
  65 #include "gc/shared/gcTrace.hpp"
  66 #include "gc/shared/gcTraceTime.inline.hpp"
  67 #include "gc/shared/generationSpec.hpp"
  68 #include "gc/shared/isGCActiveMark.hpp"
  69 #include "gc/shared/preservedMarks.inline.hpp"
  70 #include "gc/shared/referenceProcessor.inline.hpp"
  71 #include "gc/shared/taskqueue.inline.hpp"
  72 #include "logging/log.hpp"
  73 #include "memory/allocation.hpp"
  74 #include "memory/iterator.hpp"
  75 #include "memory/resourceArea.hpp"
  76 #include "oops/oop.inline.hpp"
  77 #include "prims/resolvedMethodTable.hpp"
  78 #include "runtime/atomic.hpp"
  79 #include "runtime/heapMonitoring.hpp"
  80 #include "runtime/init.hpp"
  81 #include "runtime/orderAccess.inline.hpp"
  82 #include "runtime/vmThread.hpp"
  83 #include "utilities/globalDefinitions.hpp"
  84 #include "utilities/stack.inline.hpp"
  85 
  86 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  87 
  88 // INVARIANTS/NOTES
  89 //
  90 // All allocation activity covered by the G1CollectedHeap interface is
  91 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  92 // and allocate_new_tlab, which are the "entry" points to the
  93 // allocation code from the rest of the JVM.  (Note that this does not
  94 // apply to TLAB allocation, which is not part of this interface: it
  95 // is done by clients of this interface.)
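As a minimal sketch of the serialization described above (not part of this change; 'allocate_slow_path_locked' is a hypothetical stand-in for the real slow-path code reached from mem_allocate and allocate_new_tlab), an allocation entry point takes the Heap_lock before touching heap-level state:

  // Sketch only: heap-level allocation entry points serialize on the Heap_lock.
  HeapWord* example_mem_allocate(size_t word_size) {
    MutexLocker ml(Heap_lock);                    // serializes G1CollectedHeap-level allocation
    return allocate_slow_path_locked(word_size);  // hypothetical helper for the locked slow path
  }
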
  96 
  97 // Local to this file.
  98 
  99 class RefineCardTableEntryClosure: public CardTableEntryClosure {


4291 
4292     // Drain the queue - which may cause stealing
4293     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
4294     drain_queue.do_void();
4295     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
4296     assert(pss->queue_is_empty(), "should be");
4297   }
4298 };
4299 
4300 void G1CollectedHeap::process_weak_jni_handles() {
4301   double ref_proc_start = os::elapsedTime();
4302 
4303   G1STWIsAliveClosure is_alive(this);
4304   G1KeepAliveClosure keep_alive(this);
4305   JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4306 
4307   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4308   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4309 }
4310 
4311 void G1CollectedHeap::process_heap_monitoring() {
4312   log_develop_trace(gc, ref)("HeapSampling [other] : heap monitoring processing");
4313   G1STWIsAliveClosure is_alive(this);
4314   G1KeepAliveClosure keep_alive(this);
4315   HeapMonitoring::weak_oops_do(&is_alive, &keep_alive);
4316 }
4317 
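Both process_weak_jni_handles() above and the new process_heap_monitoring() hand an is_alive/keep_alive closure pair to a weak_oops_do() walk. As a rough sketch of that pattern (illustrative only; the real tables and iteration live inside JNIHandles and HeapMonitoring), each weakly held slot is either kept alive and fixed up, or cleared:

  // Illustrative sketch of the STW weak-root sweep the two closures drive.
  // 'roots' and 'num_roots' are hypothetical stand-ins for the internal tables.
  void sweep_weak_roots(oop* roots, size_t num_roots,
                        BoolObjectClosure* is_alive, OopClosure* keep_alive) {
    for (size_t i = 0; i < num_roots; i++) {
      oop obj = roots[i];
      if (obj == NULL) {
        continue;                       // slot already cleared
      }
      if (is_alive->do_object_b(obj)) {
        keep_alive->do_oop(&roots[i]);  // referent survived the pause: fix up the slot
      } else {
        roots[i] = NULL;                // referent died: clear the weak root
      }
    }
  }
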
4318 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
4319   // Any reference objects, in the collection set, that were 'discovered'
4320   // by the CM ref processor should have already been copied (either by
4321   // applying the external root copy closure to the discovered lists, or
4322   // by following an RSet entry).
4323   //
4324   // But some of the referents, that are in the collection set, that these
4325   // reference objects point to may not have been copied: the STW ref
4326   // processor would have seen that the reference object had already
4327   // been 'discovered' and would have skipped discovering the reference,
4328   // but would not have treated the reference object as a regular oop.
4329   // As a result the copy closure would not have been applied to the
4330   // referent object.
4331   //
4332   // We need to explicitly copy these referent objects - the references
4333   // will be processed at the end of remarking.
4334   //
4335   // We also need to do this copying before we process the reference
4336   // objects discovered by the STW ref processor in case one of these
4337   // referents points to another object which is also referenced by an


4516   double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
4517   phase_times->record_par_time(par_time_ms);
4518 
4519   double code_root_fixup_time_ms =
4520         (os::elapsedTime() - end_par_time_sec) * 1000.0;
4521   phase_times->record_code_root_fixup_time(code_root_fixup_time_ms);
4522 }
4523 
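The phase times recorded above (and the string-dedup fixup time below) all follow the same idiom: os::elapsedTime() returns elapsed wall-clock seconds, so the delta is scaled by 1000.0 before being recorded in milliseconds. A minimal sketch of that idiom, with a hypothetical record_ms callback standing in for the G1GCPhaseTimes setters:

  // Sketch only: time a phase with os::elapsedTime() and report milliseconds.
  double time_phase_ms(void (*phase)(), void (*record_ms)(double)) {
    double start_sec = os::elapsedTime();                    // elapsed wall-clock seconds
    phase();                                                 // the work being timed
    double elapsed_ms = (os::elapsedTime() - start_sec) * 1000.0;
    record_ms(elapsed_ms);                                   // phase times are kept in ms
    return elapsed_ms;
  }
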
4524 void G1CollectedHeap::post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* per_thread_states) {
4525   // Process any discovered reference objects - we have
4526   // to do this _before_ we retire the GC alloc regions
4527   // as we may have to copy some 'reachable' referent
4528   // objects (and their reachable sub-graphs) that were
4529   // not copied during the pause.
4530   if (g1_policy()->should_process_references()) {
4531     preserve_cm_referents(per_thread_states);
4532     process_discovered_references(per_thread_states);
4533   } else {
4534     ref_processor_stw()->verify_no_references_recorded();
4535     process_weak_jni_handles();
4536     process_heap_monitoring();
4537   }
4538 
4539   if (G1StringDedup::is_enabled()) {
4540     double fixup_start = os::elapsedTime();
4541 
4542     G1STWIsAliveClosure is_alive(this);
4543     G1KeepAliveClosure keep_alive(this);
4544     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
4545 
4546     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
4547     g1_policy()->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
4548   }
4549 
4550   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4551 
4552   if (evacuation_failed()) {
4553     restore_after_evac_failure();
4554 
4555     // Reset the G1EvacuationFailureALot counters and flags
4556     // Note: the values are reset only when an actual

