< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page




  60 #include "gc/g1/heapRegionSet.inline.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/adaptiveSizePolicy.hpp"
  63 #include "gc/shared/gcHeapSummary.hpp"
  64 #include "gc/shared/gcId.hpp"
  65 #include "gc/shared/gcLocker.inline.hpp"
  66 #include "gc/shared/gcTimer.hpp"
  67 #include "gc/shared/gcTrace.hpp"
  68 #include "gc/shared/gcTraceTime.inline.hpp"
  69 #include "gc/shared/generationSpec.hpp"
  70 #include "gc/shared/isGCActiveMark.hpp"
  71 #include "gc/shared/preservedMarks.inline.hpp"
  72 #include "gc/shared/suspendibleThreadSet.hpp"
  73 #include "gc/shared/referenceProcessor.inline.hpp"
  74 #include "gc/shared/taskqueue.inline.hpp"
  75 #include "gc/shared/weakProcessor.hpp"
  76 #include "logging/log.hpp"
  77 #include "memory/allocation.hpp"
  78 #include "memory/iterator.hpp"
  79 #include "memory/resourceArea.hpp"


  80 #include "oops/oop.inline.hpp"
  81 #include "prims/resolvedMethodTable.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/handles.inline.hpp"
  84 #include "runtime/init.hpp"
  85 #include "runtime/orderAccess.inline.hpp"
  86 #include "runtime/threadSMR.hpp"
  87 #include "runtime/vmThread.hpp"
  88 #include "utilities/align.hpp"
  89 #include "utilities/globalDefinitions.hpp"
  90 #include "utilities/stack.inline.hpp"
  91 
// Word-size threshold at or above which an allocation is treated as humongous.
// Zero-initialized here; presumably set during heap initialization — the
// initializer is not visible in this chunk (TODO confirm against full file).
  92 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  93 
  94 // INVARIANTS/NOTES
  95 //
  96 // All allocation activity covered by the G1CollectedHeap interface is
  97 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  98 // and allocate_new_tlab, which are the "entry" points to the
  99 // allocation code from the rest of the JVM.  (Note that this does not


3793 // and different queues.
3794 
3795 class G1CopyingKeepAliveClosure: public OopClosure {
3796   G1CollectedHeap*         _g1h;
3797   OopClosure*              _copy_non_heap_obj_cl;
3798   G1ParScanThreadState*    _par_scan_state;
3799 
3800 public:
3801   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3802                             OopClosure* non_heap_obj_cl,
3803                             G1ParScanThreadState* pss):
3804     _g1h(g1h),
3805     _copy_non_heap_obj_cl(non_heap_obj_cl),
3806     _par_scan_state(pss)
3807   {}
3808 
3809   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3810   virtual void do_oop(      oop* p) { do_oop_work(p); }
3811 
3812   template <class T> void do_oop_work(T* p) {
3813     oop obj = oopDesc::load_decode_heap_oop(p);
3814 
3815     if (_g1h->is_in_cset_or_humongous(obj)) {
3816       // If the referent object has been forwarded (either copied
3817       // to a new location or to itself in the event of an
3818       // evacuation failure) then we need to update the reference
3819       // field and, if both reference and referent are in the G1
3820       // heap, update the RSet for the referent.
3821       //
3822       // If the referent has not been forwarded then we have to keep
3823     // it alive by policy. Therefore we have to copy the referent.
3824       //
3825       // If the reference field is in the G1 heap then we can push
3826       // on the PSS queue. When the queue is drained (after each
3827     // phase of reference processing) the object and its followers
3828       // will be copied, the reference field set to point to the
3829       // new location, and the RSet updated. Otherwise we need to
3830     // use the non-heap or metadata closures directly to copy
3831       // the referent object and update the pointer, while avoiding
3832       // updating the RSet.
3833 


5198   uint index = _hrm.find_highest_free(&expanded);
5199 
5200   if (index != G1_NO_HRM_INDEX) {
5201     if (expanded) {
5202       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5203                                 HeapRegion::GrainWords * HeapWordSize);
5204     }
5205     _hrm.allocate_free_regions_starting_at(index, 1);
5206     return region_at(index);
5207   }
5208   return NULL;
5209 }
5210 
5211 // Optimized nmethod scanning
5212 
5213 class RegisterNMethodOopClosure: public OopClosure {
5214   G1CollectedHeap* _g1h;
5215   nmethod* _nm;
5216 
5217   template <class T> void do_oop_work(T* p) {
5218     T heap_oop = oopDesc::load_heap_oop(p);
5219     if (!oopDesc::is_null(heap_oop)) {
5220       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5221       HeapRegion* hr = _g1h->heap_region_containing(obj);
5222       assert(!hr->is_continues_humongous(),
5223              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5224              " starting at " HR_FORMAT,
5225              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5226 
5227       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5228       hr->add_strong_code_root_locked(_nm);
5229     }
5230   }
5231 
5232 public:
5233   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5234     _g1h(g1h), _nm(nm) {}
5235 
5236   void do_oop(oop* p)       { do_oop_work(p); }
5237   void do_oop(narrowOop* p) { do_oop_work(p); }
5238 };
5239 
5240 class UnregisterNMethodOopClosure: public OopClosure {
5241   G1CollectedHeap* _g1h;
5242   nmethod* _nm;
5243 
5244   template <class T> void do_oop_work(T* p) {
5245     T heap_oop = oopDesc::load_heap_oop(p);
5246     if (!oopDesc::is_null(heap_oop)) {
5247       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
5248       HeapRegion* hr = _g1h->heap_region_containing(obj);
5249       assert(!hr->is_continues_humongous(),
5250              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5251              " starting at " HR_FORMAT,
5252              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5253 
5254       hr->remove_strong_code_root(_nm);
5255     }
5256   }
5257 
5258 public:
5259   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5260     _g1h(g1h), _nm(nm) {}
5261 
5262   void do_oop(oop* p)       { do_oop_work(p); }
5263   void do_oop(narrowOop* p) { do_oop_work(p); }
5264 };
5265 
5266 // Returns true if the reference points to an object that
5267 // can move in an incremental collection.




  60 #include "gc/g1/heapRegionSet.inline.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/adaptiveSizePolicy.hpp"
  63 #include "gc/shared/gcHeapSummary.hpp"
  64 #include "gc/shared/gcId.hpp"
  65 #include "gc/shared/gcLocker.inline.hpp"
  66 #include "gc/shared/gcTimer.hpp"
  67 #include "gc/shared/gcTrace.hpp"
  68 #include "gc/shared/gcTraceTime.inline.hpp"
  69 #include "gc/shared/generationSpec.hpp"
  70 #include "gc/shared/isGCActiveMark.hpp"
  71 #include "gc/shared/preservedMarks.inline.hpp"
  72 #include "gc/shared/suspendibleThreadSet.hpp"
  73 #include "gc/shared/referenceProcessor.inline.hpp"
  74 #include "gc/shared/taskqueue.inline.hpp"
  75 #include "gc/shared/weakProcessor.hpp"
  76 #include "logging/log.hpp"
  77 #include "memory/allocation.hpp"
  78 #include "memory/iterator.hpp"
  79 #include "memory/resourceArea.hpp"
  80 #include "oops/access.inline.hpp"
  81 #include "oops/compressedOops.inline.hpp"
  82 #include "oops/oop.inline.hpp"
  83 #include "prims/resolvedMethodTable.hpp"
  84 #include "runtime/atomic.hpp"
  85 #include "runtime/handles.inline.hpp"
  86 #include "runtime/init.hpp"
  87 #include "runtime/orderAccess.inline.hpp"
  88 #include "runtime/threadSMR.hpp"
  89 #include "runtime/vmThread.hpp"
  90 #include "utilities/align.hpp"
  91 #include "utilities/globalDefinitions.hpp"
  92 #include "utilities/stack.inline.hpp"
  93 
// Word-size threshold at or above which an allocation is treated as humongous.
// Zero-initialized here; presumably set during heap initialization — the
// initializer is not visible in this chunk (TODO confirm against full file).
  94 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  95 
  96 // INVARIANTS/NOTES
  97 //
  98 // All allocation activity covered by the G1CollectedHeap interface is
  99 // serialized by acquiring the HeapLock.  This happens in mem_allocate
 100 // and allocate_new_tlab, which are the "entry" points to the
 101 // allocation code from the rest of the JVM.  (Note that this does not


3795 // and different queues.
3796 
3797 class G1CopyingKeepAliveClosure: public OopClosure {
3798   G1CollectedHeap*         _g1h;
3799   OopClosure*              _copy_non_heap_obj_cl;
3800   G1ParScanThreadState*    _par_scan_state;
3801 
3802 public:
3803   G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
3804                             OopClosure* non_heap_obj_cl,
3805                             G1ParScanThreadState* pss):
3806     _g1h(g1h),
3807     _copy_non_heap_obj_cl(non_heap_obj_cl),
3808     _par_scan_state(pss)
3809   {}
3810 
3811   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3812   virtual void do_oop(      oop* p) { do_oop_work(p); }
3813 
3814   template <class T> void do_oop_work(T* p) {
3815     oop obj = RawAccess<>::oop_load(p);
3816 
3817     if (_g1h->is_in_cset_or_humongous(obj)) {
3818       // If the referent object has been forwarded (either copied
3819       // to a new location or to itself in the event of an
3820       // evacuation failure) then we need to update the reference
3821       // field and, if both reference and referent are in the G1
3822       // heap, update the RSet for the referent.
3823       //
3824       // If the referent has not been forwarded then we have to keep
3825     // it alive by policy. Therefore we have to copy the referent.
3826       //
3827       // If the reference field is in the G1 heap then we can push
3828       // on the PSS queue. When the queue is drained (after each
3829     // phase of reference processing) the object and its followers
3830       // will be copied, the reference field set to point to the
3831       // new location, and the RSet updated. Otherwise we need to
3832     // use the non-heap or metadata closures directly to copy
3833       // the referent object and update the pointer, while avoiding
3834       // updating the RSet.
3835 


5200   uint index = _hrm.find_highest_free(&expanded);
5201 
5202   if (index != G1_NO_HRM_INDEX) {
5203     if (expanded) {
5204       log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
5205                                 HeapRegion::GrainWords * HeapWordSize);
5206     }
5207     _hrm.allocate_free_regions_starting_at(index, 1);
5208     return region_at(index);
5209   }
5210   return NULL;
5211 }
5212 
5213 // Optimized nmethod scanning
5214 
5215 class RegisterNMethodOopClosure: public OopClosure {
5216   G1CollectedHeap* _g1h;
5217   nmethod* _nm;
5218 
5219   template <class T> void do_oop_work(T* p) {
5220     T heap_oop = RawAccess<>::oop_load(p);
5221     if (!CompressedOops::is_null(heap_oop)) {
5222       oop obj = CompressedOops::decode_not_null(heap_oop);
5223       HeapRegion* hr = _g1h->heap_region_containing(obj);
5224       assert(!hr->is_continues_humongous(),
5225              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5226              " starting at " HR_FORMAT,
5227              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5228 
5229       // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
5230       hr->add_strong_code_root_locked(_nm);
5231     }
5232   }
5233 
5234 public:
5235   RegisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5236     _g1h(g1h), _nm(nm) {}
5237 
5238   void do_oop(oop* p)       { do_oop_work(p); }
5239   void do_oop(narrowOop* p) { do_oop_work(p); }
5240 };
5241 
5242 class UnregisterNMethodOopClosure: public OopClosure {
5243   G1CollectedHeap* _g1h;
5244   nmethod* _nm;
5245 
5246   template <class T> void do_oop_work(T* p) {
5247     T heap_oop = RawAccess<>::oop_load(p);
5248     if (!CompressedOops::is_null(heap_oop)) {
5249       oop obj = CompressedOops::decode_not_null(heap_oop);
5250       HeapRegion* hr = _g1h->heap_region_containing(obj);
5251       assert(!hr->is_continues_humongous(),
5252              "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
5253              " starting at " HR_FORMAT,
5254              p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
5255 
5256       hr->remove_strong_code_root(_nm);
5257     }
5258   }
5259 
5260 public:
5261   UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) :
5262     _g1h(g1h), _nm(nm) {}
5263 
5264   void do_oop(oop* p)       { do_oop_work(p); }
5265   void do_oop(narrowOop* p) { do_oop_work(p); }
5266 };
5267 
5268 // Returns true if the reference points to an object that
5269 // can move in an incremental collection.


< prev index next >