src/share/vm/gc_implementation/g1/heapRegionSets.cpp
Old version:
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  27 #include "gc_implementation/g1/heapRegionSets.hpp"
  28 
  29 // Note on the check_mt_safety() methods below:
  30 //
  31 // Verification of the "master" heap region sets / lists that are
  32 // maintained by G1CollectedHeap is always done during a STW pause and
  33 // by the VM thread at the start / end of the pause. The standard
  34 // verification methods all assert check_mt_safety(). This is
  35 // important as it ensures that verification is done without
  36 // concurrent updates taking place at the same time. It follows that,
  37 // for the "master" heap region sets / lists, the check_mt_safety()
  38 // method should include the VM thread / STW case.
  39 
  40 //////////////////// FreeRegionList ////////////////////
  41 
  42 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
  43   if (hr->is_young()) {
  44     return "the region should not be young";
  45   }
  46   // The superclass will check that the region is empty and
  47   // not humongous.
  48   return HeapRegionLinkedList::verify_region_extra(hr);
  49 }
  50 
  51 //////////////////// MasterFreeRegionList ////////////////////
  52 
  53 const char* MasterFreeRegionList::verify_region_extra(HeapRegion* hr) {
  54   // We should reset the RSet for parallel iteration before we add it
  55   // to the master free list so that it is ready when the region is
  56   // re-allocated.
  57   if (!hr->rem_set()->verify_ready_for_par_iteration()) {
  58     return "the region's RSet should be ready for parallel iteration";
  59   }
  60   return FreeRegionList::verify_region_extra(hr);
  61 }
  62 
  63 bool MasterFreeRegionList::check_mt_safety() {
  64   // Master Free List MT safety protocol:
  65   // (a) If we're at a safepoint, operations on the master free list
  66   // should be invoked by either the VM thread (which will serialize
  67   // them) or by the GC workers while holding the
  68   // FreeList_lock.
  69   // (b) If we're not at a safepoint, operations on the master free
  70   // list should be invoked while holding the Heap_lock.
  71 
  72   if (SafepointSynchronize::is_at_safepoint()) {
  73     guarantee(Thread::current()->is_VM_thread() ||
  74               FreeList_lock->owned_by_self(),
  75               hrs_ext_msg(this, "master free list MT safety protocol "
  76                                 "at a safepoint"));
  77   } else {
  78     guarantee(Heap_lock->owned_by_self(),
  79               hrs_ext_msg(this, "master free list MT safety protocol "
  80                                 "outside a safepoint"));
  81   }
  82 
  83   return FreeRegionList::check_mt_safety();
  84 }
  85 
  86 //////////////////// SecondaryFreeRegionList ////////////////////
  87 
  88 bool SecondaryFreeRegionList::check_mt_safety() {
  89   // Secondary Free List MT safety protocol:
  90   // Operations on the secondary free list should always be invoked
  91   // while holding the SecondaryFreeList_lock.
  92 
  93   guarantee(SecondaryFreeList_lock->owned_by_self(),
  94             hrs_ext_msg(this, "secondary free list MT safety protocol"));
  95 
  96   return FreeRegionList::check_mt_safety();
  97 }
  98 
  99 //////////////////// OldRegionSet ////////////////////
 100 
 101 const char* OldRegionSet::verify_region_extra(HeapRegion* hr) {
 102   if (hr->is_young()) {
 103     return "the region should not be young";
 104   }
 105   // The superclass will check that the region is not empty and not
 106   // humongous.
 107   return HeapRegionSet::verify_region_extra(hr);
 108 }
 109 
 110 //////////////////// MasterOldRegionSet ////////////////////
 111 
 112 bool MasterOldRegionSet::check_mt_safety() {
 113   // Master Old Set MT safety protocol:
 114   // (a) If we're at a safepoint, operations on the master old set
 115   // should be invoked:
 116   // - by the VM thread (which will serialize them), or
 117   // - by the GC workers while holding the FreeList_lock, if we're
 118   //   at a safepoint for an evacuation pause (this lock is taken
  119 //   anyway when a GC alloc region is retired so that a new one
 120   //   is allocated from the free list), or
 121   // - by the GC workers while holding the OldSets_lock, if we're at a
 122   //   safepoint for a cleanup pause.
 123   // (b) If we're not at a safepoint, operations on the master old set
 124   // should be invoked while holding the Heap_lock.
 125 
 126   if (SafepointSynchronize::is_at_safepoint()) {
 127     guarantee(Thread::current()->is_VM_thread() ||
 128               _phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() ||
 129               _phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(),
 130               hrs_ext_msg(this, "master old set MT safety protocol "
 131                                 "at a safepoint"));
 132   } else {
 133     guarantee(Heap_lock->owned_by_self(),
 134               hrs_ext_msg(this, "master old set MT safety protocol "
 135                                 "outside a safepoint"));
 136   }
 137 
 138   return OldRegionSet::check_mt_safety();
 139 }
 140 
 141 //////////////////// HumongousRegionSet ////////////////////
 142 
 143 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
 144   if (hr->is_young()) {
 145     return "the region should not be young";
 146   }
 147   // The superclass will check that the region is not empty and
 148   // humongous.
 149   return HeapRegionSet::verify_region_extra(hr);
 150 }
 151 
 152 //////////////////// MasterHumongousRegionSet ////////////////////
 153 
 154 bool MasterHumongousRegionSet::check_mt_safety() {
 155   // Master Humongous Set MT safety protocol:
 156   // (a) If we're at a safepoint, operations on the master humongous
 157   // set should be invoked by either the VM thread (which will
 158   // serialize them) or by the GC workers while holding the
 159   // OldSets_lock.
 160   // (b) If we're not at a safepoint, operations on the master
 161   // humongous set should be invoked while holding the Heap_lock.
 162 
 163   if (SafepointSynchronize::is_at_safepoint()) {
 164     guarantee(Thread::current()->is_VM_thread() ||
 165               OldSets_lock->owned_by_self(),
 166               hrs_ext_msg(this, "master humongous set MT safety protocol "
 167                                 "at a safepoint"));
 168   } else {
 169     guarantee(Heap_lock->owned_by_self(),
 170               hrs_ext_msg(this, "master humongous set MT safety protocol "
 171                                 "outside a safepoint"));
 172   }
 173 
 174   return HumongousRegionSet::check_mt_safety();
  175 }

New version:
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/heapRegionRemSet.hpp"
  27 #include "gc_implementation/g1/heapRegionSet.hpp"
  28 
  29 // Note on the check_mt_safety() methods below:
  30 //
  31 // Verification of the "master" heap region sets / lists that are
  32 // maintained by G1CollectedHeap is always done during a STW pause and
  33 // by the VM thread at the start / end of the pause. The standard
  34 // verification methods all assert check_mt_safety(). This is
  35 // important as it ensures that verification is done without
  36 // concurrent updates taking place at the same time. It follows that,
  37 // for the "master" heap region sets / lists, the check_mt_safety()
  38 // method should include the VM thread / STW case.
  39 
  40 //////////////////// FreeRegionList ////////////////////
  41 
  42 void FreeRegionList::verify_list() {
  43   HeapRegion* curr = head();
  44   HeapRegion* prev1 = NULL;
  45   HeapRegion* prev0 = NULL;
  46   uint count = 0;
  47   size_t capacity = 0;
  48   while (curr != NULL) {
  49     verify_region(curr);
  50 
  51     count++;
  52     guarantee(count < _unrealistically_long_length,
  53         hrs_err_msg("[%s] the calculated length: %u seems very long, is there maybe a cycle? curr: "PTR_FORMAT" prev0: "PTR_FORMAT" " "prev1: "PTR_FORMAT" length: %u", name(), count, curr, prev0, prev1, length()));
  54 
  55     capacity += curr->capacity();
  56 
  57     prev1 = prev0;
  58     prev0 = curr;
  59     curr = curr->next();
  60   }
  61 
  62   guarantee(tail() == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), tail()->hrs_index(), prev0->hrs_index()));
  63 
  64   guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count));
  65   guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
  66       name(), total_capacity_bytes(), capacity));
  67 }
  68 
  69 //////////////////// MasterFreeRegionList ////////////////////
  70 
  71 bool MasterFreeRegionListMtSafeChecker::check() {
  72   // Master Free List MT safety protocol:
  73   // (a) If we're at a safepoint, operations on the master free list
  74   // should be invoked by either the VM thread (which will serialize
  75   // them) or by the GC workers while holding the
  76   // FreeList_lock.
  77   // (b) If we're not at a safepoint, operations on the master free
  78   // list should be invoked while holding the Heap_lock.
  79 
  80   if (SafepointSynchronize::is_at_safepoint()) {
  81     guarantee(Thread::current()->is_VM_thread() ||
  82               FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
  83   } else {
  84     guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
  85   }
  86 
  87   return true;
  88 }
  89 
  90 //////////////////// SecondaryFreeRegionList ////////////////////
  91 
  92 bool SecondaryFreeRegionListMtSafeChecker::check() {
  93   // Secondary Free List MT safety protocol:
  94   // Operations on the secondary free list should always be invoked
  95   // while holding the SecondaryFreeList_lock.
  96 
  97   guarantee(SecondaryFreeList_lock->owned_by_self(), "secondary free list MT safety protocol");
  98 
  99   return true;
 100 }
 101 
 102 //////////////////// OldRegionSet ////////////////////
 103 
  104   // Master Old Set MT safety protocol:
 106   // (a) If we're at a safepoint, operations on the master old set
 107   // should be invoked:
 108   // - by the VM thread (which will serialize them), or
 109   // - by the GC workers while holding the FreeList_lock, if we're
 110   //   at a safepoint for an evacuation pause (this lock is taken
  111 //   anyway when a GC alloc region is retired so that a new one
 112   //   is allocated from the free list), or
 113   // - by the GC workers while holding the OldSets_lock, if we're at a
 114   //   safepoint for a cleanup pause.
 115   // (b) If we're not at a safepoint, operations on the master old set
 116   // should be invoked while holding the Heap_lock.
 117 
 118   if (SafepointSynchronize::is_at_safepoint()) {
 119     guarantee(Thread::current()->is_VM_thread()
 120         || FreeList_lock->owned_by_self() || OldSets_lock->owned_by_self(),
  121         "master old set MT safety protocol at a safepoint");
 122   } else {
  123     guarantee(Heap_lock->owned_by_self(), "master old set MT safety protocol outside a safepoint");
 124   }
 125 
 126   return true;
 127 }
 128 
 129 //////////////////// HumongousRegionSet ////////////////////
 130 
 131 bool HumongousRegionSetMtSafeChecker::check() {
  132   // Humongous Set MT safety protocol:
 133   // (a) If we're at a safepoint, operations on the master humongous
 134   // set should be invoked by either the VM thread (which will
 135   // serialize them) or by the GC workers while holding the
 136   // OldSets_lock.
 137   // (b) If we're not at a safepoint, operations on the master
 138   // humongous set should be invoked while holding the Heap_lock.
 139 
 140   if (SafepointSynchronize::is_at_safepoint()) {
 141     guarantee(Thread::current()->is_VM_thread() ||
 142               OldSets_lock->owned_by_self(),
  143               "master humongous set MT safety protocol at a safepoint");
 144   } else {
 145     guarantee(Heap_lock->owned_by_self(),
  146               "master humongous set MT safety protocol outside a safepoint");
 147   }
 148 
 149   return true;
 150 }
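
For readers comparing the two versions: the new code drops the virtual check_mt_safety() overrides on the set classes and moves the locking checks into standalone *MtSafeChecker classes. The sketch below is a minimal, self-contained illustration of that delegation pattern, assuming nothing beyond standard C++; every name in it (MtSafeChecker, LockHeldChecker, RegionSet) is hypothetical, and the real HotSpot code uses guarantee(), FreeList_lock, Heap_lock, and so on, as shown in the listings above.

// Illustrative sketch only; not HotSpot code.
#include <cassert>
#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the per-set MT safety policy objects
// (MasterFreeRegionListMtSafeChecker and friends in the diff above).
class MtSafeChecker {
public:
  virtual ~MtSafeChecker() {}
  // Returns true if the calling context satisfies the set's locking protocol.
  virtual bool check() const = 0;
};

// Hypothetical checker: the caller must hold a given "lock".
class LockHeldChecker : public MtSafeChecker {
  const bool* _lock_held;   // stand-in for SomeLock->owned_by_self()
  const char* _name;
public:
  LockHeldChecker(const bool* lock_held, const char* name)
    : _lock_held(lock_held), _name(name) {}
  virtual bool check() const {
    // Stand-in for guarantee(<lock>->owned_by_self(), "<set> MT safety protocol")
    assert(*_lock_held && "MT safety protocol violated");
    return true;
  }
  const char* name() const { return _name; }
};

// Hypothetical region set that delegates its MT safety check to a
// checker object instead of overriding a virtual check_mt_safety().
class RegionSet {
  const MtSafeChecker* _checker;
public:
  explicit RegionSet(const MtSafeChecker* checker) : _checker(checker) {}
  void verify() const {
    if (_checker != NULL) {
      _checker->check();  // must pass before any verification or mutation
    }
    // ... walk the list and verify invariants here, in the spirit of
    // FreeRegionList::verify_list() in the new version above ...
  }
};

int main() {
  bool heap_lock_held = true;  // pretend the caller owns the required lock
  LockHeldChecker checker(&heap_lock_held, "master free list");
  RegionSet set(&checker);
  set.verify();
  std::printf("%s MT safety check passed\n", checker.name());
  return 0;
}

Delegating to a separate checker object keeps each set's locking protocol in one place and lets the shared verification code stay identical across the different sets and lists.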