< prev index next >

src/hotspot/share/gc/g1/heapRegionManager.cpp

Print this page
rev 56821 : imported patch 8220310.mut.0
rev 56822 : imported patch 8220310.mut.1
rev 56823 : imported patch 8220310.mut.2
rev 56824 : imported patch 8220310.mut.3
rev 56825 : imported patch 8220310.mut.4
rev 56834 : imported patch 8220312.stat.2
rev 56836 : imported patch 8220312.stat.4
rev 56838 : [mq]: 8220312.stat.5


   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Arguments.hpp"
  27 #include "gc/g1/g1CollectedHeap.inline.hpp"
  28 #include "gc/g1/g1ConcurrentRefine.hpp"

  29 #include "gc/g1/heapRegion.hpp"
  30 #include "gc/g1/heapRegionManager.inline.hpp"
  31 #include "gc/g1/heapRegionSet.inline.hpp"
  32 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  33 #include "logging/logStream.hpp"
  34 #include "memory/allocation.hpp"
  35 #include "utilities/bitMap.inline.hpp"
  36 
  37 class MasterFreeRegionListChecker : public HeapRegionSetChecker {
  38 public:
  39   void check_mt_safety() {
  40     // Master Free List MT safety protocol:
  41     // (a) If we're at a safepoint, operations on the master free list
  42     // should be invoked by either the VM thread (which will serialize
  43     // them) or by the GC workers while holding the
  44     // FreeList_lock.
  45     // (b) If we're not at a safepoint, operations on the master free
  46     // list should be invoked while holding the Heap_lock.
  47 
  48     if (SafepointSynchronize::is_at_safepoint()) {


  90   _next_bitmap_mapper = next_bitmap;
  91 
  92   _bot_mapper = bot;
  93   _cardtable_mapper = cardtable;
  94 
  95   _card_counts_mapper = card_counts;
  96 
  97   MemRegion reserved = heap_storage->reserved();
  98   _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
  99 
 100   _available_map.initialize(_regions.length());
 101 }
 102 
  // Returns true iff the region with the given index is committed and usable,
  // as tracked by the _available_map bitmap (set during commit, cleared on uncommit).
  103 bool HeapRegionManager::is_available(uint region) const {
  104   return _available_map.at(region);
  105 }
 106 
  // Removes and returns one region from the master free list, or NULL if the
  // free list is empty. When NUMA is enabled and a specific node index is
  // requested, a region on that node is preferred; otherwise (or if none is
  // found on that node) any free region is taken.
  107 HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  108   HeapRegion* hr = NULL;
  // Non-young regions are taken from the head of the free list, young regions
  // from the tail — presumably to keep young and old allocations at opposite
  // ends of the heap; confirm against free-list ordering policy.
  109   bool from_head = !type.is_young();

  110 
  111   if (requested_node_index != G1NUMA::AnyNodeIndex && G1NUMA::numa()->is_enabled()) {
  112     // Try to allocate with requested node index.
  113     hr = _free_list.remove_region_with_node_index(from_head, requested_node_index, NULL);
  114   }
  115 
  116   if (hr == NULL) {
  117     // If there's a single active node or we did not get a region from our requested node,
  118     // try without requested node index.
  119     hr = _free_list.remove_region(from_head);
  120   }
  121 
  122   if (hr != NULL) {
  // A region just removed from the free list must be unlinked and must have
  // been committed (only available regions are ever put on the free list).
  123     assert(hr->next() == NULL, "Single region should not have next");
  124     assert(is_available(hr->hrm_index()), "Must be committed");




  125   }
  126 
  127   return hr;
  128 }
 129 
  // Debug-only predicate: true iff the region is currently on the master free
  // list. Compiled only in ASSERT builds; used from assertions.
  130 #ifdef ASSERT
  131 bool HeapRegionManager::is_free(HeapRegion* hr) const {
  132   return _free_list.contains(hr);
  133 }
  134 #endif
 135 
  // Constructs the HeapRegion object for the given region index. Computes the
  // region's address range from its index and one-GrainWords length, checks it
  // lies inside the reserved heap, then delegates construction to the heap.
  136 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  137   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  138   HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  139   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  140   assert(reserved().contains(mr), "invariant");
  141   return g1h->new_heap_region(hrm_index, mr);
  142 }
 143 
 144 void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {




   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Arguments.hpp"
  27 #include "gc/g1/g1CollectedHeap.inline.hpp"
  28 #include "gc/g1/g1ConcurrentRefine.hpp"
  29 #include "gc/g1/g1NUMAStats.hpp"
  30 #include "gc/g1/heapRegion.hpp"
  31 #include "gc/g1/heapRegionManager.inline.hpp"
  32 #include "gc/g1/heapRegionSet.inline.hpp"
  33 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
  34 #include "logging/logStream.hpp"
  35 #include "memory/allocation.hpp"
  36 #include "utilities/bitMap.inline.hpp"
  37 
  38 class MasterFreeRegionListChecker : public HeapRegionSetChecker {
  39 public:
  40   void check_mt_safety() {
  41     // Master Free List MT safety protocol:
  42     // (a) If we're at a safepoint, operations on the master free list
  43     // should be invoked by either the VM thread (which will serialize
  44     // them) or by the GC workers while holding the
  45     // FreeList_lock.
  46     // (b) If we're not at a safepoint, operations on the master free
  47     // list should be invoked while holding the Heap_lock.
  48 
  49     if (SafepointSynchronize::is_at_safepoint()) {


  91   _next_bitmap_mapper = next_bitmap;
  92 
  93   _bot_mapper = bot;
  94   _cardtable_mapper = cardtable;
  95 
  96   _card_counts_mapper = card_counts;
  97 
  98   MemRegion reserved = heap_storage->reserved();
  99   _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
 100 
 101   _available_map.initialize(_regions.length());
 102 }
 103 
  // Returns true iff the region with the given index is committed and usable,
  // as tracked by the _available_map bitmap (set during commit, cleared on uncommit).
  104 bool HeapRegionManager::is_available(uint region) const {
  105   return _available_map.at(region);
  106 }
 107 
  // Removes and returns one region from the master free list, or NULL if the
  // free list is empty. When NUMA is enabled and a specific node index is
  // requested, a region on that node is preferred; otherwise (or if none is
  // found on that node) any free region is taken. On success, records the
  // requested-vs-obtained node pairing in the NUMA allocation statistics.
  108 HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
  109   HeapRegion* hr = NULL;
  // Non-young regions are taken from the head of the free list, young regions
  // from the tail — presumably to keep young and old allocations at opposite
  // ends of the heap; confirm against free-list ordering policy.
  110   bool from_head = !type.is_young();
  111   G1NUMA* numa = G1NUMA::numa();
  112 
  113   if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
  114     // Try to allocate with requested node index.
  115     hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
  116   }
  117 
  118   if (hr == NULL) {
  119     // If there's a single active node or we did not get a region from our requested node,
  120     // try without requested node index.
  121     hr = _free_list.remove_region(from_head);
  122   }
  123 
  124   if (hr != NULL) {
  // A region just removed from the free list must be unlinked and must have
  // been committed (only available regions are ever put on the free list).
  125     assert(hr->next() == NULL, "Single region should not have next");
  126     assert(is_available(hr->hrm_index()), "Must be committed");
  127 
  // Only count regions whose node index maps to an active NUMA node; indices
  // at or beyond num_active_nodes() (e.g. an unknown-node sentinel — confirm)
  // are excluded from the NewRegionAlloc statistics.
  128     if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
  129       numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
  130     }
  131   }
  132 
  133   return hr;
  134 }
 135 
  // Debug-only predicate: true iff the region is currently on the master free
  // list. Compiled only in ASSERT builds; used from assertions.
  136 #ifdef ASSERT
  137 bool HeapRegionManager::is_free(HeapRegion* hr) const {
  138   return _free_list.contains(hr);
  139 }
  140 #endif
 141 
  // Constructs the HeapRegion object for the given region index. Computes the
  // region's address range from its index and one-GrainWords length, checks it
  // lies inside the reserved heap, then delegates construction to the heap.
  142 HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  143   G1CollectedHeap* g1h = G1CollectedHeap::heap();
  144   HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  145   MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  146   assert(reserved().contains(mr), "invariant");
  147   return g1h->new_heap_region(hrm_index, mr);
  148 }
 149 
 150 void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {


< prev index next >