
src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp

rev 56323 : imported patch 8220310.mut.0
rev 56326 : [mq]: 8220310.mut.1-3_kim
   1 /*
   2  * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
  27 #include "gc/shared/workgroup.hpp"
  28 #include "oops/markWord.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "runtime/atomic.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/bitMap.inline.hpp"
  35 
  36 G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
  37   _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
  38   _committed(mtGC), _dirty(mtGC), _special(false), _executable(false) {
  39   initialize_with_page_size(rs, used_size, page_size);
  40 }
  41 
  42 void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
  43   guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
  44 
  45   vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  46   vmassert(page_size > 0, "Page size must be non-zero.");
  47 
  48   guarantee(is_aligned(rs.base(), page_size),
  49             "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
  50   guarantee(is_aligned(used_size, os::vm_page_size()),
  51             "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
  52   guarantee(used_size <= rs.size(),
  53             "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
  54   guarantee(is_aligned(rs.size(), page_size),
  55             "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
  56 
  57   _low_boundary  = rs.base();
  58   _high_boundary = _low_boundary + used_size;
  59 
  60   _special = rs.special();
  61   _executable = rs.executable();
  62 
  63   _page_size = page_size;
  64 
  65   vmassert(_committed.size() == 0, "virtual space initialized more than once");
  66   BitMap::idx_t size_in_pages = rs.size() / page_size;
  67   _committed.initialize(size_in_pages);
  68   if (_special) {
  69     _dirty.initialize(size_in_pages);
  70   }
  71 
  72   _tail_size = used_size % _page_size;
  73 }
  74 
  75 G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
  76   // This does not release memory it never reserved.
  77   // Caller must release via rs.release();
  78   _low_boundary           = NULL;
  79   _high_boundary          = NULL;
  80   _special                = false;
  81   _executable             = false;
  82   _page_size              = 0;
  83   _tail_size              = 0;
  84 }
  85 
  86 size_t G1PageBasedVirtualSpace::committed_size() const {
  87   size_t result = _committed.count_one_bits() * _page_size;
  88   // The last page might not be in full.
  89   if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
  90     result -= _page_size - _tail_size;
  91   }
  92   return result;
  93 }
  94 
  95 size_t G1PageBasedVirtualSpace::reserved_size() const {
  96   return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
  97 }
  98 
  99 size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
 100   return reserved_size() - committed_size();
 101 }
 102 
 103 void G1PageBasedVirtualSpace::commit_and_set_special() {


 172   if (pages > 0) {
 173     commit_preferred_pages(start_page, pages);
 174   }
 175 
 176   if (need_to_commit_tail) {
 177     commit_tail();
 178   }
 179 }
 180 
 181 char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
 182   return MIN2(_high_boundary, page_start(end_page));
 183 }
 184 
 185 void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
 186   guarantee(start_page < end_page,
 187             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
 188 
 189   os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page), _page_size);
 190 }
 191 
 192 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages) {
 193   // We need to make sure to commit all pages covered by the given area.
 194   guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
 195 
 196   bool zero_filled = true;
 197   size_t end_page = start_page + size_in_pages;
 198 
 199   if (_special) {
 200     // Check for dirty pages and update zero_filled if any found.
 201     if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
 202       zero_filled = false;
 203       _dirty.clear_range(start_page, end_page);
 204     }
 205   } else {
 206     commit_internal(start_page, end_page);
 207   }
 208   _committed.set_range(start_page, end_page);
 209 
 210   return zero_filled;
 211 }
 212 
 213 void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
 214   guarantee(start_page < end_page,
 215             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
 216 
 217   char* start_addr = page_start(start_page);
 218   os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
 219 }
 220 
 221 void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
 222   guarantee(is_area_committed(start_page, size_in_pages), "checking");
 223 
 224   size_t end_page = start_page + size_in_pages;
 225   if (_special) {
 226     // Mark that memory is dirty. If committed again the memory might
 227     // need to be cleared explicitly.
 228     _dirty.set_range(start_page, end_page);


   1 /*
   2  * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1NUMA.inline.hpp"
  27 #include "gc/g1/g1PageBasedVirtualSpace.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "oops/markWord.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/atomic.hpp"
  32 #include "runtime/os.inline.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/bitMap.inline.hpp"
  36 
  37 G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) :
  38   _low_boundary(NULL), _high_boundary(NULL), _tail_size(0), _page_size(0),
  39   _committed(mtGC), _dirty(mtGC), _special(false), _executable(false), _numa(NULL) {
  40   initialize_with_page_size(rs, used_size, page_size, type);
  41 }
  42 
  43 void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size, MemoryType type) {
  44   guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
  45 
  46   vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
  47   vmassert(page_size > 0, "Page size must be non-zero.");
  48 
  49   guarantee(is_aligned(rs.base(), page_size),
  50             "Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size);
  51   guarantee(is_aligned(used_size, os::vm_page_size()),
  52             "Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size);
  53   guarantee(used_size <= rs.size(),
  54             "Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size());
  55   guarantee(is_aligned(rs.size(), page_size),
  56             "Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size);
  57 
  58   _low_boundary  = rs.base();
  59   _high_boundary = _low_boundary + used_size;
  60 
  61   _special = rs.special();
  62   _executable = rs.executable();
  63 
  64   _page_size = page_size;
  65 
  66   vmassert(_committed.size() == 0, "virtual space initialized more than once");
  67   BitMap::idx_t size_in_pages = rs.size() / page_size;
  68   _committed.initialize(size_in_pages);
  69   if (_special) {
  70     _dirty.initialize(size_in_pages);
  71   }
  72 
  73   _tail_size = used_size % _page_size;
  74 
  75   // Set _numa only if:
  76   //   1) This space is for java heap.
  77   //   2) There are multiple memory nodes because some OSes allow enabling UseNUMA.
  78   if (type == mtJavaHeap && G1MemoryNodeManager::mgr()->num_active_nodes() > 1) {
  79     _numa = G1NUMA::numa();
  80   }
  81 }
  82 
  83 G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
  84   // This does not release memory it never reserved.
  85   // Caller must release via rs.release();
  86   _low_boundary           = NULL;
  87   _high_boundary          = NULL;
  88   _special                = false;
  89   _executable             = false;
  90   _page_size              = 0;
  91   _tail_size              = 0;
  92   _numa                   = NULL;
  93 }
  94 
  95 size_t G1PageBasedVirtualSpace::committed_size() const {
  96   size_t result = _committed.count_one_bits() * _page_size;
  97   // The last page might not be in full.
  98   if (is_last_page_partial() && _committed.at(_committed.size() - 1)) {
  99     result -= _page_size - _tail_size;
 100   }
 101   return result;
 102 }
 103 
 104 size_t G1PageBasedVirtualSpace::reserved_size() const {
 105   return pointer_delta(_high_boundary, _low_boundary, sizeof(char));
 106 }
 107 
 108 size_t G1PageBasedVirtualSpace::uncommitted_size()  const {
 109   return reserved_size() - committed_size();
 110 }
 111 
 112 void G1PageBasedVirtualSpace::commit_and_set_special() {


 181   if (pages > 0) {
 182     commit_preferred_pages(start_page, pages);
 183   }
 184 
 185   if (need_to_commit_tail) {
 186     commit_tail();
 187   }
 188 }
 189 
 190 char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) const {
 191   return MIN2(_high_boundary, page_start(end_page));
 192 }
 193 
 194 void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
 195   guarantee(start_page < end_page,
 196             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
 197 
 198   os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page), _page_size);
 199 }
 200 
 201 bool G1PageBasedVirtualSpace::commit(size_t start_page, size_t size_in_pages, uint node_index) {
 202   // We need to make sure to commit all pages covered by the given area.
 203   guarantee(is_area_uncommitted(start_page, size_in_pages), "Specified area is not uncommitted");
 204 
 205   bool zero_filled = true;
 206   size_t end_page = start_page + size_in_pages;
 207 
 208   if (_special) {
 209     // Check for dirty pages and update zero_filled if any found.
 210     if (_dirty.get_next_one_offset(start_page, end_page) < end_page) {
 211       zero_filled = false;
 212       _dirty.clear_range(start_page, end_page);
 213     }
 214   } else {
 215     commit_internal(start_page, end_page);
 216   }
 217   _committed.set_range(start_page, end_page);
 218 
 219   if (_numa != NULL) {
 220     char* start_addr = page_start(start_page);
 221     size_t size_in_bytes = size_in_pages * _page_size;
 222     _numa->touch_memory((address)start_addr, size_in_bytes, node_index);
 223   }
 224 
 225   return zero_filled;
 226 }
 227 
 228 void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
 229   guarantee(start_page < end_page,
 230             "Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page);
 231 
 232   char* start_addr = page_start(start_page);
 233   os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
 234 }
 235 
 236 void G1PageBasedVirtualSpace::uncommit(size_t start_page, size_t size_in_pages) {
 237   guarantee(is_area_committed(start_page, size_in_pages), "checking");
 238 
 239   size_t end_page = start_page + size_in_pages;
 240   if (_special) {
 241     // Mark that memory is dirty. If committed again the memory might
 242     // need to be cleared explicitly.
 243     _dirty.set_range(start_page, end_page);