/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {


VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}
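// Sanity check used after every change to _committed_words: the total
// committed metaspace must never exceed MaxMetaspaceSize (which, unless set
// on the command line, defaults to "effectively unlimited").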
#define assert_committed_below_limit()                        \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  int num_purged_nodes = 0;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify(false);)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      delete vsl;
      num_purged_nodes++;
    } else {
      prev_vsl = vsl;
    }
  }

  // Verify list
#ifdef ASSERT
  if (num_purged_nodes > 0) {
    verify(false);
  }
#endif
}
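
// A minimal caller sketch for purge(), which runs under MetaspaceExpand_lock
// at class unloading time. Metaspace::space_list() is assumed here for
// illustration; Metaspace::chunk_manager_metadata() is the accessor used
// elsewhere in this file:
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   Metaspace::space_list()->purge(Metaspace::chunk_manager_metadata());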


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and are not deleted except at
// unloading time, during a safepoint.
VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  if (is_within_envelope((address)ptr)) {
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsn = iter.get_next();
      if (vsn->contains(ptr)) {
        return vsn;
      }
    }
  }
  return NULL;
}
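
// The envelope check above is a cheap fast path: most pointers that are not
// metaspace-managed fall outside [_envelope_lo, _envelope_hi) and never touch
// the list walk. An illustrative (hypothetical) use:
//
//   if (VirtualSpaceNode* vsn = list->find_enclosing_space(p)) {
//     // p is backed by metaspace memory owned by vsn
//   }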

void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

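// Two ways to build a list: the word-size constructor reserves and links its
// first node itself and serves the non-class metaspace; the ReservedSpace
// constructor wraps an already-reserved region and serves the single
// compressed class space node (the ReservedSpace is expected to come from
// the CCS initialization code in Metaspace).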
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(false),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0),
                                   _envelope_lo((address)max_uintx),
                                   _envelope_hi(NULL) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _is_class(true),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0),
                                   _envelope_lo((address)max_uintx),
                                   _envelope_hi(NULL) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    expand_envelope_to_include_node(class_entry);
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(class_entry);
  }
}

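// Note: this returns the free space in the current node only, not the sum
// over all nodes in the list.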
size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    expand_envelope_to_include_node(new_entry);
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    DEBUG_ONLY(verify(false);)
    return true;
  }
}

void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}
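
// The before/after bookkeeping above keeps the list-level _committed_words
// counter in sync with the node no matter how much the node actually
// committed: anywhere between min_words and preferred_words, or nothing at
// all if the underlying space was pre-committed.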

bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}
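
// expand_by() thus tries, in order: (1) commit more memory in the current
// node; (2) retire the current node, map a fresh one and commit there.
// Expansion is gated twice by MetaspaceGC: a hard can_expand() check and a
// soft allowed_expansion() budget that may force a GC before further growth.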

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}
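
// Worked example (assuming the default non-class chunk geometry, where
// SpecializedChunk is 128 words and MediumChunk is 8K words): allocating a
// medium chunk may require up to 8192 - 128 = 8064 words of padding chunks
// in front of it, while a humongous chunk requires none.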


Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}
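
// Note for callers: a NULL result means the list could neither satisfy the
// request from the current node nor expand (the MetaspaceGC policy refused,
// or reserving/committing memory failed); callers higher up typically
// respond with a GC attempt or an out-of-memory report.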

void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
      _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}

// Given a node, expand the envelope range such that it includes the node.
void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) {
  _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary());
  _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary());
}
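
// The envelope only ever grows; purge() does not shrink it when nodes are
// deleted. That keeps the lock-free check in find_enclosing_space() safe: a
// stale, too-large envelope can only produce false positives, which the
// per-node contains() test then rejects.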


#ifdef ASSERT
void VirtualSpaceList::verify(bool slow) {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  size_t reserved = 0;
  size_t committed = 0;
  size_t node_count = 0;
  while (iter.repeat()) {
    VirtualSpaceNode* node = iter.get_next();
    if (slow) {
      node->verify(true);
    }
    // Check that the node resides fully within our envelope.
    assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
           "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
           node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
    reserved += node->reserved_words();
    committed += node->committed_words();
    node_count++;
  }
  assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
      "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
      ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
      ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
      reserved, reserved_words(), committed, committed_words(),
      node_count, _virtual_space_count);
}
#endif // ASSERT

} // namespace metaspace