/*
 * virtualSpaceList.cpp
 *
 *  Created on: May 6, 2018
 *      Author: thomas
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {
namespace internals {


VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

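// Accounting: updates to the reserved and committed counters and to the node
// count are all protected by the MetaspaceExpand_lock.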
void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}

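// Total committed metaspace memory must never exceed MaxMetaspaceSize.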
#define assert_committed_below_limit()                          \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT   \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

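// Retire the current node before allocation continues in a new node; its
// remaining space is handed back via VirtualSpaceNode::retire() to the chunk
// manager of this space (class or non-class).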
void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

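// Create a list for non-class metaspace and eagerly reserve and link an
// initial node of word_size words.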
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

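// Create the list for the compressed class space from an already-reserved
// space. The class space list consists of this single node only (see the
// assert in create_new_virtual_space()).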
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

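// Free space, in bytes, remaining in the current node.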
size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    return true;
  }
}

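// Append new_entry to the list, make it the current node and update the
// reserved, committed and node counters.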
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

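// Commit more memory in the given node (at least min_words, up to
// preferred_words) and account for the words that were actually committed.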
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

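// Expand the committed memory of the list by at least min_words and at most
// preferred_words, subject to the MetaspaceGC expansion policy. The current
// node is tried first; if it cannot be expanded it is retired and a new node
// is created.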
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it"
          " should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
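// For example, a non-humongous chunk of chunk_word_size N is placed at an
// N-aligned boundary, so up to (N - smallest_chunk_size) words of padding
// chunks may be needed in front of it; humongous chunks are placed at
// smallest-chunk-size boundaries and need no padding.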
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}


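// Allocate a chunk of chunk_word_size words. The chunk is carved out of the
// current node; if that fails, the list is expanded (possibly adding a new
// node) and the allocation is retried once.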
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
      _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}

} // namespace internals
} // namespace metaspace