/*
 * virtualSpaceList.cpp
 *
 *  Created on: May 6, 2018
 *      Author: thomas
 */


#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

namespace metaspace {

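// Destroy the list: walk it and delete every VirtualSpaceNode it owns.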
VirtualSpaceList::~VirtualSpaceList() {
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsl = iter.get_next();
    delete vsl;
  }
}

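// Accounting helpers. The reserved/committed word counters track the totals
// over all nodes in the list and may only be changed while holding the
// MetaspaceExpand_lock.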
void VirtualSpaceList::inc_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words + v;
}
void VirtualSpaceList::dec_reserved_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _reserved_words = _reserved_words - v;
}

#define assert_committed_below_limit()                          \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize, \
         "Too much committed memory. Committed: " SIZE_FORMAT   \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,             \
          MetaspaceUtils::committed_bytes(), MaxMetaspaceSize);

void VirtualSpaceList::inc_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words + v;

  assert_committed_below_limit();
}
void VirtualSpaceList::dec_committed_words(size_t v) {
  assert_lock_strong(MetaspaceExpand_lock);
  _committed_words = _committed_words - v;

  assert_committed_below_limit();
}

void VirtualSpaceList::inc_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count++;
}

void VirtualSpaceList::dec_virtual_space_count() {
  assert_lock_strong(MetaspaceExpand_lock);
  _virtual_space_count--;
}

// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count.  Remove Metachunks in
// the node from their respective freelists.
void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
  assert_lock_strong(MetaspaceExpand_lock);
  // Don't use a VirtualSpaceListIterator because this
  // list is being changed and a straightforward use of an iterator is not safe.
  VirtualSpaceNode* purged_vsl = NULL;
  VirtualSpaceNode* prev_vsl = virtual_space_list();
  VirtualSpaceNode* next_vsl = prev_vsl;
  while (next_vsl != NULL) {
    VirtualSpaceNode* vsl = next_vsl;
    DEBUG_ONLY(vsl->verify_container_count();)
    next_vsl = vsl->next();
    // Don't free the current virtual space since it will likely
    // be needed soon.
    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
      DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged));
      // Unlink it from the list
      if (prev_vsl == vsl) {
        // This is the case of the current node being the first node.
        assert(vsl == virtual_space_list(), "Expected to be the first node");
        set_virtual_space_list(vsl->next());
      } else {
        prev_vsl->set_next(vsl->next());
      }

      vsl->purge(chunk_manager);
      dec_reserved_words(vsl->reserved_words());
      dec_committed_words(vsl->committed_words());
      dec_virtual_space_count();
      purged_vsl = vsl;
      delete vsl;
    } else {
      prev_vsl = vsl;
    }
  }
#ifdef ASSERT
  if (purged_vsl != NULL) {
    // List should be stable enough to use an iterator here.
    VirtualSpaceListIterator iter(virtual_space_list());
    while (iter.repeat()) {
      VirtualSpaceNode* vsl = iter.get_next();
      assert(vsl != purged_vsl, "Purge of vsl failed");
    }
  }
#endif
}


// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and not deleted except at
// unloading time during a safepoint.
bool VirtualSpaceList::contains(const void* ptr) {
  // List should be stable enough to use an iterator here because removing virtual
  // space nodes is only allowed at a safepoint.
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    VirtualSpaceNode* vsn = iter.get_next();
    if (vsn->contains(ptr)) {
      return true;
    }
  }
  return false;
}

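// Retire the node we are currently allocating from by handing it to the
// matching (class or non-class) ChunkManager, so that its leftover committed
// space can be reused as free chunks.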
void VirtualSpaceList::retire_current_virtual_space() {
  assert_lock_strong(MetaspaceExpand_lock);

  VirtualSpaceNode* vsn = current_virtual_space();

  ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() :
                                  Metaspace::chunk_manager_metadata();

  vsn->retire(cm);
}

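// Create an expandable list for the non-class metaspace and reserve an initial
// virtual space node of word_size words.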
VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                   _is_class(false),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  create_new_virtual_space(word_size);
}

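// Create the list for the compressed class space. It consists of exactly one
// node wrapping the pre-reserved space rs; see create_new_virtual_space(),
// which refuses to add further nodes for the class space.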
VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
                                   _is_class(true),
                                   _virtual_space_list(NULL),
                                   _current_virtual_space(NULL),
                                   _reserved_words(0),
                                   _committed_words(0),
                                   _virtual_space_count(0) {
  MutexLockerEx cl(MetaspaceExpand_lock,
                   Mutex::_no_safepoint_check_flag);
  VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs);
  bool succeeded = class_entry->initialize();
  if (succeeded) {
    link_vs(class_entry);
  }
}

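// Free space, in bytes, remaining in the node we are currently allocating from.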
size_t VirtualSpaceList::free_bytes() {
  return current_virtual_space()->free_words_in_vs() * BytesPerWord;
}

// Allocate another meta virtual space and add it to the list.
bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  assert_lock_strong(MetaspaceExpand_lock);

  if (is_class()) {
    assert(false, "We currently don't support more than one VirtualSpace for"
                  " the compressed class space. The initialization of the"
                  " CCS uses another code path and should not hit this path.");
    return false;
  }

  if (vs_word_size == 0) {
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
    return false;
  }

  // Reserve the space
  size_t vs_byte_size = vs_word_size * BytesPerWord;
  assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment());

  // Allocate the meta virtual space and initialize it.
  VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size);
  if (!new_entry->initialize()) {
    delete new_entry;
    return false;
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
        "Reserved memory size differs from requested memory size");
    // Ensure that lock-free iteration sees a fully initialized node.
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    return true;
  }
}

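// Append new_entry to the list, make it the current node, and update the
// reserved/committed accounting.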
void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  if (virtual_space_list() == NULL) {
    set_virtual_space_list(new_entry);
  } else {
    current_virtual_space()->set_next(new_entry);
  }
  set_current_virtual_space(new_entry);
  inc_reserved_words(new_entry->reserved_words());
  inc_committed_words(new_entry->committed_words());
  inc_virtual_space_count();
#ifdef ASSERT
  new_entry->mangle();
#endif
  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    VirtualSpaceNode* vsl = current_virtual_space();
    ResourceMark rm;
    vsl->print_on(&ls);
  }
}

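// Commit more memory in the given node and account for the newly committed
// words. Returns whether the underlying expansion succeeded.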
bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
                                      size_t min_words,
                                      size_t preferred_words) {
  size_t before = node->committed_words();

  bool result = node->expand_by(min_words, preferred_words);

  size_t after = node->committed_words();

  // after and before can be the same if the memory was pre-committed.
  assert(after >= before, "Inconsistency");
  inc_committed_words(after - before);

  return result;
}

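// Try to make at least min_words (preferably preferred_words) of committed
// space available: first grow the current node; failing that, retire it and
// create a new node. Returns false if expansion is not allowed or fails.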
bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  assert_is_aligned(min_words,       Metaspace::commit_alignment_words());
  assert_is_aligned(preferred_words, Metaspace::commit_alignment_words());
  assert(min_words <= preferred_words, "Invalid arguments");

  const char* const class_or_not = (is_class() ? "class" : "non-class");

  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
              class_or_not);
    return false;
  }

  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  if (allowed_expansion_words < min_words) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
              class_or_not);
    return false;
  }

  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);

  // Commit more memory from the current virtual space.
  bool vs_expanded = expand_node_by(current_virtual_space(),
                                    min_words,
                                    max_expansion_words);
  if (vs_expanded) {
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
              class_or_not);
    return true;
  }
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
            class_or_not);
  retire_current_virtual_space();

  // Get another virtual space.
  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words());

  if (create_new_virtual_space(grow_vs_words)) {
    if (current_virtual_space()->is_pre_committed()) {
      // The memory was pre-committed, so we are done here.
      assert(min_words <= current_virtual_space()->committed_words(),
          "The new VirtualSpace was pre-committed, so it "
          "should be large enough to fit the alloc request.");
      return true;
    }

    return expand_node_by(current_virtual_space(),
                          min_words,
                          max_expansion_words);
  }

  return false;
}

// Given a chunk, calculate the largest possible padding space which
// could be required when allocating it.
static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) {
  const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class);
  if (chunk_type != HumongousIndex) {
    // Normal, non-humongous chunks are allocated at chunk size
    // boundaries, so the largest padding space required would be that
    // minus the smallest chunk size.
    const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk;
    return chunk_word_size - smallest_chunk_size;
  } else {
    // Humongous chunks are allocated at smallest-chunksize
    // boundaries, so there is no padding required.
    return 0;
  }
}


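// Hand out a new chunk of chunk_word_size words, expanding the list first if
// the current node cannot satisfy the request. The expansion request covers
// the chunk plus worst-case padding, rounded up to the commit granularity.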
Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) {

  // Allocate a chunk out of the current virtual space.
  Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size);

  if (next != NULL) {
    return next;
  }

  // The expand amount is currently only determined by the requested sizes
  // and not how much committed memory is left in the current virtual space.

  // We must have enough space for the requested size and any
  // additional required padding chunks.
  const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class());

  size_t min_word_size       = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words());
  size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words());
  if (min_word_size >= preferred_word_size) {
    // Can happen when humongous chunks are allocated.
    preferred_word_size = min_word_size;
  }

  bool expanded = expand_by(min_word_size, preferred_word_size);
  if (expanded) {
    next = current_virtual_space()->get_chunk_vs(chunk_word_size);
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  }

  return next;
}

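// Print a one-line summary of the list followed by each node, using the given
// size scale.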
void VirtualSpaceList::print_on(outputStream* st, size_t scale) const {
  st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT,
      _virtual_space_count, p2i(_current_virtual_space));
  VirtualSpaceListIterator iter(virtual_space_list());
  while (iter.repeat()) {
    st->cr();
    VirtualSpaceNode* node = iter.get_next();
    node->print_on(st, scale);
  }
}

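// Print the chunk map of every node in the list.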
void VirtualSpaceList::print_map(outputStream* st) const {
  VirtualSpaceNode* list = virtual_space_list();
  VirtualSpaceListIterator iter(list);
  unsigned i = 0;
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
}


} // namespace metaspace