/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"

#include "memory/metaspace/msChunkHeaderPool.hpp"
#include "memory/metaspace/msChunklevel.hpp"
#include "memory/metaspace/msCommitLimiter.hpp"
#include "memory/metaspace/msCommon.hpp"
#include "memory/metaspace/msCounter.hpp"
#include "memory/metaspace/msFreeChunkList.hpp"
#include "memory/metaspace/msInternalStats.hpp"
#include "memory/metaspace/msMetachunk.hpp"
#include "memory/metaspace/msRootChunkArea.hpp"
#include "memory/metaspace/msRunningCounters.hpp"
#include "memory/metaspace/msSettings.hpp"
#include "memory/metaspace/msVirtualSpaceNode.hpp"
#include "memory/metaspace.hpp"

#include "runtime/globals.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#define LOGFMT         "VsListNode @" PTR_FORMAT " base " PTR_FORMAT " "
#define LOGFMT_ARGS    p2i(this), p2i(_base)

#ifdef ASSERT
void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  assert(is_aligned(p, Settings::commit_granule_bytes()),
         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
         p2i(p));
}
void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  assert(is_aligned(word_size, Settings::commit_granule_words()),
         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
}
#endif
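
// Note: a commit granule is the smallest unit by which metaspace memory is
// committed and uncommitted; the helpers above enforce granule alignment in
// debug builds.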

// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// This function will:
// - check how many granules in that region are uncommitted; if all of them are
//   committed, it returns true immediately.
// - check if committing the uncommitted granules would bring us over the commit
//   limit (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  // By how many words the commit charge would increase were we to
  // commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;
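  // (Illustrative example: with a granule size of G words, committing a range
  //  spanning four granules of which one is already committed raises the commit
  //  charge by 3 * G words; only this increase is checked against the limit below.)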

  UL2(debug, "committing range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (commit_increase_words == 0) {
    UL(debug, "... already fully committed.");
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    UL(debug, "... cannot commit (limit).");
    return false;
  }

  // Commit...
  if (os::commit_memory((char*)p, word_size * BytesPerWord, false) == false) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(p, p + word_size);
  }


  UL2(debug, "... committed " SIZE_FORMAT " additional words.", commit_increase_words);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The commit boundary maintained in the CommitLimiter should be equal to the sum of
  // committed words in both the class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_committed();

  return true;

}
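
// A minimal usage sketch for commit_range() (hypothetical caller; assumes the
// expand lock is taken and the range is granule-aligned):
//
//   MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   if (!node->commit_range(p, word_size)) {
//     // Hit a commit limit (GC threshold or MaxMetaspaceSize) - the caller
//     // backs off, e.g. by triggering a GC and retrying.
//   }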

// Given an address range, ensure it is committed.
//
// The range does not have to be aligned to granule size; however, the function
// will always commit whole granules.
//
// This function will:
// - check how many granules in that region are uncommitted; if all of them are
//   committed, it returns true immediately.
// - check if committing the uncommitted granules would bring us over the commit
//   limit (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// !! Careful:
//    Calling ensure_range_is_committed() on a range which contains both committed
//    and uncommitted areas will commit the whole area, thus erasing the content
//    of the existing committed parts. Make sure you never call this on an address
//    range containing live data. !!
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {

  assert_lock_strong(MetaspaceExpand_lock);
  assert(p != NULL && word_size > 0, "Sanity");

  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());
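  // (Illustrative example: a requested range starting in the middle of one
  //  granule and ending in the middle of the next is widened outward so that
  //  both granules are committed completely.)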

  // Todo: simple for now. Make it more intelligent later.
  return commit_range(p_start, p_end - p_start);

}

// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
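//
// Note: only the words that are actually committed according to the commit mask
// are subtracted from the commit charge, but the whole range is passed to
// os::uncommit_memory().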
void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First calculate how large the committed regions in this range are
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  UL2(debug, "uncommitting range " PTR_FORMAT ".." PTR_FORMAT " (" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (committed_words_in_range == 0) {
    UL(debug, "... already fully uncommitted.");
    return; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (os::uncommit_memory((char*)p, word_size * BytesPerWord) == false) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  UL2(debug, "... uncommitted " SIZE_FORMAT " words.", committed_words_in_range);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The commit boundary maintained in the CommitLimiter should be equal to the sum of
  // committed words in both the class and non-class vslist (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_uncommitted();

}

//// creation, destruction ////

VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
                                   SizeCounter* reserve_counter, SizeCounter* commit_counter)
  : _next(NULL),
    _rs(rs),
    _owns_rs(owns_rs),
    _base((MetaWord*)rs.base()),
    _word_size(rs.size() / BytesPerWord),
    _used_words(0),
    _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _commit_limiter(limiter),
    _total_reserved_words_counter(reserve_counter),
    _total_committed_words_counter(commit_counter)
{
  UL2(debug, "born (word_size " SIZE_FORMAT ").", _word_size);

  // Update reserved counter in vslist
  _total_reserved_words_counter->increment_by(_word_size);

  assert_is_aligned(_base, chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert_is_aligned(_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);

}

// Create a node of a given size (it will create its own space).
VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
                                                CommitLimiter* limiter, SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{

  DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)

  ReservedSpace rs(word_size * BytesPerWord,
                   Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
                   false // large
                   );

  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }

  assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);

  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);

}

// Create a node over an existing space.
VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter,
                                                SizeCounter* reserve_words_counter, SizeCounter* commit_words_counter)
{
  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, false, limiter, reserve_words_counter, commit_words_counter);
}

VirtualSpaceNode::~VirtualSpaceNode() {

  DEBUG_ONLY(verify_locked();)

  UL(debug, ": dies.");

  if (_owns_rs) {
    _rs.release();
  }

  // Update counters in vslist
  size_t committed = committed_words();
  _total_committed_words_counter->decrement_by(committed);
  _total_reserved_words_counter->decrement_by(_word_size);

  // ... and tell commit limiter
  _commit_limiter->decrease_committed(committed);

  InternalStats::inc_num_vsnodes_deaths();

}

//// Chunk allocation, splitting, merging /////

// Allocate a root chunk from this node. Will fail and return NULL if the node
//  is full, i.e. if we used up the whole address space of this node's memory
//  region (in case this node backs the compressed class space, this is how we
//  hit CompressedClassSpaceSize).
// Note that this just returns reserved memory; the caller must take care of
//  committing this chunk before using it.
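//
// (Illustrative example: assuming a root chunk size of 4 MB, an 8 MB node can
//  hand out at most two root chunks before reporting itself as full.)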
Metachunk* VirtualSpaceNode::allocate_root_chunk() {

  assert_lock_strong(MetaspaceExpand_lock);

  assert_is_aligned(free_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  if (free_words() >= chunklevel::MAX_CHUNK_WORD_SIZE) {

    MetaWord* loc = _base + _used_words;
    _used_words += chunklevel::MAX_CHUNK_WORD_SIZE;

    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);

    // Create a root chunk header and initialize it.
    Metachunk* c = rca->alloc_root_chunk_header(this);

    assert(c->base() == loc && c->vsnode() == this &&
           c->is_free(), "Sanity");

    DEBUG_ONLY(c->verify();)

    UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));

    return c;

  }

  return NULL; // Node is full.

}

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
//
// The resulting target chunk resides at the same address as the original chunk.
// The resulting splinters are added to the freelists.
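//
// (Illustrative example: splitting a root chunk (level 0) down to target level 2
//  first halves it into two level-1 buddies, then halves the leading level-1
//  chunk into two level-2 buddies. The caller gets the level-2 chunk at the
//  original base; the trailing level-1 and level-2 buddies are the splinters
//  that go to the freelists.)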
void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkListVector* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  // Get the area associated with this chunk and let it handle the splitting
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  rca->split(target_level, c, freelists);

}

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns the address of
// the merged chunk; NULL otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
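//
// (Illustrative example: a free level-2 chunk whose level-2 buddy is also free
//  merges into a level-1 chunk; if that chunk's level-1 buddy is free as well,
//  merging continues, at most up to a full root chunk.)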
Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {

  assert(c != NULL && c->is_free(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the rca associated with this chunk and let it handle the merging
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  Metachunk* c2 = rca->merge(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  return c2;

}

// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
// enlarge it in place by claiming its trailing buddy.
//
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
//
// If successful, the follower chunk will be removed from the freelists and the leader
// chunk c will double in size (its level decreased by one).
//
// Returns true on success, false otherwise.
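//
// (Illustrative example: a level-3 leader whose trailing level-3 buddy sits in
//  the freelists can absorb that buddy and become a level-2 chunk, doubling its
//  word size without relocating its content.)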
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {

  assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the rca associated with this chunk and let it handle the enlarging
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  bool rc = rca->attempt_enlarge_chunk(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  if (rc) {
    InternalStats::inc_num_chunks_enlarged();
  }

  return rc;

}

// Attempts to purge the node:
//
// If all chunks living in this node are free, they will all be removed from
//  the freelist they currently reside in. Then, the node will be deleted.
//
// Returns true if the node has been deleted, false if not.
// !! If this returns true, do not access the node from this point on. !!
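//
// (Illustrative example: after a class loader dies and its chunks are released
//  and merged, a node may consist solely of free root chunks; such a node can
//  be purged and its address space released, provided it owns its ReservedSpace.)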
bool VirtualSpaceNode::attempt_purge(FreeChunkListVector* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  if (!_owns_rs) {
    // We do not allow purging of nodes if we do not own the
    // underlying ReservedSpace (CompressedClassSpace case).
    return false;
  }

  // First find out if all areas are empty. Since empty chunks collapse to root chunk
  // size, if all chunks in this node are free root chunks we are good to go.
  if (!_root_chunk_area_lut.is_free()) {
    return false;
  }

  UL(debug, ": purging.");

  // Okay, we can purge. Before we can do this, we need to remove all chunks from the freelist.
  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea++) {
    RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
    Metachunk* c = ra->first_chunk();
    if (c != NULL) {
      UL2(trace, "removing chunk from to-be-purged node: "
          METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
      assert(c->is_free() && c->is_root_chunk(), "Sanity");
      freelists->remove(c);
    }
  }

  // Now, delete the node, then return right away since this object is invalid.
  delete this;

  return true;

}

void VirtualSpaceNode::print_on(outputStream* st) const {

  size_t scale = K;

  st->print("base " PTR_FORMAT ": ", p2i(base()));
  st->print("reserved=");
  print_scaled_words(st, word_size(), scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);

  st->cr();
  _root_chunk_area_lut.print_on(st);
  _commit_mask.print_on(st);

}

// Returns the size, in words, of committed space in this node alone.
// Note: iterates over the commit mask and hence may be a tad expensive on large nodes.
size_t VirtualSpaceNode::committed_words() const {
  return _commit_mask.get_committed_size();
}

#ifdef ASSERT
void VirtualSpaceNode::verify() const {
  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  verify_locked();
}

volatile int test_access = 0;

// Verify counters and basic structure. Slow mode: verify all chunks in depth.
void VirtualSpaceNode::verify_locked() const {

  assert_lock_strong(MetaspaceExpand_lock);

  assert(base() != NULL, "Invalid base");
  assert(base() == (MetaWord*)_rs.base() &&
         word_size() == _rs.size() / BytesPerWord,
         "Sanity");
  assert_is_aligned(base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert(used_words() <= word_size(), "Sanity");

  // Since we only ever hand out root chunks from a vsnode, top should always be aligned
  // to root chunk size.
  assert_is_aligned(used_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  _commit_mask.verify();

  // Verify memory against commit mask.
  SOMETIMES(
    for (MetaWord* p = base(); p < base() + used_words(); p += os::vm_page_size()) {
      if (_commit_mask.is_committed_address(p)) {
        test_access += *(int*)p;
      }
    }
  )

  assert(committed_words() <= word_size(), "Sanity");
  assert_is_aligned(committed_words(), Settings::commit_granule_words());
  _root_chunk_area_lut.verify();

}

#endif

} // namespace metaspace