/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2020 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "logging/log.hpp"

#include "memory/metaspace/chunkLevel.hpp"
#include "memory/metaspace/chunkHeaderPool.hpp"
#include "memory/metaspace/commitLimiter.hpp"
#include "memory/metaspace/counter.hpp"
#include "memory/metaspace/freeChunkList.hpp"
#include "memory/metaspace/internStat.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/rootChunkArea.hpp"
#include "memory/metaspace/runningCounters.hpp"
#include "memory/metaspace/settings.hpp"
#include "memory/metaspace/virtualSpaceNode.hpp"
#include "memory/metaspace.hpp"

#include "runtime/globals.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"

#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"

namespace metaspace {

#define LOGFMT         "VsListNode @" PTR_FORMAT " base " PTR_FORMAT " "
#define LOGFMT_ARGS    p2i(this), p2i(_base)

#ifdef ASSERT
void check_pointer_is_aligned_to_commit_granule(const MetaWord* p) {
  assert(is_aligned(p, Settings::commit_granule_bytes()),
         "Pointer not aligned to commit granule size: " PTR_FORMAT ".",
         p2i(p));
}
void check_word_size_is_aligned_to_commit_granule(size_t word_size) {
  assert(is_aligned(word_size, Settings::commit_granule_words()),
         "Not aligned to commit granule size: " SIZE_FORMAT ".", word_size);
}
#endif


// Given an address range, ensure it is committed.
//
// The range has to be aligned to granule size.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//   returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//   (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::commit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First, calculate how many words in this range are already committed.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  // The number of words by which we would increase the commit charge
  //  were we to commit the given address range completely.
  const size_t commit_increase_words = word_size - committed_words_in_range;

  UL2(debug, "committing range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (commit_increase_words == 0) {
    UL(debug, "... already fully committed.");
    return true; // Already fully committed, nothing to do.
  }

  // Before committing any more memory, check limits.
  if (_commit_limiter->possible_expansion_words() < commit_increase_words) {
    UL(debug, "... cannot commit (limit).");
    return false;
  }

  // Commit...
  if (!os::commit_memory((char*)p, word_size * BytesPerWord, false)) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to commit metaspace.");
  }

  if (AlwaysPreTouch) {
    os::pretouch_memory(p, p + word_size);
  }

  UL2(debug, "... committed " SIZE_FORMAT " additional words.", commit_increase_words);

  // ... tell commit limiter...
  _commit_limiter->increase_committed(commit_increase_words);

  // ... update counters in containing vslist ...
  _total_committed_words_counter->increment_by(commit_increase_words);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_committed(p, word_size);

#ifdef ASSERT
  // The committed-word count maintained by the CommitLimiter should equal the sum of committed
  // words in both the class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) {
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_committed();

  return true;

}
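
// Usage sketch (illustrative only, not part of this file's API): a caller that
// wants to commit one granule at "p" while honoring the commit limit. The names
// "node" and "p" are hypothetical; the lock discipline matches the asserts above.
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   if (!node->commit_range(p, Settings::commit_granule_words())) {
//     // Hit the commit limit (GC threshold or MaxMetaspaceSize); the caller
//     // must back off, e.g. by triggering a GC and retrying the allocation.
//   }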

// Given an address range, ensure it is committed.
//
// The range does not have to be aligned to granule size; however, the function will
// always commit whole granules.
//
// Function will:
// - check how many granules in that region are uncommitted; if all are committed, it
//   returns true immediately.
// - check if committing those uncommitted granules would bring us over the commit limit
//   (GC threshold, MaxMetaspaceSize); if so, it returns false.
// - commit the memory.
// - mark the range as committed in the commit mask.
//
// !! Careful:
//    calling ensure_range_is_committed on a range which contains both committed and uncommitted
//    areas will commit the whole area, thus erasing the content of the existing committed parts.
//    Make sure you never call this on an address range containing live data. !!
//
// Returns true on success, false if it hit a commit limit.
bool VirtualSpaceNode::ensure_range_is_committed(MetaWord* p, size_t word_size) {

  assert_lock_strong(MetaspaceExpand_lock);
  assert(p != NULL && word_size > 0, "Sanity");

  MetaWord* p_start = align_down(p, Settings::commit_granule_bytes());
  MetaWord* p_end = align_up(p + word_size, Settings::commit_granule_bytes());

  // Todo: simple for now. Make it more intelligent later.
  return commit_range(p_start, p_end - p_start);

}
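
// Illustration (hypothetical numbers): a request to commit 100 words at an
// unaligned address "p" is widened to the enclosing granule boundaries before
// being handed to commit_range(), which is why the warning above matters:
//
//   p_start = align_down(p, Settings::commit_granule_bytes());       // <= p
//   p_end   = align_up(p + 100, Settings::commit_granule_bytes());   // >= p + 100
//   commit_range(p_start, p_end - p_start);  // always whole granules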

// Given an address range (which has to be aligned to commit granule size):
//  - uncommit it
//  - mark it as uncommitted in the commit mask
void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {

  DEBUG_ONLY(check_pointer_is_aligned_to_commit_granule(p);)
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(word_size);)
  assert_lock_strong(MetaspaceExpand_lock);

  // First, calculate how many words in this range are actually committed.
  const size_t committed_words_in_range = _commit_mask.get_committed_size_in_range(p, word_size);
  DEBUG_ONLY(check_word_size_is_aligned_to_commit_granule(committed_words_in_range);)

  UL2(debug, "uncommitting range " PTR_FORMAT ".." PTR_FORMAT "(" SIZE_FORMAT " words)",
      p2i(p), p2i(p + word_size), word_size);

  if (committed_words_in_range == 0) {
    UL(debug, "... already fully uncommitted.");
    return; // Already fully uncommitted, nothing to do.
  }

  // Uncommit...
  if (!os::uncommit_memory((char*)p, word_size * BytesPerWord)) {
    // Note: this can actually happen, since uncommit may increase the number of mappings.
    fatal("Failed to uncommit metaspace.");
  }

  UL2(debug, "... uncommitted " SIZE_FORMAT " words.", committed_words_in_range);

  // ... tell commit limiter...
  _commit_limiter->decrease_committed(committed_words_in_range);

  // ... and global counters...
  _total_committed_words_counter->decrement_by(committed_words_in_range);

  // ... and update the commit mask.
  _commit_mask.mark_range_as_uncommitted(p, word_size);

#ifdef ASSERT
  // The committed-word count maintained by the CommitLimiter should equal the sum of committed
  // words in both the class and non-class vslists (outside gtests).
  if (_commit_limiter == CommitLimiter::globalLimiter()) { // We are outside a test scenario
    assert(_commit_limiter->committed_words() == RunningCounters::committed_words(), "counter mismatch");
  }
#endif

  InternalStats::inc_num_space_uncommitted();

}
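
// Usage sketch (illustrative only): returning a no-longer-needed, granule-aligned
// range to the OS. "node", "p" and "granules" are hypothetical. Unlike commit_range(),
// this cannot fail against a limit, but it aborts the VM if the OS call fails.
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   node->uncommit_range(p, granules * Settings::commit_granule_words());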

//// creation, destruction ////

VirtualSpaceNode::VirtualSpaceNode(ReservedSpace rs, bool owns_rs, CommitLimiter* limiter,
                                   SizeCounter* reserve_counter, SizeCounter* commit_counter)
  : _next(NULL),
    _rs(rs),
    _owns_rs(owns_rs),
    _base((MetaWord*)rs.base()),
    _word_size(rs.size() / BytesPerWord),
    _used_words(0),
    _commit_mask((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _root_chunk_area_lut((MetaWord*)rs.base(), rs.size() / BytesPerWord),
    _commit_limiter(limiter),
    _total_reserved_words_counter(reserve_counter),
    _total_committed_words_counter(commit_counter)
{
  UL2(debug, "born (word_size " SIZE_FORMAT ").", _word_size);

  // Update reserved counter in vslist.
  _total_reserved_words_counter->increment_by(_word_size);

  assert_is_aligned(_base, chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert_is_aligned(_word_size, chunklevel::MAX_CHUNK_WORD_SIZE);

}


// Create a node of a given size (it will create its own space).
VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
                                                CommitLimiter* limiter, SizeCounter* reserve_words_counter,
                                                SizeCounter* commit_words_counter)
{

  DEBUG_ONLY(assert_is_aligned(word_size, chunklevel::MAX_CHUNK_WORD_SIZE);)

  ReservedSpace rs(word_size * BytesPerWord,
                   Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
                   false // large
                   );

  if (!rs.is_reserved()) {
    vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
  }

  assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE);

  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter);

}
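
// Usage sketch (illustrative only): how an owning list might grow by one node.
// "num_root_chunks" and the counter names are hypothetical; counters and limiter
// would normally come from the owning VirtualSpaceList.
//
//   VirtualSpaceNode* node =
//       VirtualSpaceNode::create_node(num_root_chunks * chunklevel::MAX_CHUNK_WORD_SIZE,
//                                     CommitLimiter::globalLimiter(),
//                                     &reserved_words_counter, &committed_words_counter);
//   // node reserved its own memory and will release it in its destructor (owns_rs == true).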

// Create a node over an existing space.
VirtualSpaceNode* VirtualSpaceNode::create_node(ReservedSpace rs, CommitLimiter* limiter,
                                                SizeCounter* reserve_words_counter, SizeCounter* commit_words_counter)
{
  InternalStats::inc_num_vsnodes_births();
  return new VirtualSpaceNode(rs, false, limiter, reserve_words_counter, commit_words_counter);
}

VirtualSpaceNode::~VirtualSpaceNode() {

  DEBUG_ONLY(verify_locked(true);)

  UL(debug, ": dies.");

  if (_owns_rs) {
    _rs.release();
  }

  // Update counters in vslist...
  size_t committed = committed_words();
  _total_committed_words_counter->decrement_by(committed);
  _total_reserved_words_counter->decrement_by(_word_size);

  // ... and tell commit limiter.
  _commit_limiter->decrease_committed(committed);

  InternalStats::inc_num_vsnodes_deaths();

}

//// Chunk allocation, splitting, merging /////

// Allocate a root chunk from this node. Will fail and return NULL if the node is full,
//  i.e. if we have used up the whole address space of this node's memory region
//  (in case this node backs compressed class space, this is how we hit
//   CompressedClassSpaceSize).
// Note that this just returns reserved memory; the caller must take care of committing
//  this chunk before using it.
Metachunk* VirtualSpaceNode::allocate_root_chunk() {

  assert_lock_strong(MetaspaceExpand_lock);

  assert_is_aligned(free_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  if (free_words() >= chunklevel::MAX_CHUNK_WORD_SIZE) {

    MetaWord* loc = _base + _used_words;
    _used_words += chunklevel::MAX_CHUNK_WORD_SIZE;

    RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(loc);

    // Create a root chunk header and initialize it.
    Metachunk* c = rca->alloc_root_chunk_header(this);

    assert(c->base() == loc && c->vsnode() == this &&
           c->is_free(), "Sanity");

    DEBUG_ONLY(c->verify(true);)

    UL2(debug, "new root chunk " METACHUNK_FORMAT ".", METACHUNK_FORMAT_ARGS(c));

    return c;

  }

  return NULL; // Node is full.

}
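
// Usage sketch (illustrative only): a root chunk is reserved-only when handed out,
// so a caller must commit at least part of it before first use. "node" is hypothetical.
//
//   MutexLocker ml(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
//   Metachunk* c = node->allocate_root_chunk();
//   if (c != NULL &&
//       node->ensure_range_is_committed(c->base(), Settings::commit_granule_words())) {
//     // The first granule of the (brand-new, data-free) chunk is now usable.
//   }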

// Given a chunk c, split it recursively until you get a chunk of the given target_level.
//
// The resulting target chunk resides at the same address as the original chunk.
// The resulting splinters are added to the freelists.
void VirtualSpaceNode::split(chunklevel_t target_level, Metachunk* c, FreeChunkListVector* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  // Get the area associated with this chunk and let it handle the splitting.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  rca->split(target_level, c, freelists);

}
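
// Illustration (hypothetical): splitting a free root chunk (level 0) down to
// level 3 keeps the target chunk at the original base address and pushes one
// free buddy of each intermediate size onto the freelists:
//
//   node->split(3, c, freelists);
//   // c is now a level-3 chunk (1/8th of its former size); the freelists
//   // gained one chunk each of levels 1, 2 and 3 (the "splinters").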

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns the address of
// the merged chunk; NULL otherwise.
//
// The merged chunks are removed from the freelists.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* VirtualSpaceNode::merge(Metachunk* c, FreeChunkListVector* freelists) {

  assert(c != NULL && c->is_free(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the RootChunkArea associated with this chunk and let it handle the merging.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  Metachunk* c2 = rca->merge(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  return c2;

}
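
// Usage sketch (illustrative only): folding a freed chunk back into larger ones.
// Note that a non-NULL result replaces the input chunk, per the warning above.
//
//   Metachunk* merged = node->merge(c, freelists);
//   if (merged != NULL) {
//     // c is dead; "merged" is the larger chunk now covering its address range.
//   } else {
//     // No merge happened; c is still valid.
//   }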

// Given a chunk c, which must be "in use" and must not be a root chunk, attempt to
// enlarge it in place by claiming its trailing buddy.
//
// This will only work if c is the leader of the buddy pair and the trailing buddy is free.
//
// If successful, the follower chunk will be removed from the freelists and the leader
// chunk c will double in size (level decreased by one).
//
// Returns true on success, false otherwise.
bool VirtualSpaceNode::attempt_enlarge_chunk(Metachunk* c, FreeChunkListVector* freelists) {

  assert(c != NULL && c->is_in_use() && !c->is_root_chunk(), "Sanity");
  assert_lock_strong(MetaspaceExpand_lock);

  // Get the RootChunkArea associated with this chunk and let it handle the enlargement.
  RootChunkArea* rca = _root_chunk_area_lut.get_area_by_address(c->base());

  bool rc = rca->attempt_enlarge_chunk(c, freelists);

  DEBUG_ONLY(rca->verify_area_is_ideally_merged();)

  if (rc) {
    InternalStats::inc_num_chunks_enlarged();
  }

  return rc;

}
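
// Illustration (hypothetical): in-place growth of a live chunk, e.g. to satisfy an
// allocation without handing out a new chunk. Only the leader of a buddy pair can
// grow, and only while its trailing buddy is still free:
//
//   if (node->attempt_enlarge_chunk(c, freelists)) {
//     // c kept its base address but doubled in word size; its level dropped by one.
//   }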

// Attempts to purge the node:
//
// If all chunks living in this node are free, they will all be removed from
//  the freelists they currently reside in. Then, the node will be deleted.
//
// Returns true if the node has been deleted, false if not.
// !! If this returns true, do not access the node from this point on. !!
bool VirtualSpaceNode::attempt_purge(FreeChunkListVector* freelists) {

  assert_lock_strong(MetaspaceExpand_lock);

  if (!_owns_rs) {
    // We do not allow purging of nodes if we do not own the
    // underlying ReservedSpace (CompressedClassSpace case).
    return false;
  }

  // First find out if all areas are empty. Since free chunks collapse back to root chunk
  // size, if all chunks in this node are free root chunks we are good to go.
  if (!_root_chunk_area_lut.is_free()) {
    return false;
  }

  UL(debug, ": purging.");

  // Okay, we can purge. Before we can do this, we need to remove all chunks from the freelists.
  for (int narea = 0; narea < _root_chunk_area_lut.number_of_areas(); narea++) {
    RootChunkArea* ra = _root_chunk_area_lut.get_area_by_index(narea);
    Metachunk* c = ra->first_chunk();
    if (c != NULL) {
      UL2(trace, "removing chunk from to-be-purged node: "
          METACHUNK_FULL_FORMAT ".", METACHUNK_FULL_FORMAT_ARGS(c));
      assert(c->is_free() && c->is_root_chunk(), "Sanity");
      freelists->remove(c);
    }
  }

  // Now delete the node, then return right away since this object is invalid.
  delete this;

  return true;

}
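
// Usage sketch (illustrative only): how an owning list might shrink metaspace after
// a GC unloaded class loaders. The iteration and unlinking details are hypothetical;
// in practice the owning VirtualSpaceList drives this.
//
//   for (VirtualSpaceNode* n = first_node; n != NULL; n = next) {
//     next = n->next();
//     if (n->attempt_purge(freelists)) {
//       // n has been deleted; unlink it from the list and never touch it again.
//     }
//   }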


void VirtualSpaceNode::print_on(outputStream* st) const {

  size_t scale = K;

  st->print("base " PTR_FORMAT ": ", p2i(base()));
  st->print("reserved=");
  print_scaled_words(st, word_size(), scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, committed_words(), word_size(), scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words(), word_size(), scale);

  st->cr();
  _root_chunk_area_lut.print_on(st);
  _commit_mask.print_on(st);

}

// Returns the size, in words, of committed space in this node alone.
// Note: iterates over the commit mask and hence may be a tad expensive on large nodes.
size_t VirtualSpaceNode::committed_words() const {
  return _commit_mask.get_committed_size();
}

#ifdef ASSERT
void VirtualSpaceNode::verify(bool slow) const {
  MutexLocker fcl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  verify_locked(slow);
}

// Verify counters and basic structure. Slow mode: verify all chunks in depth.
void VirtualSpaceNode::verify_locked(bool slow) const {

  assert_lock_strong(MetaspaceExpand_lock);

  assert(base() != NULL, "Invalid base");
  assert(base() == (MetaWord*)_rs.base() &&
         word_size() == _rs.size() / BytesPerWord,
         "Sanity");
  assert_is_aligned(base(), chunklevel::MAX_CHUNK_BYTE_SIZE);
  assert(used_words() <= word_size(), "Sanity");

  // Since we only ever hand out root chunks from a vsnode, top should always be aligned
  // to root chunk size.
  assert_is_aligned(used_words(), chunklevel::MAX_CHUNK_WORD_SIZE);

  _commit_mask.verify(slow);
  assert(committed_words() <= word_size(), "Sanity");
  assert_is_aligned(committed_words(), Settings::commit_granule_words());
  _root_chunk_area_lut.verify(slow);

}

#endif


} // namespace metaspace