/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

//#include "logging/log.hpp"
#include "memory/metaspace/chunkTree.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

// Our binary tree has 13 levels (the number of chunk levels).
// Since the leaves of this tree are Metachunk structures, we need
// btnode_t structures for the non-leaf (inner) nodes and Metachunk
// structures for the leaf nodes. Thus, a fully expanded binary tree
// (representing a 4MB section fragmented into 4096 1K chunks) would
// need 4095 nodes and 4096 Metachunk structures.
static const u2 nodepool_max_capacity = 4095;
static const u2 nodepool_capacity_increase = 300;

static const u2 chunkpool_max_capacity = 4096;
static const u2 chunkpool_capacity_increase = 300;
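
// For illustration, the arithmetic behind these numbers: a 4MB section fragmented
// down to 1K chunks yields 4MB / 1K = 2^12 = 4096 leaves, and a full binary tree
// with 4096 leaves has 4096 - 1 = 4095 inner nodes. The *_capacity_increase values
// suggest the pools grow lazily in steps of 300 elements rather than being
// preallocated at their maximum capacity.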

ChunkTree::ChunkTree()
  : _root((ref_t)0),
    _nodePool("node pool", nodepool_max_capacity, nodepool_capacity_increase),
    _chunkPool("chunk pool", chunkpool_max_capacity, chunkpool_capacity_increase)
{}

#ifdef ASSERT
void ChunkTree::check_is_valid_node_ref(ref_t ref) const {
  assert(reference_is_node(ref), "invalid node ref");
  const u2 raw_idx = get_raw_index_from_reference(ref);
  assert(_nodePool.is_valid_index(raw_idx), "invalid node ref");
}

void ChunkTree::check_is_valid_chunk_ref(ref_t ref) const {
  assert(reference_is_chunk(ref), "invalid chunk ref");
  const u2 raw_idx = get_raw_index_from_reference(ref);
  assert(_chunkPool.is_valid_index(raw_idx), "invalid chunk ref");
}

void ChunkTree::verify_node(const btnode_t* n) const {

  const ref_t node_ref = encode_reference_for_node(n);

  // Check parent.
  const ref_t parent_ref = n->parent;

  if (parent_ref == 0) {
    assert(node_ref == _root, "This should be the root node");
  } else {
    btnode_t* parent_node = resolve_reference_to_node(parent_ref);
    assert(parent_node->child[0] == node_ref ||
           parent_node->child[1] == node_ref, "Incorrect parent backlink.");
  }

  // Check children.
  for (int i = 0; i < 2; i ++) {
    const ref_t child_ref = n->child[i];
    assert(child_ref != 0, "child ref null");

    // May be either a chunk or another node.
    if (reference_is_chunk(child_ref)) {
      Metachunk* c = resolve_reference_to_chunk(child_ref);
      assert(c->tree_node_ref() == node_ref, "Incorrect parent backlink.");
    } else {
      // This child is another node; use a distinct name to avoid shadowing the outer "n".
      btnode_t* child_node = resolve_reference_to_node(child_ref);
      assert(child_node->parent == node_ref, "Incorrect parent backlink.");
    }
  }

}
#endif


// Initialize: allocate a root chunk header and hang it into the tree as its root;
// return the root chunk header. It will be only partly initialized.
// Note: this just allocates a memory-less header; the memory itself is allocated inside a VirtualSpaceNode.
Metachunk* ChunkTree::alloc_root_chunk_header() {

  assert(_root == 0, "already have a root");

  Metachunk* c = _chunkPool.allocate_element();
  c->wipe();

  c->set_level(chklvl::ROOT_CHUNK_LEVEL);

  _root = encode_reference_for_chunk(c);

  return c;

}

// Given a chunk c, split it once.
//
// The original chunk must not be part of a freelist.
//
// Returns a pointer to the resulting (leader) chunk; records the splintered-off
// chunk in the splinters array.
//
// Returns NULL if the chunk cannot be split any further.
Metachunk* ChunkTree::split_once(Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) {

  assert(c->is_free(), "Can only split free chunks.");

  DEBUG_ONLY(verify(true, NULL);)

  if (c->level() == chklvl::HIGHEST_CHUNK_LEVEL) {
    return NULL;
  }

  const ref_t orig_chunk_ref = encode_reference_for_chunk(c);

  // Split chunk into two chunks.
  Metachunk* leader = c;
  Metachunk* follower = allocate_new_chunk();
  follower->wipe();

  const chklvl_t old_level = c->level();
  const chklvl_t new_level = old_level + 1;

  const size_t new_word_size = chklvl::word_size_for_level(new_level);
  assert(new_word_size == c->word_size() / 2, "Sanity");

  const size_t old_committed_words = c->committed_words();
  assert(old_committed_words <= c->word_size(), "Sanity");

  const ref_t old_parent_ref = c->tree_node_ref();

  leader->set_level(new_level);
  follower->set_level(new_level);

  follower->set_base(c->base() + new_word_size);

  // Carry over the commit boundary to the new chunks; this is an optimization
  // to avoid too frequent commit requests.
  if (old_committed_words > new_word_size) {
    // More than half of the old chunk was committed: the new leader chunk is
    // fully committed, the follower chunk is committed as far as the old chunk was.
    leader->set_committed_words(new_word_size);
    follower->set_committed_words(old_committed_words - new_word_size);
  } else {
    // Half or less of the old chunk was committed: the new leader chunk is
    // committed as far as the old chunk was, the new follower chunk
    // is not committed at all (as far as we know).
    leader->set_committed_words(old_committed_words);
    follower->set_committed_words(0);
  }
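
  // Worked example (illustration only, numbers assumed): splitting a 4MB chunk
  // of which 3MB are committed yields a 2MB leader that is fully committed and
  // a 2MB follower with 1MB committed; splitting a 4MB chunk with only 1MB
  // committed yields a 2MB leader with 1MB committed and an uncommitted follower.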

  // Create a new parent node for the two new sibling chunks...
  btnode_t* const new_parent = allocate_new_node();
  const ref_t new_parent_ref = encode_reference_for_node(new_parent);

  // ... wire the new siblings up to their new parent ...
  new_parent->child[0] = encode_reference_for_chunk(leader);
  leader->set_tree_node_ref(new_parent_ref);
  new_parent->child[1] = encode_reference_for_chunk(follower);
  follower->set_tree_node_ref(new_parent_ref);

  // ... and hang the new parent node into the tree where the old chunk
  //     was hanging before ...

  if (_root == orig_chunk_ref) {
    // ... which was a root chunk, so wire up root.
    assert(old_level == chklvl::ROOT_CHUNK_LEVEL, "Original chunk should have been root chunk.");
    assert(old_parent_ref == 0, "Original chunk was root and should not have had a parent.");
    _root = new_parent_ref;
    new_parent->parent = 0;
  } else {
    // ... in the old parent (now the grandparent) node, replace the child
    // reference to the original chunk with the reference to the new parent
    // node.
    btnode_t* const old_parent = resolve_reference_to_node(old_parent_ref);
    if (old_parent->child[0] == orig_chunk_ref) {
      old_parent->child[0] = new_parent_ref;
    } else if (old_parent->child[1] == orig_chunk_ref) {
      old_parent->child[1] = new_parent_ref;
    } else {
      ShouldNotReachHere();
    }
    new_parent->parent = old_parent_ref;
  }

  // Finally, remember the splintered-off chunk in the splinters array. Since each
  // split operation produces exactly one splintered-off chunk, and each split
  // operation happens on a different chunk level, we should only ever have one
  // splintered-off chunk per level.
  assert(splinters[new_level] == NULL, "Sanity");
  splinters[new_level] = follower;

  DEBUG_ONLY(verify(true, NULL);)

  return c;

}

// Given a chunk, attempt to merge it with its sibling if that sibling is free.
// Returns a pointer to the resulting chunk if successful, NULL otherwise.
//
// Returns the number of merged chunks, by chunk level, in the num_merged array.
// These numbers include the original chunk.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* ChunkTree::merge_once(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) {

  assert(c->is_free(), "Only call for free chunks.");
  DEBUG_ONLY(verify(true, NULL);)

  const chklvl_t orig_level = c->level();

  // If the chunk is already a root chunk, we cannot merge any further.
  if (orig_level == chklvl::ROOT_CHUNK_LEVEL) {
    return NULL;
  }

  const ref_t orig_chunk_ref = encode_reference_for_chunk(c);

  // Get parent node and parent ref.
  const ref_t parent_ref = c->tree_node_ref();
  btnode_t* const parent = resolve_reference_to_node(parent_ref);

  // Check if the sibling is free and not splintered.
  const int sibling_position = parent->child[0] == orig_chunk_ref ? 1 : 0;
  const ref_t sibling_ref = parent->child[sibling_position];
  Metachunk* c2 = resolve_reference_to_chunk_or_null(sibling_ref);
  if (c2 == NULL) {
    // The sibling is a node, not a chunk; hence it is splintered and we cannot
    // merge. We are done here.
    assert(reference_is_node(sibling_ref), "Sanity");
    return NULL;
  }
  DEBUG_ONLY(c2->verify(true);)
  if (!c2->is_free()) {
    // The sibling is not free. We cannot merge either.
    return NULL;
  }

  // Merge the chunks.

  // At this point, both metachunks should be free and not splintered.
  assert(c->is_free() && c2->is_free() && c->level() == c2->level(), "Sanity");

  // Find out which chunk is the leader; its Metachunk structure is reused
  // for the merged chunk.
  Metachunk* leader = c;
  Metachunk* follower = c2;

  if (c->base() > c2->base()) {
    leader = c2; follower = c;
  }

  // The chunks' memory ranges should be adjacent to each other.
  assert(leader->base() + leader->word_size() == follower->base(), "Wrong geometry");

  leader->set_level(orig_level - 1);

  // Carry over committed words.
  size_t new_committed_words = leader->committed_words();
  if (leader->is_fully_committed()) {
    new_committed_words += follower->committed_words();
  }
  assert(new_committed_words <= leader->word_size(), "Sanity");
  leader->set_committed_words(new_committed_words);
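
  // (An illustrative example with assumed numbers: merging a fully committed 1MB
  //  leader with a 1MB follower of which 256K are committed yields a 2MB chunk
  //  with 1.25MB counted as committed. If the leader is not fully committed, only
  //  the leader's committed portion is carried over, since committed_words appears
  //  to track a single committed prefix starting at the chunk base.)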

  const ref_t leader_ref = encode_reference_for_chunk(leader);

  // Re-hang the newly merged chunk in the tree: it replaces the old parent node of the merged siblings.
  const ref_t grand_parent_ref = parent->parent;
  if (grand_parent_ref == 0) {
    // The old parent node was the root node; that means the merged chunk is now a root chunk.
    assert(leader->level() == chklvl::ROOT_CHUNK_LEVEL, "Sanity");
    // Hang it directly under _root...
    _root = leader_ref;
    leader->set_tree_node_ref(0);
  } else {
    btnode_t* grand_parent = resolve_reference_to_node(grand_parent_ref);
    if (grand_parent->child[0] == parent_ref) {
      grand_parent->child[0] = leader_ref;
    } else if (grand_parent->child[1] == parent_ref) {
      grand_parent->child[1] = leader_ref;
    } else {
      ShouldNotReachHere();
    }
    leader->set_tree_node_ref(grand_parent_ref);
  }

  // Remove the follower from its free list.
  follower->remove_from_list();

  // Adjust stats.
  num_merged[orig_level] ++;

  // Release the superfluous chunk header and the old parent node.
  release_chunk(follower);
  release_node(parent);

  DEBUG_ONLY(verify(true, NULL);)

  return leader;

}

// Given a chunk c, split it recursively until a chunk of the given target_level results.
//
// The original chunk must not be part of a freelist.
//
// Returns a pointer to the resulting chunk; returns the split-off chunks in the splinters array.
//
// target_level must be a valid level deeper than the chunk's current level, so the
// chunk is split at least once.
Metachunk* ChunkTree::split(chklvl_t target_level, Metachunk* c, Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS]) {

  DEBUG_ONLY(chklvl::check_valid_level(target_level));
  assert(target_level > c->level(), "Wrong target level");

  Metachunk* c_last = split_once(c, splinters);
  while (c_last != NULL && c_last->level() < target_level) {
    c_last = split_once(c, splinters);
  }

  assert(c != NULL, "Sanity");

  return c;

}
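
// A rough usage sketch (not actual caller code; "tree" and "target_level" are placeholders):
// the caller provides a NULL-initialized splinters array, takes the returned chunk, and is
// expected to deal with the splintered-off buddy chunks, e.g. by adding them to the free
// lists of their respective levels.
//
//   Metachunk* splinters[chklvl::NUM_CHUNK_LEVELS] = { NULL };
//   Metachunk* result = tree->split(target_level, c, splinters);
//   // result == c, now at target_level; splinters[lvl], if non-NULL, is the chunk
//   // split off while passing level lvl.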

// Given a chunk, attempt to merge it recursively with its neighboring chunks.
//
// If successful (merged at least once), returns the address of
// the merged chunk; NULL otherwise.
//
// The merged chunks are removed from their freelists; the number of merged chunks is
// returned, split by level, in the num_merged array. Note that these numbers do not
// include the original chunk.
//
// !!! Please note that if this method returns a non-NULL value, the
// original chunk will be invalid and should not be accessed anymore! !!!
Metachunk* ChunkTree::merge(Metachunk* c, int num_merged[chklvl::NUM_CHUNK_LEVELS]) {

  DEBUG_ONLY(c->verify(false);)
  assert(c->is_free(), "Only merge free chunks.");

  assert(c->level() > chklvl::ROOT_CHUNK_LEVEL, "Do not merge root chunks");

  // The original chunk must have been removed from the chunk manager freelist
  // before attempting to merge.
  assert(c->prev() == NULL && c->next() == NULL, "Remove chunk from freelist before merging.");

  // Merge upward level by level. Note that after a successful merge_once() the
  // chunk passed in may have been released (if it was the follower), so continue
  // with the chunk returned by the last successful merge, and keep that chunk if
  // a further merge attempt fails.
  Metachunk* c_last = merge_once(c, num_merged);
  while (c_last != NULL && c_last->level() > chklvl::ROOT_CHUNK_LEVEL) {
    Metachunk* const c_next = merge_once(c_last, num_merged);
    if (c_next == NULL) {
      break;
    }
    c_last = c_next;
  }

  return c_last;
}
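
// A rough usage sketch (not actual caller code; "tree" is a placeholder): the caller
// removes the chunk from its freelist first, then attempts the merge and, on success,
// continues to work with the returned (larger) chunk instead of the original one.
//
//   int num_merged[chklvl::NUM_CHUNK_LEVELS] = { 0 };
//   Metachunk* merged = tree->merge(c, num_merged);
//   if (merged != NULL) {
//     // c is invalid now; "merged" is the surviving chunk, and num_merged[]
//     // holds the per-level merge counts.
//   }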


//// tree traversal ////

bool ChunkTree::iterate_chunks_helper(ref_t ref, ChunkClosure* cc) const {
  if (reference_is_chunk(ref)) {
    Metachunk* const c = resolve_reference_to_chunk(ref);
    return cc->do_chunk(c);
  } else {
    btnode_t* const n = resolve_reference_to_node(ref);
    assert(n->child[0] != 0 && n->child[1] != 0, "nodes always have children");
    return iterate_chunks_helper(n->child[0], cc) &&
           iterate_chunks_helper(n->child[1], cc);
  }
}

// Iterate over all chunks in this tree. Returns true for complete traversal,
// false if traversal was cancelled.
bool ChunkTree::iterate_chunks(ChunkClosure* cc) const {

  if (_root == 0) {
    return true;
  }

  return iterate_chunks_helper(_root, cc);
}
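
// A minimal sketch of a closure ("tree" below is a placeholder), assuming that
// ChunkClosure::do_chunk(Metachunk*) is a virtual method returning bool, as it is
// used above (returning false cancels the traversal):
//
//   struct ChunkCounter : public ChunkClosure {
//     int _count;
//     ChunkCounter() : _count(0) {}
//     bool do_chunk(Metachunk* c) { _count ++; return true; }
//   };
//
//   ChunkCounter counter;
//   tree->iterate_chunks(&counter);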



#ifdef ASSERT

// Helper for verify().
void ChunkTree::verify_helper(bool slow, ref_t ref, const MetaWord* p, int* num_chunks, int* num_nodes) const {

  if (reference_is_node(ref)) {

    // A node.

    const btnode_t* const n = resolve_reference_to_node(ref);

    verify_node(n);

    (*num_nodes) ++;

    // Verify the children recursively.
    verify_helper(slow, n->child[0], p, num_chunks, num_nodes);
    verify_helper(slow, n->child[1], p, num_chunks, num_nodes);

  } else {

    // A chunk.

    const Metachunk* const c = resolve_reference_to_chunk(ref);

    assert(c->is_free() == reference_is_chunk(ref), "free info mismatch");

    // Chunks in the tree should be ordered by the base address of the range
    // they represent, and there should be no address holes.
    if (p == NULL) {
      p = c->base() + c->word_size();
    } else {
      assert(c->base() == p, "Chunk base address mismatch.");
    }

    c->verify(slow);

    (*num_chunks) ++;

  }

}

void ChunkTree::verify(bool slow, const MetaWord* base) const {

  int num_chunks = 0;
  int num_nodes = 0;

  verify_helper(slow, _root, base, &num_chunks, &num_nodes);

  // Number of chunks in the tree should be equal to the number of chunks
  // taken from the pool; same for nodes.
  assert(num_chunks == _chunkPool.used(), "chunk count mismatch");
  assert(num_nodes == _nodePool.used(), "node count mismatch");

}

#endif // ASSERT

// Returns the footprint of this tree, in words.
size_t ChunkTree::memory_footprint_words() const {
  return _nodePool.memory_footprint_words() + _chunkPool.memory_footprint_words() +
         (sizeof(ChunkTree) / BytesPerWord);
}


//// ChunkTreeArray ////////////////

// Create an array of ChunkTree pointers, all initialized to NULL, covering
// a given memory range. The memory range must be aligned to the size of root chunks.
ChunkTreeArray::ChunkTreeArray(const MetaWord* base, size_t word_size)
  : _base(base), _word_size(word_size),
    _arr(NULL), _num(0)
{
  assert(is_aligned(_word_size, chklvl::MAX_CHUNK_WORD_SIZE), "not aligned");
  _num = _word_size / chklvl::MAX_CHUNK_WORD_SIZE;
  _arr = NEW_C_HEAP_ARRAY(ChunkTree*, _num, mtInternal);
  for (int i = 0; i < _num; i ++) {
    _arr[i] = NULL;
  }
}
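
// (Presumably each array slot corresponds to one root-chunk-sized area: for a range
//  of word_size words the array has word_size / chklvl::MAX_CHUNK_WORD_SIZE slots,
//  and slot i would cover the area starting at _base + i * chklvl::MAX_CHUNK_WORD_SIZE.
//  The trees themselves are not created here; all slots start out NULL.)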

ChunkTreeArray::~ChunkTreeArray() {
  for (int i = 0; i < _num; i ++) {
    if (_arr[i] != NULL) {
      delete _arr[i];
    }
  }
  FREE_C_HEAP_ARRAY(ChunkTree*, _arr);
}


// Iterate over all chunks in all trees. Returns true for complete traversal,
// false if traversal was cancelled.
bool ChunkTreeArray::iterate_chunks(ChunkClosure* cc) const {
  for (int i = 0; i < _num; i ++) {
    if (_arr[i] != NULL) {
      if (_arr[i]->iterate_chunks(cc) == false) {
        return false;
      }
    }
  }
  return true;
}

#ifdef ASSERT
void ChunkTreeArray::verify(bool slow) const {
  for (int i = 0; i < _num; i ++) {
    if (_arr[i] != NULL) {
      _arr[i]->verify(slow, _base);
    }
  }
}
#endif

// Returns the footprint of all trees in this array, in words.
size_t ChunkTreeArray::memory_footprint_words() const {
  size_t s = 0;
  for (int i = 0; i < _num; i ++) {
    if (_arr[i] != NULL) {
      s += _arr[i]->memory_footprint_words();
    }
  }
  return s;
}

} // namespace metaspace