#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)mutableNUMASpace.cpp 1.8 07/05/05 17:05:35 JVM"
#endif

/*
 * Copyright 2006-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableNUMASpace.cpp.incl"


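// MutableNUMASpace maintains one chunk (a MutableSpace) per locality group
// (lgrp) and biases each chunk's pages towards its lgrp. The constructor
// allocates the chunk list on the C heap and picks up the initial NUMA
// topology via update_layout(true).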
MutableNUMASpace::MutableNUMASpace() {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

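// Mangle the unused part of every lgrp chunk. The mangled pages are also
// recorded as invalid regions, so that the next initialize() call frees
// them and lets the OS place them anew.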
void MutableNUMASpace::mangle_unused_area() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
    if (top < s->end()) {
      ls->add_invalid_region(MemRegion(top, s->end()));
    }
    s->mangle_unused_area();
  }
}

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (!s->contains(top())) {
      if (s->free_in_words() > 0) {
        SharedHeap::fill_region_with_object(MemRegion(s->top(), s->end()));
        size_t area_touched_words = pointer_delta(s->end(), s->top());
#ifndef ASSERT
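        // In product builds the filler's contents are not written unless
        // ZapUnusedHeapArea is set, so only the pages holding the filler
        // header are actually touched; cap the touched area accordingly.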
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        MemRegion invalid;
        HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
        HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                     os::vm_page_size());
        if (crossing_start != crossing_end) {
          // If the object header crossed a small page boundary, mark the area
          // as invalid, rounding it to page_size().
          HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
          HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                               s->end());
          invalid = MemRegion(start, end);
        }

        ls->add_invalid_region(invalid);
        s->set_top(s->end());
      }
    } else {
#ifdef ASSERT
      MemRegion invalid(s->top(), s->end());
      ls->add_invalid_region(invalid);
#else
      if (ZapUnusedHeapArea) {
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
      } else {
        break;
      }
#endif
    }
  }
}

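// Accounting: the totals are the sums over all lgrp chunks.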
size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}


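// TLAB capacity and the maximal TLAB allocation are reported for the calling
// thread's lgrp chunk only; a thread whose lgrp has no chunk yet gets 0.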
size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  assert(lgrp_id != -1, "No lgrp_id set");
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes.
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i]));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      // Force the threads to reacquire their lgrp ids.
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

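// The biasing below relies on the first-touch page placement policy: once a
// page has been freed, the OS physically places it on the node of the thread
// that touches it next (see also the touch loops in allocate()/cas_allocate()).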
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      MutableSpace *s = lgrp_spaces()->at(i)->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
    }
    initialize(region(), true);
  } else {
    bool should_initialize = false;
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
        should_initialize = true;
        break;
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      initialize(region(), true);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

// Scan pages. Free pages that have smaller size or wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count) {
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
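// Note: base_space_size() is measured in pages, so the integer division
// truncates and the result is a whole multiple of page_size().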
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
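  // Reserve at least one page for each of the chunks that follow this one.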
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() * pages_available / alloc_rate) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      chunk_size = MIN2(chunk_size, current_chunk_size(i) + limit);
    } else {
      // Use signed arithmetic here: current_chunk_size(i) - limit may be negative.
      chunk_size = (size_t)MAX2((intptr_t)chunk_size, (intptr_t)current_chunk_size(i) - (intptr_t)limit);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}


// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there a bottom tail?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there a top tail?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), os::large_page_size());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= os::large_page_size()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. If the invalid region lies strictly inside the
// intersection, it cannot be merged: return it page-aligned and non-empty so
// the caller can free it. In all other cases return an empty invalid_region.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    // The invalid region overlaps the bottom of the intersection:
    // merge it into the bottom region.
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    // The invalid region overlaps the top of the intersection:
    // merge it into the top region.
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    // The invalid region covers the whole intersection: nothing is reusable.
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else if (intersection->contains(*invalid_region)) {
    // The invalid region lies strictly inside the intersection. That's the
    // only case in which we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= os::large_page_size()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, os::large_page_size());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, os::large_page_size());
      if (new_region.contains(p)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

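// Redistribute the space among the lgrp chunks: compute the new chunk sizes
// (adaptively, if enabled), then free and re-bias the pages whose placement
// has changed. The caller must pass clear_space == true: the data is not
// preserved across the reshape.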
void MutableNUMASpace::initialize(MemRegion mr, bool clear_space) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  MutableSpace::set_top(bottom());

  // Compute chunk sizes.
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? os::large_page_size() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small.
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize.
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL   ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region);
    bias_region(top_region);
  }

  // Check whether the space layout has changed significantly.
  // This happens when the space has been resized so that either the head or
  // the tail chunk became smaller than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing          &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();


  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          NUMAChunkResizeWeight == 0  ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
        chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
      }

      assert(chunk_byte_size >= page_size(), "Chunk size too small");
      assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
    }

    if (i == 0) { // Bottom chunk
      if (i != lgrp_spaces()->length() - 1) {
        new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
      } else {
        new_region = MemRegion(bottom(), end());
      }
    } else if (i < lgrp_spaces()->length() - 1) { // Middle chunks
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(),
                             ps->end() + (chunk_byte_size >> LogHeapWordSize));
    } else { // Top chunk
      MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
      new_region = MemRegion(ps->end(), end());
    }
    guarantee(region().contains(new_region), "Region invariant");


    // The general case:
    // |---------------------|--invalid---|--------------------------|
    // |------------------new_region---------------------------------|
    // |----bottom_region--|---intersection---|------top_region------|
    //                     |----old_region----|
    // The intersection part has all pages in place; we don't need to migrate them.
    // Pages for the top and bottom parts should be freed and then reallocated.

    MemRegion intersection = old_region.intersection(new_region);

    if (intersection.start() == NULL || intersection.end() == NULL) {
      intersection = MemRegion(new_region.start(), new_region.start());
    }

    MemRegion invalid_region = ls->invalid_region().intersection(new_region);
    if (!invalid_region.is_empty()) {
      merge_regions(new_region, &intersection, &invalid_region);
      free_region(invalid_region);
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    free_region(bottom_region);
    free_region(top_region);

    // If we cleared the region it would be mangled in debug builds, and that
    // would cause the page allocation to happen in a different place.
    // Hence we set the top directly.
    s->initialize(new_region, false);
    s->set_top(s->bottom());

    ls->set_invalid_region(MemRegion());

    set_adaptation_cycles(samples_count());
  }
}

// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      if (top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear() {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->clear();
  }
}

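// Allocate from the chunk of the calling thread's lgrp. If that lgrp has no
// chunk (e.g. a CPU was hotplugged and the space hasn't been reshaped yet),
// fall back to a random chunk.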
HeapWord* MutableNUMASpace::allocate(size_t size) {
  int lgrp_id = Thread::current()->lgrp_id();
  if (lgrp_id == -1) {
    lgrp_id = os::numa_get_group_id();
    Thread::current()->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    // Retract the allocation: it would leave a fragment smaller than
    // a minimal object.
    s->set_top(s->top() - size);
    p = NULL;
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here: touch one word per page so the
  // pages are committed while the current thread runs, letting first-touch
  // placement put them on this thread's lgrp.
  if (p != NULL) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }

  return p;
}

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  int lgrp_id = Thread::current()->lgrp_id();
  if (lgrp_id == -1) {
    lgrp_id = os::numa_get_group_id();
    Thread::current()->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  MutableSpace *s = lgrp_spaces()->at(i)->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL && s->free_in_words() < (size_t)oopDesc::header_size()) {
    if (s->cas_deallocate(p, size)) {
      // We were the last to allocate and created a fragment less than
      // a minimal object.
      p = NULL;
    }
  }
  if (p != NULL) {
    HeapWord *cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here: touch one word per page so the
  // pages are committed while the current thread runs, letting first-touch
  // placement put them on this thread's lgrp.
  if (p != NULL) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print("    lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      // The stats fields are size_t; use SIZE_FORMAT rather than %d to avoid
      // a format mismatch on LP64 platforms.
      st->print("    local/remote/unbiased/uncommitted: " SIZE_FORMAT "K/"
                SIZE_FORMAT "K/" SIZE_FORMAT "K/" SIZE_FORMAT "K, "
                "large/small pages: " SIZE_FORMAT "/" SIZE_FORMAT "\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) const {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    lgrp_spaces()->at(i)->space()->verify(allow_dirty);
  }
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify whether they have the right size and right
// placement. If invalid pages are found, they are freed in the hope that
// subsequent reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count) {
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}