src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
Sdiff for change linux-numa-bad-mmap

Old version:

 265       }
 266     }
 267     return true;
 268   }
 269   return false;
 270 }
 271 
 272 // Bias region towards the first-touching lgrp. Set the right page sizes.
 273 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
 274   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
 275   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
 276   if (end > start) {
 277     MemRegion aligned_region(start, end);
 278     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 279            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 280     assert(region().contains(aligned_region), "Sanity");
 281     // First we tell the OS which page size we want in the given range. The underlying
 282     // large page can be broken down if we require small pages.
 283     os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
 284     // Then we uncommit the pages in the range.
 285     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
 286     // And make them local/first-touch biased.
 287     os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
 288   }
 289 }
 290 
 291 // Free all pages in the region.
 292 void MutableNUMASpace::free_region(MemRegion mr) {
 293   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
 294   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
 295   if (end > start) {
 296     MemRegion aligned_region(start, end);
 297     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 298            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 299     assert(region().contains(aligned_region), "Sanity");
 300     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
 301   }
 302 }
 303 
 304 // Update space layout. Perform adaptation.
 305 void MutableNUMASpace::update() {
 306   if (update_layout(false)) {
 307     // If the topology has changed, make all chunks zero-sized.
 308     // And clear the alloc-rate statistics.
 309     // In future we may want to handle this more gracefully in order
 310     // to avoid the reallocation of the pages as much as possible.
 311     for (int i = 0; i < lgrp_spaces()->length(); i++) {
 312       LGRPSpace *ls = lgrp_spaces()->at(i);
 313       MutableSpace *s = ls->space();
 314       s->set_end(s->bottom());
 315       s->set_top(s->bottom());
 316       ls->clear_alloc_rate();
 317     }
 318     // A NUMA space is never mangled
 319     initialize(region(),
 320                SpaceDecorator::Clear,


 937   if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
 938     set_last_page_scanned(range_start);
 939   }
 940 
 941   char *scan_start = last_page_scanned();
 942   char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
 943 
 944   os::page_info page_expected, page_found;
 945   page_expected.size = page_size;
 946   page_expected.lgrp_id = lgrp_id();
 947 
 948   char *s = scan_start;
 949   while (s < scan_end) {
 950     char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
 951     if (e == NULL) {
 952       break;
 953     }
 954     if (e != scan_end) {
 955       if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
 956           && page_expected.size != 0) {
 957         os::free_memory(s, pointer_delta(e, s, sizeof(char)));
 958       }
 959       page_expected = page_found;
 960     }
 961     s = e;
 962   }
 963 
 964   set_last_page_scanned(scan_end);
 965 }
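
bias_region() and free_region() above only operate on pages that lie entirely inside the requested MemRegion: round_to() moves the start up to the next page boundary, round_down() moves the end down, and nothing is done unless at least one full page remains in between. A minimal stand-alone sketch of that inward alignment (the addresses and the 4K page size below are made-up values for illustration, not anything taken from the VM):

// Illustration only: hypothetical addresses, not VM state.
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t page_size = 4096;
  uintptr_t region_start = 0x10064;                  // unaligned start
  uintptr_t region_end   = 0x10064 + 3 * page_size;  // unaligned end
  // round_to(): round the start up; round_down(): round the end down.
  uintptr_t start = (region_start + page_size - 1) & ~(page_size - 1);
  uintptr_t end   = region_end & ~(page_size - 1);
  if (end > start) {
    std::printf("aligned range [0x%lx, 0x%lx), %lu full pages\n",
                (unsigned long)start, (unsigned long)end,
                (unsigned long)((end - start) / page_size));
  }
  return 0;
}

With these inputs the region overlaps four pages but fully contains only two, so only those two would be uncommitted and biased.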


New version:

 265       }
 266     }
 267     return true;
 268   }
 269   return false;
 270 }
 271 
 272 // Bias region towards the first-touching lgrp. Set the right page sizes.
 273 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
 274   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
 275   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
 276   if (end > start) {
 277     MemRegion aligned_region(start, end);
 278     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 279            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 280     assert(region().contains(aligned_region), "Sanity");
 281     // First we tell the OS which page size we want in the given range. The underlying
 282     // large page can be broken down if we require small pages.
 283     os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
 284     // Then we uncommit the pages in the range.
 285     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
 286     // And make them local/first-touch biased.
 287     os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
 288   }
 289 }
 290 
 291 // Free all pages in the region.
 292 void MutableNUMASpace::free_region(MemRegion mr) {
 293   HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
 294   HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
 295   if (end > start) {
 296     MemRegion aligned_region(start, end);
 297     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 298            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 299     assert(region().contains(aligned_region), "Sanity");
 300     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
 301   }
 302 }
 303 
 304 // Update space layout. Perform adaptation.
 305 void MutableNUMASpace::update() {
 306   if (update_layout(false)) {
 307     // If the topology has changed, make all chunks zero-sized.
 308     // And clear the alloc-rate statistics.
 309     // In future we may want to handle this more gracefully in order
 310     // to avoid the reallocation of the pages as much as possible.
 311     for (int i = 0; i < lgrp_spaces()->length(); i++) {
 312       LGRPSpace *ls = lgrp_spaces()->at(i);
 313       MutableSpace *s = ls->space();
 314       s->set_end(s->bottom());
 315       s->set_top(s->bottom());
 316       ls->clear_alloc_rate();
 317     }
 318     // A NUMA space is never mangled
 319     initialize(region(),
 320                SpaceDecorator::Clear,


 937   if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
 938     set_last_page_scanned(range_start);
 939   }
 940 
 941   char *scan_start = last_page_scanned();
 942   char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
 943 
 944   os::page_info page_expected, page_found;
 945   page_expected.size = page_size;
 946   page_expected.lgrp_id = lgrp_id();
 947 
 948   char *s = scan_start;
 949   while (s < scan_end) {
 950     char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
 951     if (e == NULL) {
 952       break;
 953     }
 954     if (e != scan_end) {
 955       if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
 956           && page_expected.size != 0) {
 957         os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
 958       }
 959       page_expected = page_found;
 960     }
 961     s = e;
 962   }
 963 
 964   set_last_page_scanned(scan_end);
 965 }
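
The only functional change in this Sdiff is the extra third argument to os::free_memory(): the new version passes the page size of the range being uncommitted, in bias_region() (line 285), free_region() (line 300) and the page-scanning loop (line 957). The platform implementation of os::free_memory() is not part of this file, and the change name (linux-numa-bad-mmap) only hints that the Linux uncommit path is what this supports. Purely as a hedged sketch of why an uncommit routine might need the page size, a Linux-flavoured stand-in could look roughly like the following (the function name and the MADV_DONTNEED strategy are illustrative assumptions, not the actual os_linux.cpp code):

// Sketch only -- not HotSpot's os::free_memory() implementation.
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static void sketch_free_memory(char* addr, size_t bytes, size_t page_size) {
  assert((uintptr_t)addr % page_size == 0 && bytes % page_size == 0);
  if (page_size == (size_t)sysconf(_SC_PAGESIZE)) {
    // Small pages: MADV_DONTNEED drops the backing frames but keeps the
    // mapping, so a later first touch re-commits (and re-biases) the pages.
    madvise(addr, bytes, MADV_DONTNEED);
  } else {
    // Large/huge pages generally need different handling; knowing page_size
    // is what lets the callee tell the two cases apart. Omitted here.
  }
}

Passing the page size down keeps the caller's alignment assertions (lines 278/279 and 297/298) consistent with whatever granularity the OS layer actually releases.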