src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
*** old/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Dec 27 14:58:51 2011
--- new/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Tue Dec 27 14:58:51 2011

*** 280,290 ****
--- 280,290 ----
      assert(region().contains(aligned_region), "Sanity");
      // First we tell the OS which page size we want in the given range. The underlying
      // large page can be broken down if we require small pages.
      os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
      // Then we uncommit the pages in the range.
!     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
      // And make them local/first-touch biased.
      os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
    }
  }
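For readers less familiar with this code path, the three-step sequence above (realign, uncommit, rebias) has a rough userland analogue on Linux. A minimal sketch, assuming Linux, libnuma development headers, and a page-aligned range; the function name rebias_to_node is hypothetical, and this illustrates the idea only, not what os::free_memory or os::numa_make_local actually do:

    #include <sys/mman.h>
    #include <numaif.h>   // mbind(); assumes libnuma development headers
    #include <cstddef>

    // Sketch: drop the physical pages behind a mapping (the virtual range
    // stays reserved), then bias future faults toward one NUMA node.
    // madvise(MADV_DONTNEED) makes the next touch refault a fresh zero
    // page, so the mbind() policy (or plain first-touch by a thread
    // running on that node) decides where the new pages are allocated.
    void rebias_to_node(char* addr, size_t bytes, int node) {
      ::madvise(addr, bytes, MADV_DONTNEED);
      unsigned long nodemask = 1UL << node;  // assumes node < bits-per-long
      ::mbind(addr, bytes, MPOL_PREFERRED, &nodemask,
              sizeof(nodemask) * 8, 0);
    }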
*** 295,305 ****
--- 295,305 ----
    if (end > start) {
      MemRegion aligned_region(start, end);
      assert((intptr_t)aligned_region.start() % page_size() == 0 &&
             (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
      assert(region().contains(aligned_region), "Sanity");
!     os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    }
  }
  
  // Update space layout. Perform adaptation.
  void MutableNUMASpace::update() {
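The guard pattern in this hunk, rounding the region start up and the end down so only whole pages are ever released, can be shown standalone. A sketch under the assumption that page is a power of two; align_up, align_down, and inner_page_range are local helpers for illustration, not the round_to/round_down macros HotSpot uses:

    #include <cstddef>
    #include <cstdint>

    // Shrink [start, end) inward to whole-page boundaries so that freeing
    // never touches memory outside the requested range. Assumes page is a
    // power of two.
    static inline uintptr_t align_up(uintptr_t p, size_t page) {
      return (p + page - 1) & ~(uintptr_t)(page - 1);
    }
    static inline uintptr_t align_down(uintptr_t p, size_t page) {
      return p & ~(uintptr_t)(page - 1);
    }

    // Returns false when the range contains no complete page, mirroring
    // the "if (end > start)" check in free_region().
    bool inner_page_range(char* start, char* end, size_t page,
                          char** aligned_start, char** aligned_end) {
      uintptr_t s = align_up((uintptr_t)start, page);
      uintptr_t e = align_down((uintptr_t)end, page);
      if (e <= s) return false;   // nothing page-sized to free
      *aligned_start = (char*)s;
      *aligned_end   = (char*)e;
      return true;
    }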
*** 952,962 ****
--- 952,962 ----
        break;
      }
      if (e != scan_end) {
        if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
            && page_expected.size != 0) {
!         os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
        }
        page_expected = page_found;
      }
      s = e;
    }
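The common thread in all three hunks is that every os::free_memory() call site now passes the space's page size as a third argument. A plausible reason, sketched below under stated assumptions, is that the uncommit strategy must differ by page size: madvise(MADV_DONTNEED) works for small pages but is not reliable for large (huge) pages everywhere, where replacing the range with a fresh anonymous mapping is one portable fallback. The dispatch, the 4 KiB threshold, and the name free_memory_sketch are illustrative assumptions, not HotSpot's actual platform code:

    #include <sys/mman.h>
    #include <cstddef>

    // Hypothetical sketch of why the call sites pass page_size(): the OS
    // layer can pick an uncommit strategy appropriate to the page size.
    void free_memory_sketch(char* addr, size_t bytes, size_t page_size) {
      const size_t small_page = 4096;  // assumption: 4 KiB base pages
      if (page_size <= small_page) {
        // Small pages: drop physical backing, keep the reservation.
        ::madvise(addr, bytes, MADV_DONTNEED);
      } else {
        // Large pages: MAP_FIXED atomically replaces the old mapping,
        // returning its physical pages while the virtual range stays
        // reserved.
        ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      }
    }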
