< prev index next >

src/hotspot/share/gc/z/zPhysicalMemory.cpp

Print this page




 267 }
 268 
// Maps the given physical memory (a list of discontiguous physical
// segments) as one contiguous range in the virtual view starting at addr,
// then applies large-page/NUMA policy to the freshly mapped range.
// NOTE(review): leading numbers on each line are the webrev listing's own
// line numbers, not part of the source.
 269 void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
 270   const size_t nsegments = pmem.nsegments();
 271   size_t size = 0;
 272 
 273   // Map segments
 274   for (size_t i = 0; i < nsegments; i++) {
 275     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 276     _backing.map(addr + size, segment.size(), segment.start()); // size is the running offset of this segment in the view
 277     size += segment.size();
 278   }
 279 
 280   // Setup NUMA interleaving for large pages
 281   if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
 282     // To get granule-level NUMA interleaving when using large pages,
 283     // we simply let the kernel interleave the memory for us at page
 284     // fault time.
 285     os::numa_make_global((char*)addr, size);
 286   }
 287 
 288   // Setup transparent large pages
 289   if (ZLargePages::is_transparent()) {
 290     os::realign_memory((char*)addr, size, os::large_page_size()); // ask the kernel to back the range with THP-aligned pages
 291   }
 292 }
 293 
// Removes the mapping of pmem from the virtual view at addr. Only the
// total size is needed: the view is virtually contiguous, so a single
// unmap covers all underlying physical segments.
 294 void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
 295   _backing.unmap(addr, pmem.size());
 296 }
 297 
// Pre-touches size bytes at the given heap offset. With ZVerifyViews only
// the "good" view is touched; otherwise all three color views (marked0,
// marked1, remapped) are touched — presumably matching which views are
// actually mapped in each mode (TODO confirm against map()/map_view()).
 298 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
 299   if (ZVerifyViews) {
 300     // Pre-touch good view
 301     pretouch_view(ZAddress::good(offset), size);
 302   } else {
 303     // Pre-touch all views
 304     pretouch_view(ZAddress::marked0(offset), size);
 305     pretouch_view(ZAddress::marked1(offset), size);
 306     pretouch_view(ZAddress::remapped(offset), size);
 307   }
 308 }
 309 
 310 void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
 311   if (ZVerifyViews) {




 267 }
 268 
// Second copy of map_view shown by this webrev page. It maps each physical
// segment contiguously into the view at addr and sets up NUMA interleaving
// for explicit large pages. NOTE(review): unlike the other side of this
// diff, this revision has no transparent-large-page realign step — that
// appears to be what the changeset adds/removes; verify against the full
// webrev.
 269 void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
 270   const size_t nsegments = pmem.nsegments();
 271   size_t size = 0;
 272 
 273   // Map segments
 274   for (size_t i = 0; i < nsegments; i++) {
 275     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 276     _backing.map(addr + size, segment.size(), segment.start()); // size is the running offset of this segment in the view
 277     size += segment.size();
 278   }
 279 
 280   // Setup NUMA interleaving for large pages
 281   if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
 282     // To get granule-level NUMA interleaving when using large pages,
 283     // we simply let the kernel interleave the memory for us at page
 284     // fault time.
 285     os::numa_make_global((char*)addr, size);
 286   }




 287 }
 288 
// Removes the mapping of pmem from the virtual view at addr; a single
// unmap of the total size suffices since the view is virtually contiguous.
 289 void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
 290   _backing.unmap(addr, pmem.size());
 291 }
 292 
// Pre-touches size bytes at the given heap offset: only the "good" view
// when ZVerifyViews is set, otherwise all three color views (marked0,
// marked1, remapped).
 293 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
 294   if (ZVerifyViews) {
 295     // Pre-touch good view
 296     pretouch_view(ZAddress::good(offset), size);
 297   } else {
 298     // Pre-touch all views
 299     pretouch_view(ZAddress::marked0(offset), size);
 300     pretouch_view(ZAddress::marked1(offset), size);
 301     pretouch_view(ZAddress::remapped(offset), size);
 302   }
 303 }
 304 
 305 void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
 306   if (ZVerifyViews) {


< prev index next >