src/hotspot/share/gc/parallel/mutableNUMASpace.cpp

rev 47287 : Port 09.17.Thread_SMR_logging_update from JDK9 to JDK10
rev 47289 : eosterlund, stefank CR - refactor code into threadSMR.cpp and threadSMR.hpp
rev 47292 : stefank, coleenp CR - refactor most JavaThreadIterator usage to use JavaThreadIteratorWithHandle.
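For context on the change under review: the two-step pattern of constructing a ThreadsListHandle and then a JavaThreadIterator over its list is collapsed into the single JavaThreadIteratorWithHandle introduced by the Thread-SMR work (see threadSMR.hpp from rev 47289). A minimal sketch of the two idioms as they appear in this file:

  // Old idiom: explicit ThreadsListHandle plus a separate iterator over its list.
  {
    ThreadsListHandle tlh;                // keeps the threads list stable for this scope
    JavaThreadIterator jti(tlh.list());
    for (JavaThread *thread = jti.first(); thread != NULL; thread = jti.next()) {
      thread->set_lgrp_id(-1);
    }
  }

  // New idiom: the handle and the iterator are combined into one stack object.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    thread->set_lgrp_id(-1);
  }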


Old version:

 271     // Remove spaces for the removed nodes.
 272     for (int i = 0; i < lgrp_spaces()->length();) {
 273       bool found = false;
 274       for (int j = 0; j < lgrp_num; j++) {
 275         if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
 276           found = true;
 277           break;
 278         }
 279       }
 280       if (!found) {
 281         delete lgrp_spaces()->at(i);
 282         lgrp_spaces()->remove_at(i);
 283       } else {
 284         i++;
 285       }
 286     }
 287 
 288     FREE_C_HEAP_ARRAY(int, lgrp_ids);
 289 
 290     if (changed) {
 291       ThreadsListHandle tlh;
 292       JavaThreadIterator jti(tlh.list());
 293       for (JavaThread *thread = jti.first(); thread != NULL; thread = jti.next()) {
 294         thread->set_lgrp_id(-1);
 295       }
 296     }
 297     return true;
 298   }
 299   return false;
 300 }
 301 
 302 // Bias region towards the first-touching lgrp. Set the right page sizes.
 303 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
 304   HeapWord *start = align_up(mr.start(), page_size());
 305   HeapWord *end = align_down(mr.end(), page_size());
 306   if (end > start) {
 307     MemRegion aligned_region(start, end);
 308     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 309            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 310     assert(region().contains(aligned_region), "Sanity");
 311     // First we tell the OS which page size we want in the given range. The underlying
 312     // large page can be broken down if we require small pages.
 313     os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());

New version (JavaThreadIterator usage replaced by JavaThreadIteratorWithHandle):

 271     // Remove spaces for the removed nodes.
 272     for (int i = 0; i < lgrp_spaces()->length();) {
 273       bool found = false;
 274       for (int j = 0; j < lgrp_num; j++) {
 275         if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
 276           found = true;
 277           break;
 278         }
 279       }
 280       if (!found) {
 281         delete lgrp_spaces()->at(i);
 282         lgrp_spaces()->remove_at(i);
 283       } else {
 284         i++;
 285       }
 286     }
 287 
 288     FREE_C_HEAP_ARRAY(int, lgrp_ids);
 289 
 290     if (changed) {
 291     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
 292         thread->set_lgrp_id(-1);
 293       }
 294     }
 295     return true;
 296   }
 297   return false;
 298 }
 299 
 300 // Bias region towards the first-touching lgrp. Set the right page sizes.
 301 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
 302   HeapWord *start = align_up(mr.start(), page_size());
 303   HeapWord *end = align_down(mr.end(), page_size());
 304   if (end > start) {
 305     MemRegion aligned_region(start, end);
 306     assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
 307            (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
 308     assert(region().contains(aligned_region), "Sanity");
 309     // First we tell the OS which page size we want in the given range. The underlying
 310     // large page can be broken down if we require small pages.
 311     os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
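Not part of this change, but for the bias_region() code shown above: align_up() rounds the region start up and align_down() rounds the end down to page_size(), so only whole pages inside mr are handed to os::realign_memory(); the asserts then verify the alignment and that the aligned region is still contained in the space. A small standalone sketch of that arithmetic, using a hypothetical 4 KiB page size, made-up addresses, and simple bit-mask helpers standing in for HotSpot's align_up()/align_down():

  #include <cassert>
  #include <cstdint>

  // Hypothetical stand-ins for HotSpot's align_up()/align_down(); alignment must be a power of two.
  static uintptr_t align_up_to(uintptr_t p, uintptr_t alignment)   { return (p + alignment - 1) & ~(alignment - 1); }
  static uintptr_t align_down_to(uintptr_t p, uintptr_t alignment) { return p & ~(alignment - 1); }

  int main() {
    const uintptr_t page     = 4096;        // assumed page_size()
    const uintptr_t mr_start = 0x10001200;  // hypothetical, unaligned region start
    const uintptr_t mr_end   = 0x10009a00;  // hypothetical, unaligned region end

    uintptr_t start = align_up_to(mr_start, page);  // 0x10002000: first full page inside the region
    uintptr_t end   = align_down_to(mr_end, page);  // 0x10009000: end of the last full page inside it

    // Mirrors the asserts in bias_region(): page-aligned start and a page-multiple size.
    assert(start % page == 0 && (end - start) % page == 0);
    return (end > start) ? 0 : 1;  // bias_region() only proceeds when at least one full page remains
  }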

