563 assert(clear_space, "Reallocation will destroy data!");
564 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
565
566 MemRegion old_region = region(), new_region;
567 set_bottom(mr.start());
568 set_end(mr.end());
569 // Must always clear the space
570 clear(SpaceDecorator::DontMangle);
571
572 // Compute chunk sizes
573 size_t prev_page_size = page_size();
574 set_page_size(UseLargePages ? alignment() : os::vm_page_size());
575 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
576 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
577 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
578
579 // Try small pages if the chunk size is too small
580 if (base_space_size_pages / lgrp_spaces()->length() == 0
581 && page_size() > (size_t)os::vm_page_size()) {
582 #ifdef LINUX
583 // Changing the page size below can lead to freeing of memory. When using
584 // large pages that are reserved and committed, Linux does not support
585 // freeing parts of the region, so initialization must fail instead.
585 if (UseLargePages && !os::can_commit_large_page_memory()) {
586 vm_exit_during_initialization("Failed initializing NUMA. Too small heap size");
587 }
588 #endif // LINUX
589 set_page_size(os::vm_page_size());
590 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
591 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
592 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
593 }
594 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
595 set_base_space_size(base_space_size_pages);
596
597 // Handle space resize
598 MemRegion top_region, bottom_region;
599 if (!old_region.equals(region())) {
600 new_region = MemRegion(rounded_bottom, rounded_end);
601 MemRegion intersection = new_region.intersection(old_region);
602 if (intersection.start() == NULL ||
603 intersection.end() == NULL ||
604 prev_page_size > page_size()) { // If the page size got smaller we have to change
605 // the page size preference for the whole space.
606 intersection = MemRegion(new_region.start(), new_region.start());
|
563 assert(clear_space, "Reallocation will destroy data!");
564 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
565
566 MemRegion old_region = region(), new_region;
567 set_bottom(mr.start());
568 set_end(mr.end());
569 // Must always clear the space
570 clear(SpaceDecorator::DontMangle);
571
572 // Compute chunk sizes
573 size_t prev_page_size = page_size();
574 set_page_size(UseLargePages ? alignment() : os::vm_page_size());
575 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
576 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
577 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
578
579 // Try small pages if the chunk size is too small
580 if (base_space_size_pages / lgrp_spaces()->length() == 0
581 && page_size() > (size_t)os::vm_page_size()) {
582 #ifdef LINUX
583 // Changing the page size below can lead to freeing of memory. When using large pages
584 // and the memory has been both reserved and committed, Linux does not support
585 // freeing parts of it. So we fail initialization.
586 if (UseLargePages && !os::can_commit_large_page_memory()) {
587 vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
588 }
589 #endif // LINUX
590 set_page_size(os::vm_page_size());
591 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
592 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
593 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
594 }
595 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
596 set_base_space_size(base_space_size_pages);
597
598 // Handle space resize
599 MemRegion top_region, bottom_region;
600 if (!old_region.equals(region())) {
601 new_region = MemRegion(rounded_bottom, rounded_end);
602 MemRegion intersection = new_region.intersection(old_region);
603 if (intersection.start() == NULL ||
604 intersection.end() == NULL ||
605 prev_page_size > page_size()) { // If the page size got smaller we have to change
606 // the page size preference for the whole space.
607 intersection = MemRegion(new_region.start(), new_region.start());
|