< prev index next >
src/hotspot/share/gc/z/zHeap.cpp
Print this page
@@ -43,10 +43,11 @@
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/arguments.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@@ -60,11 +61,11 @@
ZHeap* ZHeap::_heap = NULL;
ZHeap::ZHeap() :
_workers(),
_object_allocator(_workers.nworkers()),
- _page_allocator(heap_min_size(), heap_max_size(), heap_max_reserve_size()),
+ _page_allocator(heap_min_size(), heap_initial_size(), heap_max_size(), heap_max_reserve_size()),
_page_table(),
_forwarding_table(),
_mark(&_workers, &_page_table),
_reference_processor(&_workers),
_weak_roots_processor(&_workers),
@@ -79,12 +80,17 @@
// Update statistics
ZStatHeap::set_at_initialize(heap_max_size(), heap_max_reserve_size());
}
size_t ZHeap::heap_min_size() const {
- const size_t aligned_min_size = align_up(InitialHeapSize, ZGranuleSize);
- return MIN2(aligned_min_size, heap_max_size());
+ const size_t aligned_min_size = align_up(Arguments::min_heap_size(), ZGranuleSize);
+ return MAX2(MIN2(aligned_min_size, heap_max_size()), heap_max_reserve_size());
+}
+
+size_t ZHeap::heap_initial_size() const {
+ const size_t aligned_initial_size = align_up(InitialHeapSize, ZGranuleSize);
+ return MAX2(MIN2(aligned_initial_size, heap_max_size()), heap_min_size());
}
size_t ZHeap::heap_max_size() const {
const size_t aligned_max_size = align_up(MaxHeapSize, ZGranuleSize);
return MIN2(aligned_max_size, ZAddressOffsetMax);
@@ -100,11 +106,11 @@
bool ZHeap::is_initialized() const {
  // The heap is usable only once both the page allocator and the
  // marking subsystem have completed their own initialization.
  if (!_page_allocator.is_initialized()) {
    return false;
  }
  return _mark.is_initialized();
}
size_t ZHeap::min_capacity() const {
- return heap_min_size();
+ return _page_allocator.min_capacity();
}
size_t ZHeap::max_capacity() const {
  // Capacity accounting is owned by the page allocator; simply
  // forward its view of the maximum capacity.
  const size_t capacity = _page_allocator.max_capacity();
  return capacity;
}
@@ -248,10 +254,14 @@
// Free page
_page_allocator.free_page(page, reclaimed);
}
+uint64_t ZHeap::uncommit(uint64_t delay) {
+ return _page_allocator.uncommit(delay);
+}
+
void ZHeap::before_flip() {
if (ZVerifyViews) {
// Unmap all pages
_page_allocator.unmap_all_pages();
}
< prev index next >