/*
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/oop.hpp"
#include "oops/oop.inline.hpp"
#include "gc_implementation/epsilon/epsilonBarrierSet.hpp"
#include "gc_implementation/epsilon/epsilonCollectedHeap.hpp"
#include "gc_implementation/epsilon/epsilonMonitoringSupport.hpp"

jint EpsilonCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  size_t init_byte_size = _policy->initial_heap_byte_size();
  size_t max_byte_size  = _policy->max_heap_byte_size();
  size_t align          = _policy->heap_alignment();

  // Reserve the maximum heap up front, commit only the initial size.
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(),  (HeapWord*)_virtual_space.high_boundary());

  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));

  // The entire heap is a single contiguous allocation space.
  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  EpsilonBarrierSet* bs = new EpsilonBarrierSet();
  set_barrier_set(bs);

  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), EpsilonMaxTLABSize / HeapWordSize);

  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;

  if (init_byte_size != max_byte_size) {
    gclog_or_tty->print_cr("Initialized with " SIZE_FORMAT "M heap, resizeable to up to " SIZE_FORMAT "M heap with " SIZE_FORMAT "M steps",
                           init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    gclog_or_tty->print_cr("Initialized with " SIZE_FORMAT "M non-resizeable heap",
                           init_byte_size / M);
  }
  if (UseTLAB) {
    gclog_or_tty->print_cr("Using TLAB allocation; min: " SIZE_FORMAT "K, max: " SIZE_FORMAT "K",
                           ThreadLocalAllocBuffer::min_size() * HeapWordSize / K,
                           _max_tlab_size * HeapWordSize / K);
  } else {
    gclog_or_tty->print_cr("Not using TLAB allocation");
  }

  return JNI_OK;
}

size_t EpsilonCollectedHeap::unsafe_max_tlab_alloc(Thread *thr) const {
  // This is the only way we can control TLAB sizes without having safepoints.
  // Implement exponential expansion within [MinTLABSize; _max_tlab_size], based
  // on previously "used" TLAB size.
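  //
  // Worked example (illustrative numbers, not defaults from this code): with
  // MinTLABSize at 2K and _max_tlab_size capping out at 4M, a thread that used
  // 16K of its previous TLAB is offered up to 32K next time, one that used 3M
  // is clamped to the 4M cap, and a thread that used nothing falls back to the
  // 2K minimum.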
  size_t size = MIN2(_max_tlab_size * HeapWordSize,
                     MAX2(MinTLABSize, thr->tlab().used() * HeapWordSize * 2));

  // if (log_is_enabled(Trace, gc)) {
  //   ResourceMark rm;
  //   log_trace(gc)(
  //           "Selecting TLAB size for \"%s\" (Desired: " SIZE_FORMAT "K, Used: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
  //           Thread::current()->name(),
  //           thr->tlab().desired_size() * HeapWordSize / K,
  //           thr->tlab().used() * HeapWordSize / K,
  //           size / K);
  // }

  return size;
}

EpsilonCollectedHeap* EpsilonCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::EpsilonCollectedHeap, "Not an EpsilonCollectedHeap");
  return (EpsilonCollectedHeap*)heap;
}

HeapWord* EpsilonCollectedHeap::allocate_work(size_t size) {
  // Lock-free fast path: bump-the-pointer allocation in the shared space.
  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);
    if (!_virtual_space.expand_by(MAX2(size, EpsilonMinHeapExpand))) {
      return NULL;
    }
    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  // Publish new occupancy to the monitoring counters roughly once per 1 MB of allocation.
  size_t used = _space->used();
  if (used - _last_counter_update >= 1024 * 1024) {
    _last_counter_update = used;
    _monitoring_support->update_counters();
  }
  return res;
}

HeapWord* EpsilonCollectedHeap::allocate_new_tlab(size_t size) {
  return allocate_work(size);
}

HeapWord* EpsilonCollectedHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonCollectedHeap::collect(GCCause::Cause cause) {
  gclog_or_tty->print_cr("GC was triggered with cause \"%s\". Ignoring.", GCCause::to_string(cause));
  _monitoring_support->update_counters();
}

void EpsilonCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  gclog_or_tty->print_cr("Full GC was triggered with cause \"%s\". Ignoring.", GCCause::to_string(gc_cause()));
  _monitoring_support->update_counters();
}

void EpsilonCollectedHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonCollectedHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);
}

void EpsilonCollectedHeap::print_tracing_info() const {
  size_t allocated_kb = used() / K;
  gclog_or_tty->print_cr("Total allocated: " SIZE_FORMAT " KB", allocated_kb);
  gclog_or_tty->print_cr("Average allocation rate: " SIZE_FORMAT " KB/sec",
                         allocated_kb * NANOSECS_PER_SEC / os::elapsed_counter());
}