#include "memory/resourceArea.hpp"

jint EpsilonHeap::initialize() {
  size_t align = _policy->heap_alignment();
  size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
  size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

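  // The committed region is what is usable right now; the reserved region is
  // the full address range the heap may grow into later.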
  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion  reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
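  // EpsilonMaxTLABSize is in bytes, while TLAB sizes are tracked in heap
  // words; divide to convert, keep the result object-aligned, and never
  // exceed the VM-wide TLAB limit.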
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
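  // EpsilonTLABDecayTime is in milliseconds; convert it once to nanoseconds
  // for direct comparison against nanosecond timestamps in the TLAB path.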
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
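  // Epsilon performs no concurrent or evacuating work, so its barrier set
  // does not need any actual barriers.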
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

// ... (remainder of initialize() and intervening code elided; the excerpt
// resumes inside EpsilonHeap::allocate_new_tlab) ...

      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
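      // Scale the next TLAB by EpsilonTLABElasticity; repeated misses thus
      // grow this thread's TLABs geometrically.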
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
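    // thread->name() may return a resource-allocated string; the ResourceMark
    // releases it when this block exits.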
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

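  // A NULL result means the heap is exhausted: Epsilon never reclaims memory,
  // so an allocation failure here typically surfaces as an OutOfMemoryError.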
  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {