GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return max allocatable TLAB size, and let allocation path figure out
  // the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

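// Shared allocation path: try a lock-free pointer bump in the currently
// committed space first; on failure, take Heap_lock, commit more of the
// reserved space, and retry.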
HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
    MutexLockerEx ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, and this allocation is still possible,
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters.
  // The CAS ensures only the thread that moves _last_counter_update forward
  // pays for the counter update, so the hot path stays lock-free and updates
  // are throttled to roughly once per _step_counter_update bytes allocated.
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
      log_info(gc)("Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M (%.2f%%) committed, " SIZE_FORMAT "M (%.2f%%) used",
                   max_capacity() / M,
                   capacity() / M,
                   capacity() * 100.0 / max_capacity(),
                   used / M,
                   used * 100.0 / max_capacity());
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

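// Elastic TLAB sizing: each thread carries an "ergonomic" TLAB size that is
// grown by EpsilonTLABElasticity whenever a request does not fit, and is
// retracted to zero once the thread stays allocation-idle for longer than
// the decay time. A sketch of how this is typically enabled (experimental
// flags; exact defaults may vary by JDK version):
//
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
//        -XX:+EpsilonElasticTLAB -XX:+EpsilonElasticTLABDecay ...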
HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "Time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under the current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }
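  // Worked example for the elastic path above: with an EpsilonTLABElasticity
  // of 2, a thread whose requests keep exceeding its ergonomic size has that
  // size doubled on every miss (1K -> 2K -> 4K -> ...), subject to the
  // boundary and alignment clamps below.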

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {