 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/barrierSet.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"

jint EpsilonHeap::initialize() {
  size_t align = _policy->heap_alignment();
  size_t init_byte_size = align_up(_policy->initial_heap_byte_size(), align);
  size_t max_byte_size  = align_up(_policy->max_heap_byte_size(), align);

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(),          (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
// ...

  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_or_collect_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try and fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_or_collect_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter this safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      if (EpsilonWhyNotGCAnyway) {
        if (SafepointSynchronize::is_at_safepoint()) {
          entry_collect(cause);
        } else {
          vmentry_collect(cause);
        }
      } else {
        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
      }
  }
  _monitoring_support->update_counters();
}

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

// ...

    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved  = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used      = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used),      proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}

// ------------------------------- EXPERIMENTAL MARK-COMPACT --------------------------------------------
//
// This implements a trivial Lisp2-style sliding collector:
//   https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
//
// The goal for this implementation is to be as trivial as possible, ignoring even the
// basic and obvious performance optimizations.
//
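// As a worked illustration (hypothetical objects, not part of the code below), sliding
// compaction of a heap with live objects A, C, D and dead object B proceeds like this:
//
//   mark:     [ A ][ B ][ C ][ D ]......    A, C, D get the "marked" bit in their mark words
//   forward:  A -> bottom, C -> bottom + size(A), D -> bottom + size(A) + size(C)
//             (new addresses are stored as forwarding pointers in the mark words)
//   adjust:   every root and reference field pointing at A/C/D is rewritten to the
//             forwarded address
//   move:     [ A ][ C ][ D ]..........    objects are copied left, and top retracts
//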

// VM operation that executes collection cycle under safepoint
class VM_EpsilonCollect: public VM_Operation {
private:
  const GCCause::Cause _cause;
public:
  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(), _cause(cause) {}
  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
  const char* name() const { return "Epsilon Mark-Compact Collection"; }
  virtual void doit() {
    EpsilonHeap* heap = EpsilonHeap::heap();
    heap->entry_collect(_cause);
    if (EpsilonWhyNotGCAnywayAgain) {
      // Run the cycle once more on the already-compacted heap,
      // presumably as a stress/testing aid.
      heap->entry_collect(_cause);
    }
  }
};

// Utility to enter the safepoint for GC
void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
  VM_EpsilonCollect vmop(cause);
  VMThread::execute(&vmop);
}
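
// Example trigger path, assuming -XX:+EpsilonWhyNotGCAnyway is set: java.lang.System.gc()
// reaches EpsilonHeap::collect(GCCause::_java_lang_system_gc) outside of a safepoint,
// which lands in vmentry_collect(); the VM thread then brings all Java threads to a
// safepoint and runs entry_collect() via the VM operation above.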

HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size) {
  HeapWord* res = allocate_work(size);
  if (res == NULL && EpsilonWhyNotGCAnyway) {
    // Allocation failed: run one GC cycle under a safepoint, then retry once.
    vmentry_collect(GCCause::_allocation_failure);
    res = allocate_work(size);
  }
  return res;
}

typedef Stack<oop, mtGC> EpsilonMarkStack;

void EpsilonHeap::do_roots(OopClosure* cl, bool everything) {
  // Need to adapt passed closure for some root types
  CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
  MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations);

  // Need to tell runtime we are about to walk the roots with 1 thread
  StrongRootsScope scope(1);

  // Need locks to walk some roots
  MutexLockerEx lock_cc(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  MutexLockerEx lock_cldg(ClassLoaderDataGraph_lock);

  // Walk all these different parts of runtime roots
  CodeCache::blobs_do(&blobs);
  ClassLoaderDataGraph::cld_do(&clds);
  Universe::oops_do(cl);
  Management::oops_do(cl);
  JvmtiExport::oops_do(cl);
  JNIHandles::oops_do(cl);
  WeakProcessor::oops_do(cl);
  ObjectSynchronizer::oops_do(cl);
  SystemDictionary::oops_do(cl);
  Threads::possibly_parallel_oops_do(false, cl, &blobs);

  // This is implicitly handled by other roots, and we only want to
  // touch these during verification.
  if (everything) {
    StringTable::oops_do(cl);
  }
}

// Walk the parsable heap and call the object closure on every object
// (optionally, only on the marked ones)
void EpsilonHeap::walk_heap(ObjectClosure* cl, bool only_marked) {
  HeapWord* cur = _space->bottom();
  HeapWord* limit = _space->top();
  while (cur < limit) {
    oop o = (oop)cur;
    // Even dead (unmarked) objects parse here: their class pointers are still
    // intact, so size() is valid and we can step over them.
    cur += o->size();
    if (!only_marked || o->is_gc_marked()) {
      cl->do_object(o);
    }
  }
}

class EpsilonScanOopClosure : public BasicOopIterateClosure {
private:
  EpsilonMarkStack* const _stack;
  PreservedMarks* const _preserved_marks;

  template <class T>
  void do_oop_work(T* p) {
    // Load the oop at the given location; it can be null.
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      markOop mark = obj->mark_raw();
      if (!mark->is_marked()) {
        // We are about to overwrite the mark word with the "marked" value.
        // If the current mark is not trivially restorable (e.g. it carries
        // locking state or an identity hash code), save it on the side first.
        if (mark->must_be_preserved(obj)) {
          _preserved_marks->push(obj, mark);
        }
        obj->set_mark_raw(markOopDesc::prototype()->set_marked());
        _stack->push(obj);
      }
    }
  }

public:
  EpsilonScanOopClosure(EpsilonMarkStack* stack, PreservedMarks* preserved_marks) :
          _stack(stack), _preserved_marks(preserved_marks) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
private:
  HeapWord* _compact_point;

public:
  EpsilonCalcNewLocationObjectClosure(HeapWord* bottom) : _compact_point(bottom) {}

  void do_object(oop obj) {
    // The new location is the current compact point; record it as the forwarding
    // pointer and slide the compact point past the object.
    obj->forward_to(oop(_compact_point));
    _compact_point += obj->size();
  }

  HeapWord* compact_point() {
    return _compact_point;
  }
};
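
// For example, with live objects A (3 words), B (2 words) and C (4 words) visited
// in address order, A forwards to bottom, B to bottom + 3, C to bottom + 5, and
// compact_point() ends up at bottom + 9 -- the new top after compaction.
// (Hypothetical sizes, for illustration only.)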

class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
private:
  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      oop fwd = obj->forwardee();
      if (!oopDesc::equals_raw(obj, fwd)) {
        RawAccess<>::oop_store(p, fwd);
      }
    }
  }

public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
public:
  void do_object(oop obj) {
    EpsilonAdjustPointersOopClosure cl;
    obj->oop_iterate(&cl);
  }
};

class EpsilonMoveObjects : public ObjectClosure {
public:
  void do_object(oop obj) {
    // Copy the object to its new location, and then reinitialize its mark word:
    // this drops both the "marked" bit and the forwarding pointer.
    oop fwd = obj->forwardee();
    if (!oopDesc::equals_raw(obj, fwd)) {
      Copy::aligned_conjoint_words((HeapWord*) obj, (HeapWord*) fwd, obj->size());
      fwd->init_mark_raw();
    } else {
      obj->init_mark_raw();
    }
  }
};
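
// Note the copy above is safe without an intermediate buffer: sliding compaction only
// ever moves an object to a lower address, and objects are visited in ascending address
// order, so a destination range never holds not-yet-moved data. aligned_conjoint_words()
// tolerates the remaining self-overlap within a single object.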

class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
private:
  EpsilonHeap* const _heap;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      guarantee(_heap->is_in(obj),    "Is in heap: "   PTR_FORMAT, p2i(obj));
      guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
      guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
    }
  }

public:
  EpsilonVerifyOopClosure() : _heap(EpsilonHeap::heap()) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonVerifyObjectClosure : public ObjectClosure {
private:
  EpsilonHeap* const _heap;
public:
  EpsilonVerifyObjectClosure() : _heap(EpsilonHeap::heap()) {}

  void do_object(oop obj) {
    guarantee(_heap->is_in(obj),    "Is in heap: "   PTR_FORMAT, p2i(obj));
    guarantee(oopDesc::is_oop(obj), "Is an object: " PTR_FORMAT, p2i(obj));
    guarantee(!obj->mark()->is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));
    EpsilonVerifyOopClosure cl;
    obj->oop_iterate(&cl);
  }
};

void EpsilonHeap::entry_collect(GCCause::Cause cause) {
  GCIdMark mark;
  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", NULL, cause, true);

  {
    GCTraceTime(Info, gc) time("Step 0: Prologue", NULL);

    // We need parsable heap to walk it.
    ensure_parsability(true);

    // Tell various parts of runtime we are doing GC.
    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();
    DerivedPointerTable::clear();
    DerivedPointerTable::set_active(false);
  }

  // We are going to store marking information (whether the object was reachable)
  // and forwarding information (where the new copy resides) in mark words.
  // Some of those mark words need to be carefully preserved. This is a utility
  // that maintains the list of those special mark words.
  PreservedMarks preserved_marks;
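  // For example, a mark word carrying an installed identity hash code or locking
  // state cannot simply be reset to the default prototype after the cycle; such
  // marks are saved here and restored verbatim in the epilogue (Step 5).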

  {
    GCTraceTime(Info, gc) time("Step 1: Mark", NULL);

    // Marking stack and the closure that does most of the work.
    // The closure would scan the outgoing references, mark them,
    // and push newly-marked objects to stack for further processing.
    EpsilonMarkStack stack;
    EpsilonScanOopClosure cl(&stack, &preserved_marks);

    // Seed the marking with roots.
    do_roots(&cl, /* everything = */ false);

    // Scan the rest of the heap until we run out of objects.
    // Termination is guaranteed, because all reachable objects would
    // be marked eventually.
    while (!stack.is_empty()) {
      oop obj = stack.pop();
      obj->oop_iterate(&cl);
    }
  }

  // New top of the allocated space.
  HeapWord* new_top;

  {
    GCTraceTime(Info, gc) time("Step 2: Calculate new locations", NULL);

    // Walk all alive objects, compute their new addresses and store those addresses
    // in mark words. Optionally preserve some marks.
    EpsilonCalcNewLocationObjectClosure cl(_space->bottom());
    walk_heap(&cl, /* only_marked = */ true);

    // After addresses are calculated, we know the new top for the allocated space.
    // We cannot set it just yet, because some asserts check that objects are "in heap"
    // based on current "top".
    new_top = cl.compact_point();
  }

  {
    GCTraceTime(Info, gc) time("Step 3: Adjust pointers", NULL);

    // Walk all alive objects _and their reference fields_, and put "new addresses"
    // there. We know the new addresses from the forwarding data in mark words.
    // Take care of the heap objects first.
    EpsilonAdjustPointersObjectClosure cl;
    walk_heap(&cl, /* only_marked = */ true);

    // Now do the same, but for all VM roots, which reference the objects on
    // their own: their references should also be updated.
    EpsilonAdjustPointersOopClosure cli;
    do_roots(&cli, /* everything = */ false);

    // Finally, make sure preserved marks know the objects are about to move.
    preserved_marks.adjust_during_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Step 4: Move objects", NULL);

    // Move all alive objects to their new locations. All the references are already
    // adjusted at the previous step.
    EpsilonMoveObjects cl;
    walk_heap(&cl, /* only_marked = */ true);

    // Now that we have moved all objects to their new locations, we can retract the
    // "top" of the allocation space to the end of the compacted prefix.
    _space->set_top(new_top);
  }

  {
    GCTraceTime(Info, gc) time("Step 5: Epilogue", NULL);

    // Restore all special mark words.
    preserved_marks.restore();

    // Tell the rest of runtime we have finished the GC.
    DerivedPointerTable::update_pointers();
    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();
  }

  if (EpsilonVerify) {
    GCTraceTime(Info, gc) time("Step 6: Verify", NULL);

    // Verify all roots are correct.
    EpsilonVerifyOopClosure cl;
    do_roots(&cl, /* everything = */ true);

    // Verify all objects in heap are correct. Since we have compacted everything
    // to the beginning, the heap is parsable right now, and we can just walk all
    // objects and verify them.
    EpsilonVerifyObjectClosure ocl;
    walk_heap(&ocl, /* only_marked = */ false);
  }

}
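
// A sketch of how this experimental cycle could be exercised, using the flags
// referenced above:
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
//        -XX:+EpsilonWhyNotGCAnyway -XX:+EpsilonVerify ...
// With these, allocation failures and explicit GC requests run the mark-compact
// cycle, and every cycle is followed by the Step 6 heap verification.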