8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25
26 #include "code/codeCache.hpp"
27 #include "gc/shared/gcTraceTime.inline.hpp"
28 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
29 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
30 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
31 #include "gc/shenandoah/shenandoahFreeSet.hpp"
32 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
33 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
34 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
35 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
36 #include "gc/shenandoah/shenandoahHeuristics.hpp"
37 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
38 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
39 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
40 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
41 #include "gc/shenandoah/shenandoahUtils.hpp"
42 #include "gc/shenandoah/shenandoahVerifier.hpp"
43 #include "gc/shenandoah/shenandoahVMOperations.hpp"
44 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
45 #include "memory/metaspace.hpp"
46 #include "oops/oop.inline.hpp"
47 #include "runtime/thread.hpp"
48 #include "utilities/copy.hpp"
211 ShenandoahConcurrentMark* cm = heap->concurrent_mark();
212
213 heap->set_process_references(heap->heuristics()->can_process_references());
214 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
215
216 ReferenceProcessor* rp = heap->ref_processor();
217 // enable ("weak") refs discovery
218 rp->enable_discovery(true /*verify_no_refs*/);
219 rp->setup_policy(true); // forcefully purge all soft references
220 rp->set_active_mt_degree(heap->workers()->active_workers());
221
222 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
223 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
224 cm->finish_mark_from_roots(/* full_gc = */ true);
225
226 heap->mark_complete_marking_context();
227 }
228
// Computes forwarding addresses for all live objects in one worker slice,
// sliding them towards the bottom of the slice (the "prepare" step of sliding
// compaction). Objects are visited in address order; each is assigned the
// next free address in the current to-region. In this scheme every object is
// preceded by an extra forwarding word (ShenandoahForwarding::word_size()).
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  // Regions that became fully empty during compaction; reusable as to-regions.
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;               // next reusable entry in _empty_regions
  ShenandoahHeapRegion* _to_region;     // region currently being filled
  ShenandoahHeapRegion* _from_region;   // region currently being scanned
  HeapWord* _compact_point;             // next free address in _to_region

public:
  ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  // Caller must set the from-region before iterating its marked objects.
  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  // Publish the final fill level of the current to-region.
  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  // True when the last from-region compacted into itself (i.e. it did not
  // become empty).
  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // Number of empty regions already consumed as compaction targets.
  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    // Account for the forwarding word that precedes every object.
    size_t obj_size = p->size() + ShenandoahForwarding::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    // The forwardee points past the forwarding word at the new location.
    ShenandoahForwarding::set_forwardee_raw(p, _compact_point + ShenandoahForwarding::word_size());
    _compact_point += obj_size;
  }
};
296
// Parallel task that computes compaction addresses for regular (non-humongous)
// regions. Each worker claims regions from the shared iterator, records the
// claim order in its own slice (so the later copy step walks the same regions
// in the same order), and runs the sliding-compaction closure over them.
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;  // per-worker slices, indexed by worker_id
  ShenandoahRegionIterator _heap_regions;          // shared claim iterator over all regions

  // Claims the next region this worker should compact, skipping regions that
  // cannot be moved and humongous regions (those are handled separately).
  // The claimed region is appended to the worker's slice so the compaction
  // step can revisit it in the same order.
  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};
360
361 void ShenandoahMarkCompact::calculate_target_humongous_objects() {
362 ShenandoahHeap* heap = ShenandoahHeap::heap();
363
364 // Compute the new addresses for humongous objects. We need to do this after addresses
365 // for regular objects are calculated, and we know what regions in heap suffix are
366 // available for humongous moves.
367 //
368 // Scan the heap backwards, because we are compacting humongous regions towards the end.
369 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
370 // humongous start there.
371 //
372 // The complication is potential non-movable regions during the scan. If such region is
373 // detected, then sliding restarts towards that non-movable region.
374
375 size_t to_begin = heap->num_regions();
376 size_t to_end = heap->num_regions();
377
378 for (size_t c = heap->num_regions() - 1; c > 0; c--) {
379 ShenandoahHeapRegion *r = heap->get_region(c);
380 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
381 // To-region candidate: record this, and continue scan
382 to_begin = r->region_number();
383 continue;
384 }
385
386 if (r->is_humongous_start() && r->is_move_allowed()) {
387 // From-region candidate: movable humongous region
388 oop old_obj = oop(r->bottom() + ShenandoahForwarding::word_size());
389 size_t words_size = old_obj->size() + ShenandoahForwarding::word_size();
390 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
391
392 size_t start = to_end - num_regions;
393
394 if (start >= to_begin && start != r->region_number()) {
395 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
396 ShenandoahForwarding::set_forwardee_raw(old_obj, heap->get_region(start)->bottom() + ShenandoahForwarding::word_size());
397 to_end = start;
398 continue;
399 }
400 }
401
402 // Failed to fit. Scan starting from current region.
403 to_begin = r->region_number();
404 to_end = r->region_number();
405 }
406 }
407
408 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
409 private:
410 ShenandoahHeap* const _heap;
411
412 public:
413 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
414 void heap_region_do(ShenandoahHeapRegion* r) {
415 if (r->is_trash()) {
416 r->recycle();
424 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());
425
426 // Record current region occupancy: this communicates empty regions are free
427 // to the rest of Full GC code.
428 r->set_new_top(r->top());
429 }
430 };
431
432 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
433 private:
434 ShenandoahHeap* const _heap;
435 ShenandoahMarkingContext* const _ctx;
436
437 public:
438 ShenandoahTrashImmediateGarbageClosure() :
439 _heap(ShenandoahHeap::heap()),
440 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
441
442 void heap_region_do(ShenandoahHeapRegion* r) {
443 if (r->is_humongous_start()) {
444 oop humongous_obj = oop(r->bottom() + ShenandoahForwarding::word_size());
445 if (!_ctx->is_marked(humongous_obj)) {
446 assert(!r->has_live(),
447 "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
448 _heap->trash_humongous_region_at(r);
449 } else {
450 assert(r->has_live(),
451 "Region " SIZE_FORMAT " should have live", r->region_number());
452 }
453 } else if (r->is_humongous_continuation()) {
454 // If we hit continuation, the non-live humongous starts should have been trashed already
455 assert(r->humongous_start_region()->has_live(),
456 "Region " SIZE_FORMAT " should have live", r->region_number());
457 } else if (r->is_regular()) {
458 if (!r->has_live()) {
459 r->make_trash_immediate();
460 }
461 }
462 }
463 };
464
465 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
466 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
467 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
468
469 ShenandoahHeap* heap = ShenandoahHeap::heap();
470
471 {
472 // Trash the immediately collectible regions before computing addresses
473 ShenandoahTrashImmediateGarbageClosure tigcl;
474 heap->heap_region_iterate(&tigcl);
475
476 // Make sure regions are in good state: committed, active, clean.
477 // This is needed because we are potentially sliding the data through them.
478 ShenandoahEnsureHeapActiveClosure ecl;
479 heap->heap_region_iterate(&ecl);
480 }
481
482 // Compute the new addresses for regular objects
483 {
484 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
485 ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
486 heap->workers()->run_task(&prepare_task);
487 }
488
489 // Compute the new addresses for humongous objects
490 {
491 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
492 calculate_target_humongous_objects();
493 }
494 }
495
496 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
497 private:
498 ShenandoahHeap* const _heap;
499 ShenandoahMarkingContext* const _ctx;
500
501 template <class T>
502 inline void do_oop_work(T* p) {
503 T o = RawAccess<>::oop_load(p);
504 if (!CompressedOops::is_null(o)) {
505 oop obj = CompressedOops::decode_not_null(o);
506 assert(_ctx->is_marked(obj), "must be marked");
507 oop forw = oop(ShenandoahForwarding::get_forwardee_raw(obj));
508 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
509 }
510 }
511
512 public:
513 ShenandoahAdjustPointersClosure() :
514 _heap(ShenandoahHeap::heap()),
515 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
516
517 void do_oop(oop* p) { do_oop_work(p); }
518 void do_oop(narrowOop* p) { do_oop_work(p); }
519 };
520
521 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
522 private:
523 ShenandoahHeap* const _heap;
524 ShenandoahAdjustPointersClosure _cl;
525
526 public:
527 ShenandoahAdjustPointersObjectClosure() :
528 _heap(ShenandoahHeap::heap()) {
529 }
530 void do_object(oop p) {
531 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
532 HeapWord* forw = ShenandoahForwarding::get_forwardee_raw(p);
533 p->oop_iterate(&_cl);
534 }
535 };
536
537 class ShenandoahAdjustPointersTask : public AbstractGangTask {
538 private:
539 ShenandoahHeap* const _heap;
540 ShenandoahRegionIterator _regions;
541
542 public:
543 ShenandoahAdjustPointersTask() :
544 AbstractGangTask("Shenandoah Adjust Pointers Task"),
545 _heap(ShenandoahHeap::heap()) {
546 }
547
548 void work(uint worker_id) {
549 ShenandoahAdjustPointersObjectClosure obj_cl;
550 ShenandoahHeapRegion* r = _regions.next();
551 while (r != NULL) {
552 if (!r->is_humongous_continuation() && r->has_live()) {
583 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
584
585 ShenandoahHeap* heap = ShenandoahHeap::heap();
586
587 WorkGang* workers = heap->workers();
588 uint nworkers = workers->active_workers();
589 {
590 #if COMPILER2_OR_JVMCI
591 DerivedPointerTable::clear();
592 #endif
593 ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
594 ShenandoahAdjustRootPointersTask task(&rp);
595 workers->run_task(&task);
596 #if COMPILER2_OR_JVMCI
597 DerivedPointerTable::update_pointers();
598 #endif
599 }
600
601 ShenandoahAdjustPointersTask adjust_pointers_task;
602 workers->run_task(&adjust_pointers_task);
603 }
604
605 class ShenandoahCompactObjectsClosure : public ObjectClosure {
606 private:
607 ShenandoahHeap* const _heap;
608 uint const _worker_id;
609
610 public:
611 ShenandoahCompactObjectsClosure(uint worker_id) :
612 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
613
614 void do_object(oop p) {
615 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
616 size_t size = (size_t)p->size();
617 HeapWord* compact_to = ShenandoahForwarding::get_forwardee_raw(p);
618 HeapWord* compact_from = (HeapWord*) p;
619 if (compact_from != compact_to) {
620 Copy::aligned_conjoint_words(compact_from, compact_to, size);
621 }
622 oop new_obj = oop(compact_to);
623 ShenandoahForwarding::initialize(new_obj);
624 }
625 };
626
627 class ShenandoahCompactObjectsTask : public AbstractGangTask {
628 private:
629 ShenandoahHeap* const _heap;
630 ShenandoahHeapRegionSet** const _worker_slices;
631
632 public:
633 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
634 AbstractGangTask("Shenandoah Compact Objects Task"),
635 _heap(ShenandoahHeap::heap()),
636 _worker_slices(worker_slices) {
637 }
638
639 void work(uint worker_id) {
640 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
641
642 ShenandoahCompactObjectsClosure cl(worker_id);
643 ShenandoahHeapRegion* r = slice.next();
694
695 r->set_live_data(live);
696 r->reset_alloc_metadata_to_shared();
697 _live += live;
698 }
699
700 size_t get_live() {
701 return _live;
702 }
703 };
704
705 void ShenandoahMarkCompact::compact_humongous_objects() {
706 // Compact humongous regions, based on their fwdptr objects.
707 //
708 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
709 // humongous regions are already compacted, and do not require further moves, which alleviates
710 // sliding costs. We may consider doing this in parallel in future.
711
712 ShenandoahHeap* heap = ShenandoahHeap::heap();
713
714 for (size_t c = heap->num_regions() - 1; c > 0; c--) {
715 ShenandoahHeapRegion* r = heap->get_region(c);
716 if (r->is_humongous_start()) {
717 oop old_obj = oop(r->bottom() + ShenandoahForwarding::word_size());
718 size_t words_size = old_obj->size() + ShenandoahForwarding::word_size();
719 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
720
721 size_t old_start = r->region_number();
722 size_t old_end = old_start + num_regions - 1;
723 size_t new_start = heap->heap_region_index_containing(ShenandoahForwarding::get_forwardee_raw(old_obj));
724 size_t new_end = new_start + num_regions - 1;
725
726 if (old_start == new_start) {
727 // No need to move the object, it stays at the same slot
728 continue;
729 }
730
731 assert (r->is_move_allowed(), "should be movable");
732
733 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
734 heap->get_region(new_start)->bottom(),
735 ShenandoahHeapRegion::region_size_words()*num_regions);
736
737 oop new_obj = oop(heap->get_region(new_start)->bottom() + ShenandoahForwarding::word_size());
738 ShenandoahForwarding::initialize(new_obj);
739
740 {
741 for (size_t c = old_start; c <= old_end; c++) {
742 ShenandoahHeapRegion* r = heap->get_region(c);
743 r->make_regular_bypass();
744 r->set_top(r->bottom());
745 }
746
747 for (size_t c = new_start; c <= new_end; c++) {
748 ShenandoahHeapRegion* r = heap->get_region(c);
749 if (c == new_start) {
750 r->make_humongous_start_bypass();
751 } else {
752 r->make_humongous_cont_bypass();
753 }
754
755 // Trailing region may be non-full, record the remainder there
756 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
757 if ((c == new_end) && (remainder != 0)) {
758 r->set_top(r->bottom() + remainder);
798
799 void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
800 GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
801 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
802
803 ShenandoahHeap* heap = ShenandoahHeap::heap();
804
805 // Compact regular objects first
806 {
807 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
808 ShenandoahCompactObjectsTask compact_task(worker_slices);
809 heap->workers()->run_task(&compact_task);
810 }
811
812 // Compact humongous objects after regular object moves
813 {
814 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
815 compact_humongous_objects();
816 }
817
818 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
819 // and must ensure the bitmap is in sync.
820 {
821 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
822 ShenandoahMCResetCompleteBitmapTask task;
823 heap->workers()->run_task(&task);
824 }
825
826 // Bring regions in proper states after the collection, and set heap properties.
827 {
828 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
829
830 ShenandoahPostCompactClosure post_compact;
831 heap->heap_region_iterate(&post_compact);
832 heap->set_used(post_compact.get_live());
833
834 heap->collection_set()->clear();
835 heap->free_set()->rebuild();
836 }
837
838 heap->clear_cancelled_gc();
839 }
|
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25
26 #include "code/codeCache.hpp"
27 #include "gc/shared/gcTraceTime.inline.hpp"
28 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
29 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
30 #include "gc/shenandoah/shenandoahFreeSet.hpp"
31 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
32 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
33 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
34 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
35 #include "gc/shenandoah/shenandoahHeuristics.hpp"
36 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
37 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
38 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
39 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
40 #include "gc/shenandoah/shenandoahUtils.hpp"
41 #include "gc/shenandoah/shenandoahVerifier.hpp"
42 #include "gc/shenandoah/shenandoahVMOperations.hpp"
43 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
44 #include "memory/metaspace.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
210 ShenandoahConcurrentMark* cm = heap->concurrent_mark();
211
212 heap->set_process_references(heap->heuristics()->can_process_references());
213 heap->set_unload_classes(heap->heuristics()->can_unload_classes());
214
215 ReferenceProcessor* rp = heap->ref_processor();
216 // enable ("weak") refs discovery
217 rp->enable_discovery(true /*verify_no_refs*/);
218 rp->setup_policy(true); // forcefully purge all soft references
219 rp->set_active_mt_degree(heap->workers()->active_workers());
220
221 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
222 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
223 cm->finish_mark_from_roots(/* full_gc = */ true);
224
225 heap->mark_complete_marking_context();
226 }
227
// Computes forwarding addresses for all live objects in one worker slice,
// sliding them towards the bottom of the slice. This variant records the
// forwardee in the object's mark word via forward_to(); preserve_mark() is
// called first to save the original mark word before it is overwritten.
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  ShenandoahMarkCompact* const _mark_compact;  // owner; collects preserved marks
  ShenandoahHeap* const _heap;
  // Regions that became fully empty during compaction; reusable as to-regions.
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;               // next reusable entry in _empty_regions
  ShenandoahHeapRegion* _to_region;     // region currently being filled
  ShenandoahHeapRegion* _from_region;   // region currently being scanned
  HeapWord* _compact_point;             // next free address in _to_region

public:
  ShenandoahPrepareForCompactionObjectClosure(ShenandoahMarkCompact* mc, GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
    _mark_compact(mc),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  // Caller must set the from-region before iterating its marked objects.
  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  // Publish the final fill level of the current to-region.
  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  // True when the last from-region compacted into itself (i.e. it did not
  // become empty).
  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  // Number of empty regions already consumed as compaction targets.
  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    // No per-object forwarding word in this scheme: the mark word carries it.
    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty region? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    // Save the original mark word, then install the forwardee in its place.
    _mark_compact->preserve_mark(p);
    p->forward_to(oop(_compact_point));
    _compact_point += obj_size;
  }
};
298
// Parallel task that computes compaction addresses for regular (non-humongous)
// regions. Each worker claims regions from the shared iterator, records the
// claim order in its own slice (so the later copy step walks the same regions
// in the same order), and runs the sliding-compaction closure over them.
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  ShenandoahMarkCompact* const _mark_compact;      // passed to the closure for mark preservation
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;  // per-worker slices, indexed by worker_id
  ShenandoahRegionIterator _heap_regions;          // shared claim iterator over all regions

  // Claims the next region this worker should compact, skipping regions that
  // cannot be moved and humongous regions (those are handled separately).
  // The claimed region is appended to the worker's slice so the compaction
  // step can revisit it in the same order.
  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    while (from_region != NULL && (!from_region->is_move_allowed() || from_region->is_humongous())) {
      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahMarkCompact* mc, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _mark_compact(mc),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);
    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(_mark_compact, empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};
364
// Computes compaction targets for humongous objects, after regular-object
// addresses are known. Forwarding is recorded in the object's mark word.
void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  // Iterate c over (num_regions(), 0] and inspect region (c - 1): includes
  // region 0 and avoids size_t underflow in the loop condition.
  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      // Candidate start of the new placement within the current window.
      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}
411
412 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
413 private:
414 ShenandoahHeap* const _heap;
415
416 public:
417 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
418 void heap_region_do(ShenandoahHeapRegion* r) {
419 if (r->is_trash()) {
420 r->recycle();
428 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number());
429
430 // Record current region occupancy: this communicates empty regions are free
431 // to the rest of Full GC code.
432 r->set_new_top(r->top());
433 }
434 };
435
436 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
437 private:
438 ShenandoahHeap* const _heap;
439 ShenandoahMarkingContext* const _ctx;
440
441 public:
442 ShenandoahTrashImmediateGarbageClosure() :
443 _heap(ShenandoahHeap::heap()),
444 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
445
446 void heap_region_do(ShenandoahHeapRegion* r) {
447 if (r->is_humongous_start()) {
448 oop humongous_obj = oop(r->bottom());
449 if (!_ctx->is_marked(humongous_obj)) {
450 assert(!r->has_live(),
451 "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number());
452 _heap->trash_humongous_region_at(r);
453 } else {
454 assert(r->has_live(),
455 "Region " SIZE_FORMAT " should have live", r->region_number());
456 }
457 } else if (r->is_humongous_continuation()) {
458 // If we hit continuation, the non-live humongous starts should have been trashed already
459 assert(r->humongous_start_region()->has_live(),
460 "Region " SIZE_FORMAT " should have live", r->region_number());
461 } else if (r->is_regular()) {
462 if (!r->has_live()) {
463 r->make_trash_immediate();
464 }
465 }
466 }
467 };
468
// Phase 2 of Shenandoah Full GC: compute the post-compaction target address
// for every live object. Targets are recorded as forwarding pointers, which
// phase 3 (pointer adjustment) and phase 4 (object moves) consume.
// The steps below are order-dependent: regions must be trashed and brought
// to an active/committed state before any addresses are computed.
void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects, in parallel over
  // per-worker region slices
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
    ShenandoahPrepareForCompactionTask prepare_task(this, worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects (handled separately
  // from the sliding compaction of regular objects)
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}
499
500 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
501 private:
502 ShenandoahHeap* const _heap;
503 ShenandoahMarkingContext* const _ctx;
504
505 template <class T>
506 inline void do_oop_work(T* p) {
507 T o = RawAccess<>::oop_load(p);
508 if (!CompressedOops::is_null(o)) {
509 oop obj = CompressedOops::decode_not_null(o);
510 assert(_ctx->is_marked(obj), "must be marked");
511 if (obj->is_forwarded()) {
512 oop forw = obj->forwardee();
513 RawAccess<IS_NOT_NULL>::oop_store(p, forw);
514 }
515 }
516 }
517
518 public:
519 ShenandoahAdjustPointersClosure() :
520 _heap(ShenandoahHeap::heap()),
521 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
522
523 void do_oop(oop* p) { do_oop_work(p); }
524 void do_oop(narrowOop* p) { do_oop_work(p); }
525 };
526
527 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
528 private:
529 ShenandoahHeap* const _heap;
530 ShenandoahAdjustPointersClosure _cl;
531
532 public:
533 ShenandoahAdjustPointersObjectClosure() :
534 _heap(ShenandoahHeap::heap()) {
535 }
536 void do_object(oop p) {
537 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
538 p->oop_iterate(&_cl);
539 }
540 };
541
542 class ShenandoahAdjustPointersTask : public AbstractGangTask {
543 private:
544 ShenandoahHeap* const _heap;
545 ShenandoahRegionIterator _regions;
546
547 public:
548 ShenandoahAdjustPointersTask() :
549 AbstractGangTask("Shenandoah Adjust Pointers Task"),
550 _heap(ShenandoahHeap::heap()) {
551 }
552
553 void work(uint worker_id) {
554 ShenandoahAdjustPointersObjectClosure obj_cl;
555 ShenandoahHeapRegion* r = _regions.next();
556 while (r != NULL) {
557 if (!r->is_humongous_continuation() && r->has_live()) {
588 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
589
590 ShenandoahHeap* heap = ShenandoahHeap::heap();
591
592 WorkGang* workers = heap->workers();
593 uint nworkers = workers->active_workers();
594 {
595 #if COMPILER2_OR_JVMCI
596 DerivedPointerTable::clear();
597 #endif
598 ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
599 ShenandoahAdjustRootPointersTask task(&rp);
600 workers->run_task(&task);
601 #if COMPILER2_OR_JVMCI
602 DerivedPointerTable::update_pointers();
603 #endif
604 }
605
606 ShenandoahAdjustPointersTask adjust_pointers_task;
607 workers->run_task(&adjust_pointers_task);
608
609 adjust_marks();
610 }
611
612 class ShenandoahCompactObjectsClosure : public ObjectClosure {
613 private:
614 ShenandoahHeap* const _heap;
615 uint const _worker_id;
616
617 public:
618 ShenandoahCompactObjectsClosure(uint worker_id) :
619 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
620
621 void do_object(oop p) {
622 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
623 size_t size = (size_t)p->size();
624 if (p->is_forwarded()) {
625 HeapWord* compact_from = (HeapWord*) p;
626 HeapWord* compact_to = (HeapWord*) p->forwardee();
627 Copy::aligned_conjoint_words(compact_from, compact_to, size);
628 oop new_obj = oop(compact_to);
629 new_obj->init_mark();
630 }
631 }
632 };
633
634 class ShenandoahCompactObjectsTask : public AbstractGangTask {
635 private:
636 ShenandoahHeap* const _heap;
637 ShenandoahHeapRegionSet** const _worker_slices;
638
639 public:
640 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
641 AbstractGangTask("Shenandoah Compact Objects Task"),
642 _heap(ShenandoahHeap::heap()),
643 _worker_slices(worker_slices) {
644 }
645
646 void work(uint worker_id) {
647 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
648
649 ShenandoahCompactObjectsClosure cl(worker_id);
650 ShenandoahHeapRegion* r = slice.next();
701
702 r->set_live_data(live);
703 r->reset_alloc_metadata_to_shared();
704 _live += live;
705 }
706
707 size_t get_live() {
708 return _live;
709 }
710 };
711
712 void ShenandoahMarkCompact::compact_humongous_objects() {
713 // Compact humongous regions, based on their fwdptr objects.
714 //
715 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
716 // humongous regions are already compacted, and do not require further moves, which alleviates
717 // sliding costs. We may consider doing this in parallel in future.
718
719 ShenandoahHeap* heap = ShenandoahHeap::heap();
720
721 for (size_t c = heap->num_regions(); c > 0; c--) {
722 ShenandoahHeapRegion* r = heap->get_region(c - 1);
723 if (r->is_humongous_start()) {
724 oop old_obj = oop(r->bottom());
725 if (!old_obj->is_forwarded()) {
726 // No need to move the object, it stays at the same slot
727 continue;
728 }
729 size_t words_size = old_obj->size();
730 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
731
732 size_t old_start = r->region_number();
733 size_t old_end = old_start + num_regions - 1;
734 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
735 size_t new_end = new_start + num_regions - 1;
736 assert(old_start != new_start, "must be real move");
737 assert (r->is_move_allowed(), "should be movable");
738
739 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
740 heap->get_region(new_start)->bottom(),
741 ShenandoahHeapRegion::region_size_words()*num_regions);
742
743 oop new_obj = oop(heap->get_region(new_start)->bottom());
744 new_obj->init_mark();
745
746 {
747 for (size_t c = old_start; c <= old_end; c++) {
748 ShenandoahHeapRegion* r = heap->get_region(c);
749 r->make_regular_bypass();
750 r->set_top(r->bottom());
751 }
752
753 for (size_t c = new_start; c <= new_end; c++) {
754 ShenandoahHeapRegion* r = heap->get_region(c);
755 if (c == new_start) {
756 r->make_humongous_start_bypass();
757 } else {
758 r->make_humongous_cont_bypass();
759 }
760
761 // Trailing region may be non-full, record the remainder there
762 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
763 if ((c == new_end) && (remainder != 0)) {
764 r->set_top(r->bottom() + remainder);
804
// Phase 4 of Shenandoah Full GC: physically move objects to the target
// addresses computed in phase 2, then restore preserved mark words and bring
// heap/region bookkeeping back to a consistent post-GC state. The sub-phases
// are order-dependent: regular moves must complete before humongous moves,
// and marks can only be restored after all moves are done.
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Put back the mark words that preserve_mark() saved before compaction.
  restore_marks();

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions in proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    // Post-compact closure accumulated live data across regions; that is the
    // new heap usage.
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  // Full GC supersedes any in-flight GC cancellation request.
  heap->clear_cancelled_gc();
}
848
849 void ShenandoahMarkCompact::preserve_mark(oop obj) {
850 markOop mark = obj->mark_raw();
851 if (mark->must_be_preserved(obj)) {
852 MutexLocker ml(&_preserved_mark_lock, Mutex::_no_safepoint_check_flag);
853 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
854 "inconsistent preserved oop stacks: oop_stack size: " SIZE_FORMAT ", mark_stack size: " SIZE_FORMAT, _preserved_oop_stack.size(), _preserved_mark_stack.size());
855 _preserved_mark_stack.push(mark);
856 _preserved_oop_stack.push(obj);
857 }
858 }
859
860 void ShenandoahMarkCompact::restore_marks() {
861 MutexLocker ml(&_preserved_mark_lock, Mutex::_no_safepoint_check_flag);
862 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
863 "inconsistent preserved oop stacks: oop_stack size: " SIZE_FORMAT ", mark_stack size: " SIZE_FORMAT, _preserved_oop_stack.size(), _preserved_mark_stack.size());
864 while (!_preserved_oop_stack.is_empty()) {
865 oop obj = _preserved_oop_stack.pop();
866 markOop mark = _preserved_mark_stack.pop();
867 obj->set_mark_raw(mark);
868 }
869 }
870
871 void ShenandoahMarkCompact::adjust_marks() {
872 MutexLocker ml(&_preserved_mark_lock, Mutex::_no_safepoint_check_flag);
873 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
874 "inconsistent preserved oop stacks: oop_stack size: " SIZE_FORMAT ", mark_stack size: " SIZE_FORMAT, _preserved_oop_stack.size(), _preserved_mark_stack.size());
875 StackIterator<oop, mtGC> iter(_preserved_oop_stack);
876 while (!iter.is_empty()) {
877 oop* p = iter.next_addr();
878 oop obj = *p;
879 oop fwd = obj->forwardee();
880 *p = fwd;
881 }
882 }
|