// Fast queries of the current GC cycle state (presumably implemented in the
// corresponding .inline.hpp — TODO confirm against the definition file).
302 inline bool is_update_refs_in_progress() const;
303 inline bool is_evacuation_in_progress() const;
304 inline bool is_degenerated_gc_in_progress() const;
305 inline bool is_full_gc_in_progress() const;
306 inline bool is_full_gc_move_in_progress() const;
307 inline bool has_forwarded_objects() const;
308 inline bool is_gc_in_progress_mask(uint mask) const;
309 inline bool is_stw_gc_in_progress() const;
310 inline bool is_concurrent_strong_root_in_progress() const;
311 inline bool is_concurrent_weak_root_in_progress() const;
312
313 // ---------- GC cancellation and degeneration machinery
314 //
315 // Cancelled GC flag is used to notify concurrent phases that they should terminate.
316 //
317 public:
318 enum ShenandoahDegenPoint {
319 _degenerated_unset,            // No degeneration point recorded
320 _degenerated_outside_cycle,    // Degenerated outside of a concurrent cycle
321 _degenerated_mark,             // Degenerated at concurrent mark
322 _degenerated_evac,             // Degenerated at evacuation
323 _degenerated_updaterefs,       // Degenerated at update-references
324 _DEGENERATED_LIMIT             // Sentinel: count of degeneration points, not a valid point
325 };
326
// Human-readable name of a degeneration point, for logging/tracing.
// Returns an immortal static string; hits ShouldNotReachHere() on any
// value outside the enumerated points (including _DEGENERATED_LIMIT).
327 static const char* degen_point_to_string(ShenandoahDegenPoint point) {
328 switch (point) {
329 case _degenerated_unset:
330 return "<UNSET>";
331 case _degenerated_outside_cycle:
332 return "Outside of Cycle";
333 case _degenerated_mark:
334 return "Mark";
335 case _degenerated_evac:
336 return "Evacuation";
337 case _degenerated_updaterefs:
338 return "Update Refs";
339 default:
340 ShouldNotReachHere();
341 return "ERROR";
342 }
343 }
344
345 private:
346 enum CancelState {
347 // Normal state. GC has not been cancelled and is open for cancellation.
348 // Worker threads can suspend for safepoint.
349 CANCELLABLE,
350
351 // GC has been cancelled. Worker threads can not suspend for
352 // safepoint but must finish their work as soon as possible.
353 CANCELLED,
354
355 // GC has not been cancelled and must not be cancelled. At least
356 // one worker thread checks for pending safepoint and may suspend
357 // if a safepoint is pending.
358 NOT_CANCELLED
361 ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
362 bool try_cancel_gc();
363
364 public:
365 static address cancelled_gc_addr();
366
367 inline bool cancelled_gc() const;
368 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
369
370 inline void clear_cancelled_gc();
371
372 void cancel_gc(GCCause::Cause cause);
373
374 // ---------- GC operations entry points
375 //
376 public:
377 // Entry points to STW GC operations, these cause a related safepoint, that then
378 // call the entry method below
379 void vmop_entry_init_mark();
380 void vmop_entry_final_mark();
381 void vmop_entry_init_updaterefs();
382 void vmop_entry_final_updaterefs();
383 void vmop_entry_full(GCCause::Cause cause);
384 void vmop_degenerated(ShenandoahDegenPoint point);
385
386 // Entry methods to normally STW GC operations. These set up logging, monitoring
387 // and workers for net VM operation
388 void entry_init_mark();
389 void entry_final_mark();
390 void entry_init_updaterefs();
391 void entry_final_updaterefs();
392 void entry_full(GCCause::Cause cause);
393 void entry_degenerated(int point);
394
395 // Entry methods to normally concurrent GC operations. These set up logging, monitoring
396 // for concurrent operation.
397 void entry_reset();
398 void entry_mark();
399 void entry_preclean();
400 void entry_weak_roots();
401 void entry_class_unloading();
402 void entry_strong_roots();
403 void entry_cleanup_early();
404 void entry_evac();
405 void entry_updaterefs();
406 void entry_cleanup_complete();
407 void entry_uncommit(double shrink_before);
408
409 private:
410 // Actual work for the phases
411 void op_init_mark();
412 void op_final_mark();
413 void op_init_updaterefs();
414 void op_final_updaterefs();
415 void op_full(GCCause::Cause cause);
416 void op_degenerated(ShenandoahDegenPoint point);
417 void op_degenerated_fail();
418 void op_degenerated_futile();
419
420 void op_reset();
421 void op_mark();
422 void op_preclean();
423 void op_weak_roots();
424 void op_class_unloading();
425 void op_strong_roots();
426 void op_cleanup_early();
427 void op_conc_evac();
428 void op_stw_evac();
429 void op_updaterefs();
430 void op_cleanup_complete();
431 void op_uncommit(double shrink_before);
432
433 // Messages for GC trace events, they have to be immortal for
434 // passing around the logging/tracing systems
435 const char* init_mark_event_message() const;
436 const char* final_mark_event_message() const;
437 const char* conc_mark_event_message() const;
438 const char* degen_event_message(ShenandoahDegenPoint point) const;
439
440 // ---------- GC subsystems
441 //
442 private:
443 ShenandoahControlThread* _control_thread;
444 ShenandoahCollectorPolicy* _shenandoah_policy;
445 ShenandoahMode* _gc_mode;
446 ShenandoahHeuristics* _heuristics;
447 ShenandoahFreeSet* _free_set;
448 ShenandoahConcurrentMark* _scm;
449 ShenandoahMarkCompact* _full_gc;
629 size_t _bitmap_regions_per_slice;
630 size_t _bitmap_bytes_per_slice;
631
632 bool _bitmap_region_special;
633 bool _aux_bitmap_region_special;
634
635 ShenandoahLiveData** _liveness_cache;
636
637 public:
638 inline ShenandoahMarkingContext* complete_marking_context() const;
639 inline ShenandoahMarkingContext* marking_context() const;
640 inline void mark_complete_marking_context();
641 inline void mark_incomplete_marking_context();
642
643 template<class T>
644 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
645
646 template<class T>
647 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
648
649 template<class T>
650 inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
651
652 void reset_mark_bitmap();
653
654 // SATB barriers hooks
655 template<bool RESOLVE>
656 inline bool requires_marking(const void* entry) const;
657 void force_satb_flush_all_threads();
658
659 // Support for bitmap uncommits
660 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
661 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
662 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
663
664 // Liveness caching support
665 ShenandoahLiveData* get_liveness_cache(uint worker_id);
666 void flush_liveness_cache(uint worker_id);
667
668 // ---------- Evacuation support
669 //
670 private:
671 ShenandoahCollectionSet* _collection_set;
672 ShenandoahEvacOOMHandler _oom_evac_handler;
673
674 void evacuate_and_update_roots();
675
676 public:
677 static address in_cset_fast_test_addr();
678
679 ShenandoahCollectionSet* collection_set() const { return _collection_set; } // Accessor for the current collection set
680
681 // Checks if object is in the collection set.
682 inline bool in_collection_set(oop obj) const;
683
684 // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
685 inline bool in_collection_set_loc(void* loc) const;
686
687 // Evacuates object src. Returns the evacuated object, either evacuated
688 // by this thread, or by some other thread.
689 inline oop evacuate_object(oop src, Thread* thread);
690
691 // Call before/after evacuation.
692 void enter_evacuation();
693 void leave_evacuation();
694
695 // ---------- Helper functions
696 //
697 public:
698 template <class T>
699 inline oop evac_update_with_forwarded(T* p);
700
701 template <class T>
702 inline oop maybe_update_with_forwarded(T* p);
703
704 template <class T>
705 inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
706
707 template <class T>
708 inline oop update_with_forwarded_not_null(T* p, oop obj);
709
710 static inline oop cas_oop(oop n, narrowOop* addr, oop c);
711 static inline oop cas_oop(oop n, oop* addr, oop c);
712 static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c);
713
714 void trash_humongous_region_at(ShenandoahHeapRegion *r);
715
716 void deduplicate_string(oop str);
717
718 private:
719 void trash_cset_regions();
720 void update_heap_references(bool concurrent);
721
722 // ---------- Testing helpers functions
723 //
724 private:
725 ShenandoahSharedFlag _inject_alloc_failure;
726
727 void try_inject_alloc_failure();
728 bool should_inject_alloc_failure();
729 };
730
731 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
|
// Fast queries of the current GC cycle state (presumably implemented in the
// corresponding .inline.hpp — TODO confirm against the definition file).
302 inline bool is_update_refs_in_progress() const;
303 inline bool is_evacuation_in_progress() const;
304 inline bool is_degenerated_gc_in_progress() const;
305 inline bool is_full_gc_in_progress() const;
306 inline bool is_full_gc_move_in_progress() const;
307 inline bool has_forwarded_objects() const;
308 inline bool is_gc_in_progress_mask(uint mask) const;
309 inline bool is_stw_gc_in_progress() const;
310 inline bool is_concurrent_strong_root_in_progress() const;
311 inline bool is_concurrent_weak_root_in_progress() const;
312
313 // ---------- GC cancellation and degeneration machinery
314 //
315 // Cancelled GC flag is used to notify concurrent phases that they should terminate.
316 //
317 public:
318 enum ShenandoahDegenPoint {
319 _degenerated_unset,            // No degeneration point recorded
320 _degenerated_outside_cycle,    // Degenerated outside of a concurrent cycle
321 _degenerated_mark,             // Degenerated at concurrent mark
322 _degenerated_evac_update,      // Degenerated at the combined evacuation/update phase
323 _DEGENERATED_LIMIT             // Sentinel: count of degeneration points, not a valid point
324 };
325
// Human-readable name of a degeneration point, for logging/tracing.
// Returns an immortal static string; hits ShouldNotReachHere() on any
// value outside the enumerated points (including _DEGENERATED_LIMIT).
326 static const char* degen_point_to_string(ShenandoahDegenPoint point) {
327 switch (point) {
328 case _degenerated_unset:
329 return "<UNSET>";
330 case _degenerated_outside_cycle:
331 return "Outside of Cycle";
332 case _degenerated_mark:
333 return "Mark";
334 case _degenerated_evac_update:
335 return "Evac Update";
336 default:
337 ShouldNotReachHere();
338 return "ERROR";
339 }
340 }
341
342 private:
343 enum CancelState {
344 // Normal state. GC has not been cancelled and is open for cancellation.
345 // Worker threads can suspend for safepoint.
346 CANCELLABLE,
347
348 // GC has been cancelled. Worker threads can not suspend for
349 // safepoint but must finish their work as soon as possible.
350 CANCELLED,
351
352 // GC has not been cancelled and must not be cancelled. At least
353 // one worker thread checks for pending safepoint and may suspend
354 // if a safepoint is pending.
355 NOT_CANCELLED
358 ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
359 bool try_cancel_gc();
360
361 public:
362 static address cancelled_gc_addr();
363
364 inline bool cancelled_gc() const;
365 inline bool check_cancelled_gc_and_yield(bool sts_active = true);
366
367 inline void clear_cancelled_gc();
368
369 void cancel_gc(GCCause::Cause cause);
370
371 // ---------- GC operations entry points
372 //
373 public:
374 // Entry points to STW GC operations, these cause a related safepoint, that then
375 // call the entry method below
376 void vmop_entry_init_mark();
377 void vmop_entry_final_mark();
378 void vmop_entry_init_evac_update();
379 void vmop_entry_final_evac_update();
380 void vmop_entry_full(GCCause::Cause cause);
381 void vmop_degenerated(ShenandoahDegenPoint point);
382
383 // Entry methods to normally STW GC operations. These set up logging, monitoring
384 // and workers for net VM operation
385 void entry_init_mark();
386 void entry_final_mark();
387 void entry_init_evac_update();
388 void entry_final_evac_update();
389 void entry_full(GCCause::Cause cause);
390 void entry_degenerated(int point);
391
392 // Entry methods to normally concurrent GC operations. These set up logging, monitoring
393 // for concurrent operation.
394 void entry_reset();
395 void entry_mark();
396 void entry_preclean();
397 void entry_weak_roots();
398 void entry_class_unloading();
399 void entry_strong_roots();
400 void entry_cleanup_early();
401 void entry_evac_update();
402 void entry_cleanup_complete();
403 void entry_uncommit(double shrink_before);
404
405 private:
406 // Actual work for the phases
407 void op_init_mark();
408 void op_final_mark();
409 void op_init_evac_update();
410 void op_final_evac_update();
411 void op_full(GCCause::Cause cause);
412 void op_degenerated(ShenandoahDegenPoint point);
413 void op_degenerated_fail();
414 void op_degenerated_futile();
415
416 void op_reset();
417 void op_mark();
418 void op_preclean();
419 void op_weak_roots();
420 void op_class_unloading();
421 void op_strong_roots();
422 void op_cleanup_early();
423 void op_conc_evac_update();
424 void op_stw_evac_update();
425 void op_cleanup_complete();
426 void op_uncommit(double shrink_before);
427
428 // Messages for GC trace events, they have to be immortal for
429 // passing around the logging/tracing systems
430 const char* init_mark_event_message() const;
431 const char* final_mark_event_message() const;
432 const char* conc_mark_event_message() const;
433 const char* degen_event_message(ShenandoahDegenPoint point) const;
434
435 // ---------- GC subsystems
436 //
437 private:
438 ShenandoahControlThread* _control_thread;
439 ShenandoahCollectorPolicy* _shenandoah_policy;
440 ShenandoahMode* _gc_mode;
441 ShenandoahHeuristics* _heuristics;
442 ShenandoahFreeSet* _free_set;
443 ShenandoahConcurrentMark* _scm;
444 ShenandoahMarkCompact* _full_gc;
624 size_t _bitmap_regions_per_slice;
625 size_t _bitmap_bytes_per_slice;
626
627 bool _bitmap_region_special;
628 bool _aux_bitmap_region_special;
629
630 ShenandoahLiveData** _liveness_cache;
631
632 public:
633 inline ShenandoahMarkingContext* complete_marking_context() const;
634 inline ShenandoahMarkingContext* marking_context() const;
635 inline void mark_complete_marking_context();
636 inline void mark_incomplete_marking_context();
637
638 template<class T>
639 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
640
641 template<class T>
642 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
643
644 void reset_mark_bitmap();
645
646 // SATB barriers hooks
647 template<bool RESOLVE>
648 inline bool requires_marking(const void* entry) const;
649 void force_satb_flush_all_threads();
650
651 // Support for bitmap uncommits
652 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
653 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
654 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
655
656 // Liveness caching support
657 ShenandoahLiveData* get_liveness_cache(uint worker_id);
658 void flush_liveness_cache(uint worker_id);
659
660 // ---------- Evacuation support
661 //
662 private:
663 ShenandoahCollectionSet* _collection_set;
664 ShenandoahEvacOOMHandler _oom_evac_handler;
665
666 void evacuate_and_update_roots();
667
668 public:
669 static address in_cset_fast_test_addr();
670
671 ShenandoahCollectionSet* collection_set() const { return _collection_set; } // Accessor for the current collection set
672
673 // Checks if object is in the collection set.
674 inline bool in_collection_set(oop obj) const;
675
676 // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
677 inline bool in_collection_set_loc(void* loc) const;
678
679 // Evacuates object src. Returns the evacuated object, either evacuated
680 // by this thread, or by some other thread.
681 inline oop evacuate_object(oop src, Thread* thread);
682
683 template<class T>
684 oop evacuate_object_recursively(oop obj, T* load_addr, Thread* thread);
685
686 // Call before/after evacuation.
687 void enter_evacuation();
688 void leave_evacuation();
689
690 // ---------- Helper functions
691 //
692 public:
693 template <class T>
694 inline oop evac_update_with_forwarded(T* p);
695
696 template <class T>
697 inline oop maybe_update_with_forwarded(T* p);
698
699 template <class T>
700 inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
701
702 template <class T>
703 inline oop update_with_forwarded_not_null(T* p, oop obj);
704
705 static inline oop cas_oop(oop n, narrowOop* addr, oop c);
706 static inline oop cas_oop(oop n, oop* addr, oop c);
707 static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c);
708
709 void trash_humongous_region_at(ShenandoahHeapRegion *r);
710
711 void deduplicate_string(oop str);
712
713 private:
714 void trash_cset_regions();
715
716 // ---------- Testing helpers functions
717 //
718 private:
719 ShenandoahSharedFlag _inject_alloc_failure;
720
721 void try_inject_alloc_failure();
722 bool should_inject_alloc_failure();
723 };
724
725 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
|