21 *
22 */
23
24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
26
27 #include "gc/shared/markBitMap.hpp"
28 #include "gc/shared/softRefPolicy.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shenandoah/shenandoahHeapLock.hpp"
31 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
32 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
33 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
34 #include "services/memoryManager.hpp"
35
36 class ConcurrentGCTimer;
37 class ShenandoahAsserts;
38 class ShenandoahAllocTracker;
39 class ShenandoahCollectorPolicy;
40 class ShenandoahConnectionMatrix;
41 class ShenandoahPhaseTimings;
42 class ShenandoahHeap;
43 class ShenandoahHeapRegion;
44 class ShenandoahHeapRegionClosure;
45 class ShenandoahHeapRegionSet;
46 class ShenandoahCollectionSet;
47 class ShenandoahFreeSet;
48 class ShenandoahConcurrentMark;
49 class ShenandoahMarkCompact;
50 class ShenandoahPartialGC;
51 class ShenandoahPacer;
52 class ShenandoahTraversalGC;
53 class ShenandoahVerifier;
54 class ShenandoahConcurrentThread;
55 class ShenandoahMonitoringSupport;
56
57 class ShenandoahRegionIterator : public StackObj {
58 private:
59 volatile size_t _index;
60 ShenandoahHeap* _heap;
61 public:
62 ShenandoahRegionIterator();
63 ShenandoahRegionIterator(ShenandoahHeap* heap);
64
65 // Returns next region, or NULL if there are no more regions.
66 // This is multi-thread-safe.
67 inline ShenandoahHeapRegion* next();
68
69 // This is *not* MT safe. However, in the absence of multithreaded access, it
70 // can be used to determine if there is more work to do.
162 public:
163 // GC state describes the important parts of collector state, that may be
164 // used to make barrier selection decisions in the native and generated code.
165 // Multiple bits can be set at once.
166 //
167 // Important invariant: when GC state is zero, the heap is stable, and no barriers
168 // are required.
169 enum GCStateBitPos {
170 // Heap has forwarded objects: need RB, ACMP, CAS barriers.
171 HAS_FORWARDED_BITPOS = 0,
172
173 // Heap is under marking: needs SATB barriers.
174 MARKING_BITPOS = 1,
175
176 // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
177 EVACUATION_BITPOS = 2,
178
179 // Heap is under updating: needs SVRB/SVWB barriers.
180 UPDATEREFS_BITPOS = 3,
181
182 // Heap is under partial collection
183 PARTIAL_BITPOS = 4,
184
185 // Heap is under traversal collection
186 TRAVERSAL_BITPOS = 5,
187 };
188
// Bit masks derived from GCStateBitPos. These are stored in _gc_state and
// queried via is_gc_in_progress_mask(); STABLE (zero) means no GC phase is
// active and no barriers are required (see invariant comment above).
189 enum GCState {
190 STABLE = 0,
191 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
192 MARKING = 1 << MARKING_BITPOS,
193 EVACUATION = 1 << EVACUATION_BITPOS,
194 UPDATEREFS = 1 << UPDATEREFS_BITPOS,
195 PARTIAL = 1 << PARTIAL_BITPOS,
196 TRAVERSAL = 1 << TRAVERSAL_BITPOS,
197 };
198
// Points at which a concurrent cycle may be abandoned and degenerate;
// passed to vmop_degenerated()/entry_degenerated()/op_degenerated() below.
// _DEGENERATED_LIMIT is a sentinel marking the enum's size, not a real point.
199 enum ShenandoahDegenPoint {
200 _degenerated_unset,
201 _degenerated_partial,
202 _degenerated_traversal,
203 _degenerated_outside_cycle,
204 _degenerated_mark,
205 _degenerated_evac,
206 _degenerated_updaterefs,
207 _DEGENERATED_LIMIT,
208 };
209
210 static const char* degen_point_to_string(ShenandoahDegenPoint point) {
211 switch (point) {
212 case _degenerated_unset:
213 return "<UNSET>";
214 case _degenerated_partial:
215 return "Partial";
216 case _degenerated_traversal:
217 return "Traversal";
218 case _degenerated_outside_cycle:
219 return "Outside of Cycle";
220 case _degenerated_mark:
221 return "Mark";
222 case _degenerated_evac:
223 return "Evacuation";
224 case _degenerated_updaterefs:
225 return "Update Refs";
226 default:
227 ShouldNotReachHere();
228 return "ERROR";
229 }
230 };
231
232 private:
233 ShenandoahSharedBitmap _gc_state;
234 ShenandoahHeapLock _lock;
235 ShenandoahCollectorPolicy* _shenandoah_policy;
236 SoftRefPolicy _soft_ref_policy;
237 size_t _bitmap_size;
238 size_t _bitmap_regions_per_slice;
239 size_t _bitmap_bytes_per_slice;
240 MemRegion _heap_region;
241 MemRegion _bitmap0_region;
242 MemRegion _bitmap1_region;
243 MemRegion _aux_bitmap_region;
244
245 ShenandoahHeapRegion** _regions;
246 ShenandoahFreeSet* _free_set;
247 ShenandoahCollectionSet* _collection_set;
248
249 ShenandoahRegionIterator _update_refs_iterator;
250
251 ShenandoahConcurrentMark* _scm;
252 ShenandoahMarkCompact* _full_gc;
253 ShenandoahPartialGC* _partial_gc;
254 ShenandoahTraversalGC* _traversal_gc;
255 ShenandoahVerifier* _verifier;
256 ShenandoahPacer* _pacer;
257
258 ShenandoahConcurrentThread* _concurrent_gc_thread;
259
260 ShenandoahMonitoringSupport* _monitoring_support;
261
262 ShenandoahPhaseTimings* _phase_timings;
263 ShenandoahAllocTracker* _alloc_tracker;
264
265 size_t _num_regions;
266 size_t _initial_size;
267
268 uint _max_workers;
269 ShenandoahWorkGang* _workers;
270 ShenandoahWorkGang* _safepoint_workers;
271
272 volatile size_t _used;
273 volatile size_t _committed;
297 ShenandoahSharedFlag _unload_classes;
298
299 ShenandoahSharedEnumFlag<CancelState> _cancelled_concgc;
300
301 ReferenceProcessor* _ref_processor;
302
303 ShenandoahForwardedIsAliveClosure _forwarded_is_alive;
304 ShenandoahIsAliveClosure _is_alive;
305
306 ConcurrentGCTimer* _gc_timer;
307
308 ShenandoahConnectionMatrix* _connection_matrix;
309
310 GCMemoryManager _stw_memory_manager;
311 GCMemoryManager _cycle_memory_manager;
312
313 MemoryPool* _memory_pool;
314
315 ShenandoahEvacOOMHandler _oom_evac_handler;
316
317 #ifdef ASSERT
318 int _heap_expansion_count;
319 #endif
320
321 public:
322 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
323
324 const char* name() const /* override */;
325 HeapWord* allocate_new_tlab(size_t word_size) /* override */;
326 void print_on(outputStream* st) const /* override */;
327 void print_extended_on(outputStream *st) const /* override */;
328
// CollectedHeap interface: identifies this heap instance as the Shenandoah
// kind (one of the CollectedHeap::Name enumerators).
329 ShenandoahHeap::Name kind() const /* override */{
330 return CollectedHeap::Shenandoah;
331 }
332
333 jint initialize() /* override */;
334 void post_initialize() /* override */;
335 size_t capacity() const /* override */;
336 size_t used() const /* override */;
387 /* override: object pinning support */
388 bool supports_object_pinning() const { return true; }
389 oop pin_object(JavaThread* thread, oop obj);
390 void unpin_object(JavaThread* thread, oop obj);
391
392 static ShenandoahHeap* heap();
393 static ShenandoahHeap* heap_no_check();
394 static address in_cset_fast_test_addr();
395 static address cancelled_concgc_addr();
396 static address gc_state_addr();
397
398 ShenandoahCollectorPolicy *shenandoahPolicy() const { return _shenandoah_policy; }
399 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
400 ShenandoahAllocTracker* alloc_tracker() const { return _alloc_tracker; }
401
402 inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
403 inline size_t heap_region_index_containing(const void* addr) const;
404 inline bool requires_marking(const void* entry) const;
405
406 template <class T>
407 inline oop evac_update_with_forwarded(T* p, bool &evac);
408
409 template <class T>
410 inline oop maybe_update_with_forwarded(T* p);
411
412 template <class T>
413 inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
414
415 template <class T>
416 inline oop update_with_forwarded_not_null(T* p, oop obj);
417
418 void trash_cset_regions();
419
420 void stop_concurrent_marking();
421
422 void prepare_for_concurrent_evacuation();
423 void evacuate_and_update_roots();
424 // Fixup roots after concurrent cycle failed
425 void fixup_roots();
426
427 void update_heap_references(bool concurrent);
428
429 void roots_iterate(OopClosure* cl);
430
431 private:
432 void set_gc_state_all_threads(char state);
433 void set_gc_state_mask(uint mask, bool value);
434
435 public:
436 void set_concurrent_mark_in_progress(bool in_progress);
437 void set_evacuation_in_progress(bool in_progress);
438 void set_update_refs_in_progress(bool in_progress);
439 void set_degenerated_gc_in_progress(bool in_progress);
440 void set_full_gc_in_progress(bool in_progress);
441 void set_full_gc_move_in_progress(bool in_progress);
442 void set_concurrent_partial_in_progress(bool in_progress);
443 void set_concurrent_traversal_in_progress(bool in_progress);
444 void set_has_forwarded_objects(bool cond);
445
446 void set_process_references(bool pr);
447 void set_unload_classes(bool uc);
448
449 inline bool is_stable() const;
450 inline bool is_idle() const;
451 inline bool is_concurrent_mark_in_progress() const;
452 inline bool is_update_refs_in_progress() const;
453 inline bool is_evacuation_in_progress() const;
454 inline bool is_degenerated_gc_in_progress() const;
455 inline bool is_full_gc_in_progress() const;
456 inline bool is_full_gc_move_in_progress() const;
457 inline bool is_concurrent_partial_in_progress() const;
458 inline bool is_concurrent_traversal_in_progress() const;
459 inline bool has_forwarded_objects() const;
460 inline bool is_gc_in_progress_mask(uint mask) const;
461
462 char gc_state() const;
463
464 bool process_references() const;
465 bool unload_classes() const;
466
467 inline bool region_in_collection_set(size_t region_index) const;
468
469 // Mainly there to avoid accidentally calling the templated
470 // method below with ShenandoahHeapRegion* which would be *wrong*.
471 inline bool in_collection_set(ShenandoahHeapRegion* r) const;
472
473 template <class T>
474 inline bool in_collection_set(T obj) const;
475
476 inline bool allocated_after_next_mark_start(HeapWord* addr) const;
477 void set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
478 HeapWord* next_top_at_mark_start(HeapWord* region_base);
479
480 inline bool allocated_after_complete_mark_start(HeapWord* addr) const;
481 void set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
482 HeapWord* complete_top_at_mark_start(HeapWord* region_base);
483
484 // Evacuates object src. Returns the evacuated object if this thread
485 // succeeded, otherwise rolls back the evacuation and returns the
486 // evacuated object by the competing thread. 'succeeded' is an out
487 // param and set to true if this thread succeeded, otherwise to false.
488 inline oop evacuate_object(oop src, Thread* thread, bool& evacuated);
489 inline bool cancelled_concgc() const;
490 inline bool check_cancelled_concgc_and_yield(bool sts_active = true);
491 inline bool try_cancel_concgc();
492 inline void clear_cancelled_concgc();
493
494 inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
495 ShenandoahRegionIterator region_iterator() const;
496 void heap_region_iterate(ShenandoahHeapRegionClosure& cl) const;
497
498 ShenandoahFreeSet* free_set() const { return _free_set; }
499 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
500
501 ShenandoahConnectionMatrix* connection_matrix() const;
502
503 void increase_used(size_t bytes);
504 void decrease_used(size_t bytes);
505
506 void set_used(size_t bytes);
507
508 void increase_committed(size_t bytes);
509 void decrease_committed(size_t bytes);
510
511 void increase_allocated(size_t bytes);
512
513 void notify_alloc(size_t words, bool waste);
514
515 void handle_heap_shrinkage(double shrink_before);
516
517 void reset_next_mark_bitmap();
518
519 MarkBitMap* complete_mark_bit_map();
520 MarkBitMap* next_mark_bit_map();
521 inline bool is_marked_complete(oop obj) const;
522 inline bool mark_next(oop obj) const;
523 inline bool is_marked_next(oop obj) const;
524 bool is_next_bitmap_clear();
525 bool is_next_bitmap_clear_range(HeapWord* start, HeapWord* end);
526 bool is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end);
527
528 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
529 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
530
531 // Hint that the bitmap slice is not needed
532 bool idle_bitmap_slice(ShenandoahHeapRegion* r);
533 void activate_bitmap_slice(ShenandoahHeapRegion* r);
534
535 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
536
537 void print_heap_regions_on(outputStream* st) const;
538
539 size_t bytes_allocated_since_gc_start();
540 void reset_bytes_allocated_since_gc_start();
541
542 void trash_humongous_region_at(ShenandoahHeapRegion *r);
543
544 virtual GrowableArray<GCMemoryManager*> memory_managers();
545 virtual GrowableArray<MemoryPool*> memory_pools();
546
547 ShenandoahMonitoringSupport* monitoring_support();
548 ShenandoahConcurrentMark* concurrentMark() { return _scm; }
549 ShenandoahMarkCompact* full_gc() { return _full_gc; }
550 ShenandoahPartialGC* partial_gc();
551 ShenandoahTraversalGC* traversal_gc();
552 ShenandoahVerifier* verifier();
553 ShenandoahPacer* pacer() const;
554
555 ReferenceProcessor* ref_processor() { return _ref_processor;}
556
557 WorkGang* workers() const { return _workers;}
558
559 uint max_workers();
560
561 void assert_gc_workers(uint nworker) PRODUCT_RETURN;
562
563 void do_evacuation();
564 ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);
565
566 void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions = false, bool skip_humongous_continuation = false) const;
567
568 // Delete entries for dead interned string and clean up unreferenced symbols
569 // in symbol table, possibly in parallel.
570 void unload_classes_and_cleanup_tables(bool full_gc);
670 size_t used_at_last_gc() const { return _used_at_last_gc;}
671
672 void set_alloc_seq_gc_start();
673 void set_alloc_seq_gc_end();
674
675 void set_used_at_last_gc() {_used_at_last_gc = used();}
676
677 void make_tlabs_parsable(bool retire_tlabs) /* override */;
678
679 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
680 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
681
682 public:
683 // Entry points to STW GC operations, these cause a related safepoint, that then
684 // call the entry method below
685 void vmop_entry_init_mark();
686 void vmop_entry_final_mark();
687 void vmop_entry_final_evac();
688 void vmop_entry_init_updaterefs();
689 void vmop_entry_final_updaterefs();
690 void vmop_entry_init_partial();
691 void vmop_entry_final_partial();
692 void vmop_entry_init_traversal();
693 void vmop_entry_final_traversal();
694 void vmop_entry_full(GCCause::Cause cause);
695 void vmop_degenerated(ShenandoahDegenPoint point);
696
697 // Entry methods to normally STW GC operations. These set up logging, monitoring
698 // and workers for net VM operation
699 void entry_init_mark();
700 void entry_final_mark();
701 void entry_final_evac();
702 void entry_init_updaterefs();
703 void entry_final_updaterefs();
704 void entry_init_partial();
705 void entry_final_partial();
706 void entry_init_traversal();
707 void entry_final_traversal();
708 void entry_full(GCCause::Cause cause);
709 void entry_degenerated(int point);
710
711 // Entry methods to normally concurrent GC operations. These set up logging, monitoring
712 // for concurrent operation.
713 void entry_mark();
714 void entry_preclean();
715 void entry_cleanup();
716 void entry_cleanup_bitmaps();
717 void entry_evac();
718 void entry_updaterefs();
719 void entry_partial();
720 void entry_traversal();
721
722 private:
723 // Actual work for the phases
724 void op_init_mark();
725 void op_final_mark();
726 void op_final_evac();
727 void op_init_updaterefs();
728 void op_final_updaterefs();
729 void op_init_partial();
730 void op_final_partial();
731 void op_init_traversal();
732 void op_final_traversal();
733 void op_full(GCCause::Cause cause);
734 void op_degenerated(ShenandoahDegenPoint point);
735 void op_degenerated_fail();
736 void op_degenerated_futile();
737
738 void op_mark();
739 void op_preclean();
740 void op_cleanup();
741 void op_evac();
742 void op_updaterefs();
743 void op_cleanup_bitmaps();
744 void op_partial();
745 void op_traversal();
746
747 private:
748 void try_inject_alloc_failure();
749 bool should_inject_alloc_failure();
750 };
751
752 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
|
21 *
22 */
23
24 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
25 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
26
27 #include "gc/shared/markBitMap.hpp"
28 #include "gc/shared/softRefPolicy.hpp"
29 #include "gc/shared/collectedHeap.hpp"
30 #include "gc/shenandoah/shenandoahHeapLock.hpp"
31 #include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
32 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
33 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
34 #include "services/memoryManager.hpp"
35
36 class ConcurrentGCTimer;
37 class ShenandoahAsserts;
38 class ShenandoahAllocTracker;
39 class ShenandoahCollectorPolicy;
40 class ShenandoahConnectionMatrix;
41 class ShenandoahFastRegionSet;
42 class ShenandoahPhaseTimings;
43 class ShenandoahHeap;
44 class ShenandoahHeapRegion;
45 class ShenandoahHeapRegionClosure;
46 class ShenandoahHeapRegionSet;
47 class ShenandoahCollectionSet;
48 class ShenandoahFreeSet;
49 class ShenandoahConcurrentMark;
50 class ShenandoahMarkCompact;
51 class ShenandoahPacer;
52 class ShenandoahTraversalGC;
53 class ShenandoahVerifier;
54 class ShenandoahConcurrentThread;
55 class ShenandoahMonitoringSupport;
56
57 class ShenandoahRegionIterator : public StackObj {
58 private:
59 volatile size_t _index;
60 ShenandoahHeap* _heap;
61 public:
62 ShenandoahRegionIterator();
63 ShenandoahRegionIterator(ShenandoahHeap* heap);
64
65 // Returns next region, or NULL if there are no more regions.
66 // This is multi-thread-safe.
67 inline ShenandoahHeapRegion* next();
68
69 // This is *not* MT safe. However, in the absence of multithreaded access, it
70 // can be used to determine if there is more work to do.
162 public:
163 // GC state describes the important parts of collector state, that may be
164 // used to make barrier selection decisions in the native and generated code.
165 // Multiple bits can be set at once.
166 //
167 // Important invariant: when GC state is zero, the heap is stable, and no barriers
168 // are required.
169 enum GCStateBitPos {
170 // Heap has forwarded objects: need RB, ACMP, CAS barriers.
171 HAS_FORWARDED_BITPOS = 0,
172
173 // Heap is under marking: needs SATB barriers.
174 MARKING_BITPOS = 1,
175
176 // Heap is under evacuation: needs WB barriers. (Set together with UNSTABLE)
177 EVACUATION_BITPOS = 2,
178
179 // Heap is under updating: needs SVRB/SVWB barriers.
180 UPDATEREFS_BITPOS = 3,
181
182 // Heap is under traversal collection
183 TRAVERSAL_BITPOS = 4,
184 };
185
// Bit masks derived from GCStateBitPos. These are stored in _gc_state and
// queried via is_gc_in_progress_mask(); STABLE (zero) means no GC phase is
// active and no barriers are required (see invariant comment above).
186 enum GCState {
187 STABLE = 0,
188 HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
189 MARKING = 1 << MARKING_BITPOS,
190 EVACUATION = 1 << EVACUATION_BITPOS,
191 UPDATEREFS = 1 << UPDATEREFS_BITPOS,
192 TRAVERSAL = 1 << TRAVERSAL_BITPOS,
193 };
194
// Points at which a concurrent cycle may be abandoned and degenerate;
// passed to vmop_degenerated()/entry_degenerated()/op_degenerated() below.
// _DEGENERATED_LIMIT is a sentinel marking the enum's size, not a real point.
195 enum ShenandoahDegenPoint {
196 _degenerated_unset,
197 _degenerated_traversal,
198 _degenerated_outside_cycle,
199 _degenerated_mark,
200 _degenerated_evac,
201 _degenerated_updaterefs,
202 _DEGENERATED_LIMIT,
203 };
204
// Cycle mode stored in _gc_cycle_mode (set via set_cycle_mode(), queried via
// is_minor_gc()/is_major_gc() below).
205 enum GCCycleMode {
206 NONE,
207 MINOR,
208 MAJOR
209 };
210
211 static const char* degen_point_to_string(ShenandoahDegenPoint point) {
212 switch (point) {
213 case _degenerated_unset:
214 return "<UNSET>";
215 case _degenerated_traversal:
216 return "Traversal";
217 case _degenerated_outside_cycle:
218 return "Outside of Cycle";
219 case _degenerated_mark:
220 return "Mark";
221 case _degenerated_evac:
222 return "Evacuation";
223 case _degenerated_updaterefs:
224 return "Update Refs";
225 default:
226 ShouldNotReachHere();
227 return "ERROR";
228 }
229 };
230
231 private:
232 ShenandoahSharedBitmap _gc_state;
233 ShenandoahHeapLock _lock;
234 ShenandoahCollectorPolicy* _shenandoah_policy;
235 SoftRefPolicy _soft_ref_policy;
236 size_t _bitmap_size;
237 size_t _bitmap_regions_per_slice;
238 size_t _bitmap_bytes_per_slice;
239 MemRegion _heap_region;
240 MemRegion _bitmap0_region;
241 MemRegion _bitmap1_region;
242 MemRegion _aux_bitmap_region;
243
244 ShenandoahHeapRegion** _regions;
245 ShenandoahFreeSet* _free_set;
246 ShenandoahCollectionSet* _collection_set;
247
248 ShenandoahRegionIterator _update_refs_iterator;
249
250 ShenandoahConcurrentMark* _scm;
251 ShenandoahMarkCompact* _full_gc;
252 ShenandoahTraversalGC* _traversal_gc;
253 ShenandoahVerifier* _verifier;
254 ShenandoahPacer* _pacer;
255
256 ShenandoahConcurrentThread* _concurrent_gc_thread;
257
258 ShenandoahMonitoringSupport* _monitoring_support;
259
260 ShenandoahPhaseTimings* _phase_timings;
261 ShenandoahAllocTracker* _alloc_tracker;
262
263 size_t _num_regions;
264 size_t _initial_size;
265
266 uint _max_workers;
267 ShenandoahWorkGang* _workers;
268 ShenandoahWorkGang* _safepoint_workers;
269
270 volatile size_t _used;
271 volatile size_t _committed;
295 ShenandoahSharedFlag _unload_classes;
296
297 ShenandoahSharedEnumFlag<CancelState> _cancelled_concgc;
298
299 ReferenceProcessor* _ref_processor;
300
301 ShenandoahForwardedIsAliveClosure _forwarded_is_alive;
302 ShenandoahIsAliveClosure _is_alive;
303
304 ConcurrentGCTimer* _gc_timer;
305
306 ShenandoahConnectionMatrix* _connection_matrix;
307
308 GCMemoryManager _stw_memory_manager;
309 GCMemoryManager _cycle_memory_manager;
310
311 MemoryPool* _memory_pool;
312
313 ShenandoahEvacOOMHandler _oom_evac_handler;
314
315 ShenandoahSharedEnumFlag<GCCycleMode> _gc_cycle_mode;
316
317 #ifdef ASSERT
318 int _heap_expansion_count;
319 #endif
320
321 public:
322 ShenandoahHeap(ShenandoahCollectorPolicy* policy);
323
324 const char* name() const /* override */;
325 HeapWord* allocate_new_tlab(size_t word_size) /* override */;
326 void print_on(outputStream* st) const /* override */;
327 void print_extended_on(outputStream *st) const /* override */;
328
// CollectedHeap interface: identifies this heap instance as the Shenandoah
// kind (one of the CollectedHeap::Name enumerators).
329 ShenandoahHeap::Name kind() const /* override */{
330 return CollectedHeap::Shenandoah;
331 }
332
333 jint initialize() /* override */;
334 void post_initialize() /* override */;
335 size_t capacity() const /* override */;
336 size_t used() const /* override */;
387 /* override: object pinning support */
388 bool supports_object_pinning() const { return true; }
389 oop pin_object(JavaThread* thread, oop obj);
390 void unpin_object(JavaThread* thread, oop obj);
391
392 static ShenandoahHeap* heap();
393 static ShenandoahHeap* heap_no_check();
394 static address in_cset_fast_test_addr();
395 static address cancelled_concgc_addr();
396 static address gc_state_addr();
397
398 ShenandoahCollectorPolicy *shenandoahPolicy() const { return _shenandoah_policy; }
399 ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
400 ShenandoahAllocTracker* alloc_tracker() const { return _alloc_tracker; }
401
402 inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
403 inline size_t heap_region_index_containing(const void* addr) const;
404 inline bool requires_marking(const void* entry) const;
405
406 template <class T>
407 inline oop evac_update_with_forwarded(T* p);
408
409 template <class T>
410 inline oop maybe_update_with_forwarded(T* p);
411
412 template <class T>
413 inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);
414
415 template <class T>
416 inline oop update_with_forwarded_not_null(T* p, oop obj);
417
418 void trash_cset_regions();
419
420 void stop_concurrent_marking();
421
422 void prepare_for_concurrent_evacuation();
423 void evacuate_and_update_roots();
424 // Fixup roots after concurrent cycle failed
425 void fixup_roots();
426
427 void update_heap_references(bool concurrent);
428
429 void roots_iterate(OopClosure* cl);
430
431 private:
432 void set_gc_state_all_threads(char state);
433 void set_gc_state_mask(uint mask, bool value);
434
435 public:
436 void set_concurrent_mark_in_progress(bool in_progress);
437 void set_evacuation_in_progress(bool in_progress);
438 void set_update_refs_in_progress(bool in_progress);
439 void set_degenerated_gc_in_progress(bool in_progress);
440 void set_full_gc_in_progress(bool in_progress);
441 void set_full_gc_move_in_progress(bool in_progress);
442 void set_concurrent_traversal_in_progress(bool in_progress);
443 void set_has_forwarded_objects(bool cond);
444
445 void set_process_references(bool pr);
446 void set_unload_classes(bool uc);
447
448 inline bool is_stable() const;
449 inline bool is_idle() const;
450 inline bool is_concurrent_mark_in_progress() const;
451 inline bool is_update_refs_in_progress() const;
452 inline bool is_evacuation_in_progress() const;
453 inline bool is_degenerated_gc_in_progress() const;
454 inline bool is_full_gc_in_progress() const;
455 inline bool is_full_gc_move_in_progress() const;
456 inline bool is_concurrent_traversal_in_progress() const;
457 inline bool has_forwarded_objects() const;
458 inline bool is_gc_in_progress_mask(uint mask) const;
459
460 char gc_state() const;
461
462 bool process_references() const;
463 bool unload_classes() const;
464
465 bool is_minor_gc() const;
466 bool is_major_gc() const;
467 void set_cycle_mode(GCCycleMode gc_cycle_mode);
468
469 inline bool region_in_collection_set(size_t region_index) const;
470
471 // Mainly there to avoid accidentally calling the templated
472 // method below with ShenandoahHeapRegion* which would be *wrong*.
473 inline bool in_collection_set(ShenandoahHeapRegion* r) const;
474
475 template <class T>
476 inline bool in_collection_set(T obj) const;
477
478 inline bool allocated_after_next_mark_start(HeapWord* addr) const;
479 void set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
480 HeapWord* next_top_at_mark_start(HeapWord* region_base);
481
482 inline bool allocated_after_complete_mark_start(HeapWord* addr) const;
483 void set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr);
484 HeapWord* complete_top_at_mark_start(HeapWord* region_base);
485
486 // Evacuates object src. Returns the evacuated object if this thread
487 // succeeded, otherwise rolls back the evacuation and returns the
488 // evacuated object by the competing thread.
489 inline oop evacuate_object(oop src, Thread* thread);
490 inline bool cancelled_concgc() const;
491 inline bool check_cancelled_concgc_and_yield(bool sts_active = true);
492 inline bool try_cancel_concgc();
493 inline void clear_cancelled_concgc();
494
495 inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
496 ShenandoahRegionIterator region_iterator() const;
497 void heap_region_iterate(ShenandoahHeapRegionClosure& cl) const;
498
499 ShenandoahFreeSet* free_set() const { return _free_set; }
500 ShenandoahCollectionSet* collection_set() const { return _collection_set; }
501
502 ShenandoahConnectionMatrix* connection_matrix() const;
503
504 void increase_used(size_t bytes);
505 void decrease_used(size_t bytes);
506
507 void set_used(size_t bytes);
508
509 void increase_committed(size_t bytes);
510 void decrease_committed(size_t bytes);
511
512 void increase_allocated(size_t bytes);
513
514 void notify_alloc(size_t words, bool waste);
515
516 void handle_heap_shrinkage(double shrink_before);
517
518 void reset_next_mark_bitmap();
519 void reset_next_mark_bitmap_traversal();
520
521 MarkBitMap* complete_mark_bit_map();
522 MarkBitMap* next_mark_bit_map();
523 inline bool is_marked_complete(oop obj) const;
524 inline bool mark_next(oop obj) const;
525 inline bool is_marked_next(oop obj) const;
526 bool is_next_bitmap_clear();
527 bool is_next_bitmap_clear_range(HeapWord* start, HeapWord* end);
528 bool is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end);
529
530 bool commit_bitmap_slice(ShenandoahHeapRegion *r);
531 bool uncommit_bitmap_slice(ShenandoahHeapRegion *r);
532
533 // Hint that the bitmap slice is not needed
534 bool idle_bitmap_slice(ShenandoahHeapRegion* r);
535 void activate_bitmap_slice(ShenandoahHeapRegion* r);
536
537 bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);
538
539 void print_heap_regions_on(outputStream* st) const;
540
541 size_t bytes_allocated_since_gc_start();
542 void reset_bytes_allocated_since_gc_start();
543
544 void trash_humongous_region_at(ShenandoahHeapRegion *r);
545
546 virtual GrowableArray<GCMemoryManager*> memory_managers();
547 virtual GrowableArray<MemoryPool*> memory_pools();
548
549 ShenandoahMonitoringSupport* monitoring_support();
550 ShenandoahConcurrentMark* concurrentMark() { return _scm; }
551 ShenandoahMarkCompact* full_gc() { return _full_gc; }
552 ShenandoahTraversalGC* traversal_gc();
553 ShenandoahVerifier* verifier();
554 ShenandoahPacer* pacer() const;
555
556 ReferenceProcessor* ref_processor() { return _ref_processor;}
557
558 WorkGang* workers() const { return _workers;}
559
560 uint max_workers();
561
562 void assert_gc_workers(uint nworker) PRODUCT_RETURN;
563
564 void do_evacuation();
565 ShenandoahHeapRegion* next_compaction_region(const ShenandoahHeapRegion* r);
566
567 void heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions = false, bool skip_humongous_continuation = false) const;
568
569 // Delete entries for dead interned string and clean up unreferenced symbols
570 // in symbol table, possibly in parallel.
571 void unload_classes_and_cleanup_tables(bool full_gc);
671 size_t used_at_last_gc() const { return _used_at_last_gc;}
672
673 void set_alloc_seq_gc_start();
674 void set_alloc_seq_gc_end();
675
676 void set_used_at_last_gc() {_used_at_last_gc = used();}
677
678 void make_tlabs_parsable(bool retire_tlabs) /* override */;
679
680 GCMemoryManager* cycle_memory_manager() { return &_cycle_memory_manager; }
681 GCMemoryManager* stw_memory_manager() { return &_stw_memory_manager; }
682
683 public:
684 // Entry points to STW GC operations, these cause a related safepoint, that then
685 // call the entry method below
686 void vmop_entry_init_mark();
687 void vmop_entry_final_mark();
688 void vmop_entry_final_evac();
689 void vmop_entry_init_updaterefs();
690 void vmop_entry_final_updaterefs();
691 void vmop_entry_init_traversal();
692 void vmop_entry_final_traversal();
693 void vmop_entry_full(GCCause::Cause cause);
694 void vmop_degenerated(ShenandoahDegenPoint point);
695
696 // Entry methods to normally STW GC operations. These set up logging, monitoring
697 // and workers for net VM operation
698 void entry_init_mark();
699 void entry_final_mark();
700 void entry_final_evac();
701 void entry_init_updaterefs();
702 void entry_final_updaterefs();
703 void entry_init_traversal();
704 void entry_final_traversal();
705 void entry_full(GCCause::Cause cause);
706 void entry_degenerated(int point);
707
708 // Entry methods to normally concurrent GC operations. These set up logging, monitoring
709 // for concurrent operation.
710 void entry_mark();
711 void entry_preclean();
712 void entry_cleanup();
713 void entry_cleanup_bitmaps();
714 void entry_cleanup_traversal();
715 void entry_evac();
716 void entry_updaterefs();
717 void entry_traversal();
718
719 private:
720 // Actual work for the phases
721 void op_init_mark();
722 void op_final_mark();
723 void op_final_evac();
724 void op_init_updaterefs();
725 void op_final_updaterefs();
726 void op_init_traversal();
727 void op_final_traversal();
728 void op_full(GCCause::Cause cause);
729 void op_degenerated(ShenandoahDegenPoint point);
730 void op_degenerated_fail();
731 void op_degenerated_futile();
732
733 void op_mark();
734 void op_preclean();
735 void op_cleanup();
736 void op_evac();
737 void op_updaterefs();
738 void op_cleanup_bitmaps();
739 void op_cleanup_traversal();
740 void op_traversal();
741
742 private:
743 void try_inject_alloc_failure();
744 bool should_inject_alloc_failure();
745 };
746
747 #endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
|