217 size_t committed() const;
218
219 // ---------- Workers handling
220 //
221 private:
222 uint _max_workers;
223 ShenandoahWorkGang* _workers;
224
225 public:
226 uint max_workers();
227 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
228
229 ShenandoahWorkGang* workers() const;
230
231 void gc_threads_do(ThreadClosure* tcl) const;
232
233 // ---------- Heap regions handling machinery
234 //
235 private:
236 MemRegion _heap_region;
237 size_t _num_regions;
238 ShenandoahHeapRegion** _regions;
239 ShenandoahRegionIterator _update_refs_iterator;
240
241 public:
// Returns the number of regions the heap is divided into (backed by _num_regions).
242 inline size_t num_regions() const { return _num_regions; }
243
244 inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
245 inline size_t heap_region_index_containing(const void* addr) const;
246
247 inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
248
249 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
250 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
251
252 // ---------- GC state machinery
253 //
254 // GC state describes the important parts of collector state that may be
255 // used to make barrier selection decisions in the native and generated code.
256 // Multiple bits can be set at once.
257 //
258 // Important invariant: when GC state is zero, the heap is stable, and no barriers
259 // are required.
260 //
261 public:
262 enum GCStateBitPos {
584 void resize_all_tlabs();
585
586 void accumulate_statistics_tlabs();
587 void accumulate_statistics_all_gclabs();
588
589 void make_parsable(bool retire_tlabs);
590 void ensure_parsability(bool retire_tlabs);
591
592 // ---------- Marking support
593 //
594 private:
595 ShenandoahMarkingContext* _marking_context;
596 MemRegion _bitmap_region;
597 MemRegion _aux_bitmap_region;
598 MarkBitMap _verification_bit_map;
599 MarkBitMap _aux_bit_map;
600
601 size_t _bitmap_size;
602 size_t _bitmap_regions_per_slice;
603 size_t _bitmap_bytes_per_slice;
604
605 // Used for buffering per-region liveness data.
606 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
607 //
608 // The array has max-workers elements, each of which is an array of
609 // jushort * max_regions. The choice of jushort is not accidental:
610 // there is a tradeoff between static/dynamic footprint that translates
611 // into cache pressure (which is already high during marking), and
612 // too many atomic updates. size_t/jint is too large, jbyte is too small.
613 jushort** _liveness_cache;
614
615 public:
616 inline ShenandoahMarkingContext* complete_marking_context() const;
617 inline ShenandoahMarkingContext* marking_context() const;
618 inline void mark_complete_marking_context();
619 inline void mark_incomplete_marking_context();
620
621 template<class T>
622 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
623
|
217 size_t committed() const;
218
219 // ---------- Workers handling
220 //
221 private:
222 uint _max_workers;
223 ShenandoahWorkGang* _workers;
224
225 public:
226 uint max_workers();
227 void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;
228
229 ShenandoahWorkGang* workers() const;
230
231 void gc_threads_do(ThreadClosure* tcl) const;
232
233 // ---------- Heap regions handling machinery
234 //
235 private:
236 MemRegion _heap_region;
237 bool _heap_region_special;
238 size_t _num_regions;
239 ShenandoahHeapRegion** _regions;
240 ShenandoahRegionIterator _update_refs_iterator;
241
242 public:
// Returns the number of regions the heap is divided into (backed by _num_regions).
243 inline size_t num_regions() const { return _num_regions; }
// Reports whether the heap reservation is "special" (backed by _heap_region_special).
// NOTE(review): in HotSpot, a "special" ReservedSpace is presumably one that is
// pre-committed at reservation time (e.g. large pages), so explicit commit/uncommit
// must be skipped -- confirm against the allocation path.
// Marked const: this is a pure accessor, consistent with num_regions() above.
244 inline bool is_heap_region_special() const { return _heap_region_special; }
245
246 inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
247 inline size_t heap_region_index_containing(const void* addr) const;
248
249 inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
250
251 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
252 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
253
254 // ---------- GC state machinery
255 //
256 // GC state describes the important parts of collector state that may be
257 // used to make barrier selection decisions in the native and generated code.
258 // Multiple bits can be set at once.
259 //
260 // Important invariant: when GC state is zero, the heap is stable, and no barriers
261 // are required.
262 //
263 public:
264 enum GCStateBitPos {
586 void resize_all_tlabs();
587
588 void accumulate_statistics_tlabs();
589 void accumulate_statistics_all_gclabs();
590
591 void make_parsable(bool retire_tlabs);
592 void ensure_parsability(bool retire_tlabs);
593
594 // ---------- Marking support
595 //
596 private:
597 ShenandoahMarkingContext* _marking_context;
598 MemRegion _bitmap_region;
599 MemRegion _aux_bitmap_region;
600 MarkBitMap _verification_bit_map;
601 MarkBitMap _aux_bit_map;
602
603 size_t _bitmap_size;
604 size_t _bitmap_regions_per_slice;
605 size_t _bitmap_bytes_per_slice;
606
607 bool _bitmap_region_special;
608 bool _aux_bitmap_region_special;
609
610 // Used for buffering per-region liveness data.
611 // Needed since ShenandoahHeapRegion uses atomics to update liveness.
612 //
613 // The array has max-workers elements, each of which is an array of
614 // jushort * max_regions. The choice of jushort is not accidental:
615 // there is a tradeoff between static/dynamic footprint that translates
616 // into cache pressure (which is already high during marking), and
617 // too many atomic updates. size_t/jint is too large, jbyte is too small.
618 jushort** _liveness_cache;
619
620 public:
621 inline ShenandoahMarkingContext* complete_marking_context() const;
622 inline ShenandoahMarkingContext* marking_context() const;
623 inline void mark_complete_marking_context();
624 inline void mark_incomplete_marking_context();
625
626 template<class T>
627 inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);
628
|